/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
		       reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 2; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		u64 a;
		u64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		u64 a;
		u64 b;
		u64 c;
		u64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}
	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (unsigned long long)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;
	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
		       (unsigned long long)le64_to_cpu(u0->a),
		       (unsigned long long)le64_to_cpu(u0->b),
		       (unsigned long long)buffer_info->dma,
		       buffer_info->length, buffer_info->next_to_watch,
		       (unsigned long long)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}
	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		goto exit;
	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS  |
		 *   | Checksum | Ident  |         | Queue |      |  Type    |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status  |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       phys_to_virt
						       (buffer_info->dma),
						       adapter->rx_ps_bsize0,
						       true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
		       "[reserved 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |               |  Type   |
		 *   | Checksum | Ident  |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
		       "[vt   ln xe  xs] "
		       "[bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX ---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       phys_to_virt
						       (buffer_info->dma),
						       adapter->rx_buffer_len,
						       true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
	}

exit:
	return;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
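/*
 * Note on the arithmetic above: one slot is deliberately kept unused
 * (the "- 1"); if next_to_use were ever allowed to catch up with
 * next_to_clean, a completely full ring would be indistinguishable from
 * an empty one.
 */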
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}
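/*
 * Note: the VLAN tag travels in skb metadata via __vlan_hwaccel_put_tag()
 * rather than being re-inserted into the packet data, and delivery goes
 * through napi_gro_receive() so frames remain eligible for GRO aggregation.
 */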
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
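/*
 * Note on the CHECKSUM_COMPLETE branch above: for IP fragments the hardware
 * reports the one's complement of the UDP payload checksum, so the driver
 * inverts it and converts it with csum_unfold() into the 32-bit partial
 * checksum the stack expects to find in skb->csum.
 */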
/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter but on some parts there is a bug that acknowledges Host accesses
 * later than it should which could result in the descriptor register
 * having an incorrect value.  Work around this by checking the FWSM
 * register, which has bit 24 set while ME is accessing Host CSR registers;
 * wait if it is set and try again a number of times.
 **/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem *tail,
					unsigned int i)
{
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;

	return 0;
}
static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
{
	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tail, i)) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
{
	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tail, i)) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(adapter, i);
			else
				writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
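/*
 * Note: the refill loop above pushes the tail register only once every
 * E1000_RX_BUFFER_WRITE descriptors (a power of two, 16 in this driver),
 * batching the relatively expensive MMIO write; the wmb() orders the
 * descriptor stores ahead of the doorbell on weakly ordered architectures.
 */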
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(adapter, i << 1);
			else
				writel(i << 1,
				       adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
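/*
 * Note: the tail offset is doubled (i << 1) above, presumably because a
 * packet-split descriptor is 32 bytes, twice the size of the legacy and
 * extended descriptors used by the other refill paths.
 */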
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count, gfp_t gfp)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(adapter, i);
		else
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
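		/*
		 * Note: copybreak is a module parameter (256 bytes by
		 * default); frames shorter than it are copied into a fresh
		 * skb so the original DMA buffer can simply be recycled on
		 * the next refill instead of being reallocated.
		 */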
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr,
				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
					      csum_ip.csum), skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
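/*
 * Note: mapped_as_page must be honored above because the DMA API requires
 * the unmap call (dma_unmap_page() vs dma_unmap_single()) to match the
 * call that created the mapping, even when the bus addresses are the same.
 */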
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be block on write-back, flush and detect again
		 * flush pending descriptor writebacks to memory
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();		/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
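/*
 * Note: the return value above is true when less than a full ring's worth
 * of descriptors was cleaned, i.e. the ring is believed clean;
 * e1000_intr_msix_tx() reacts to a false return by writing ICS to fire
 * another Tx interrupt and finish the job.
 */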
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page,
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter, staterr,
				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
					      csum_ip.csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
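/*
 * Note: the byte/packet totals zeroed in the handler above accumulate over
 * one NAPI polling cycle and appear to feed the driver's adaptive interrupt
 * throttling (ITR) logic, which rescales the interrupt rate to the traffic
 * observed in each window.
 */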
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
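/*
 * Note on the IVAR programming above (82574): each interrupt cause gets a
 * 4-bit allocation entry whose bit 3 (E1000_IVAR_INT_ALLOC_VALID) marks it
 * valid and whose low bits select the MSI-X vector; the << 8 and << 16
 * shifts place the TxQ0 and "other cause" entries at bits 11:8 and 19:16
 * respectively, with RxQ0 in bits 3:0.
 */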
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);

	return 0;
out:
	return err;
}
/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;

		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
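/*
 * Note: each MSI-X vector is a distinct Linux IRQ, so the loop above must
 * synchronize every vector individually; synchronize_irq() only waits for
 * handlers in flight on the single line it is given.
 */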
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}
/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}
2135 * e1000e_release_hw_control - release control of the h/w to f/w
2136 * @adapter: address of board private structure
2138 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2139 * For ASF and Pass Through versions of f/w this means that the
2140 * driver is no longer loaded. For AMT version (only with 82573)
2141 * of the f/w this means that the network i/f is closed.
2144 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2146 struct e1000_hw *hw = &adapter->hw;
2150 /* Let firmware take over control of h/w */
2151 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2153 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2154 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2155 ctrl_ext = er32(CTRL_EXT);
2156 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2161 * e1000_alloc_ring_dma - allocate memory for a ring structure
2163 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2164 struct e1000_ring *ring)
2166 struct pci_dev *pdev = adapter->pdev;
2168 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2177 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2178 * @adapter: board private structure
2180 * Returns 0 on success, negative on failure
2182 int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2184 struct e1000_ring *tx_ring = adapter->tx_ring;
2185 int err = -ENOMEM, size;
2187 size = sizeof(struct e1000_buffer) * tx_ring->count;
2188 tx_ring->buffer_info = vzalloc(size);
2189 if (!tx_ring->buffer_info)
2192 /* round up to nearest 4K */
2193 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2194 tx_ring->size = ALIGN(tx_ring->size, 4096);
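/*
 * Worked sizing example (illustrative, not in the original source):
 * a legacy Tx descriptor is 16 bytes, so a 256-descriptor ring needs
 * exactly 4096 bytes and is already aligned, while a 100-descriptor
 * ring (1600 bytes) is rounded up to 4096 so the descriptor block
 * always occupies whole 4K pages.
 */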
2196 err = e1000_alloc_ring_dma(adapter, tx_ring);
2200 tx_ring->next_to_use = 0;
2201 tx_ring->next_to_clean = 0;
2205 vfree(tx_ring->buffer_info);
2206 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2211 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2212 * @adapter: board private structure
2214 * Returns 0 on success, negative on failure
2216 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2218 struct e1000_ring *rx_ring = adapter->rx_ring;
2219 struct e1000_buffer *buffer_info;
2220 int i, size, desc_len, err = -ENOMEM;
2222 size = sizeof(struct e1000_buffer) * rx_ring->count;
2223 rx_ring->buffer_info = vzalloc(size);
2224 if (!rx_ring->buffer_info)
2227 for (i = 0; i < rx_ring->count; i++) {
2228 buffer_info = &rx_ring->buffer_info[i];
2229 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2230 sizeof(struct e1000_ps_page),
2232 if (!buffer_info->ps_pages)
2236 desc_len = sizeof(union e1000_rx_desc_packet_split);
2238 /* Round up to nearest 4K */
2239 rx_ring->size = rx_ring->count * desc_len;
2240 rx_ring->size = ALIGN(rx_ring->size, 4096);
2242 err = e1000_alloc_ring_dma(adapter, rx_ring);
2246 rx_ring->next_to_clean = 0;
2247 rx_ring->next_to_use = 0;
2248 rx_ring->rx_skb_top = NULL;
2253 for (i = 0; i < rx_ring->count; i++) {
2254 buffer_info = &rx_ring->buffer_info[i];
2255 kfree(buffer_info->ps_pages);
2258 vfree(rx_ring->buffer_info);
2259 e_err("Unable to allocate memory for the receive descriptor ring\n");
2264 * e1000_clean_tx_ring - Free Tx Buffers
2265 * @adapter: board private structure
2267 static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
2269 struct e1000_ring *tx_ring = adapter->tx_ring;
2270 struct e1000_buffer *buffer_info;
2274 for (i = 0; i < tx_ring->count; i++) {
2275 buffer_info = &tx_ring->buffer_info[i];
2276 e1000_put_txbuf(adapter, buffer_info);
2279 size = sizeof(struct e1000_buffer) * tx_ring->count;
2280 memset(tx_ring->buffer_info, 0, size);
2282 memset(tx_ring->desc, 0, tx_ring->size);
2284 tx_ring->next_to_use = 0;
2285 tx_ring->next_to_clean = 0;
2287 writel(0, adapter->hw.hw_addr + tx_ring->head);
2288 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2292 * e1000e_free_tx_resources - Free Tx Resources per Queue
2293 * @adapter: board private structure
2295 * Free all transmit software resources
2297 void e1000e_free_tx_resources(struct e1000_adapter *adapter)
2299 struct pci_dev *pdev = adapter->pdev;
2300 struct e1000_ring *tx_ring = adapter->tx_ring;
2302 e1000_clean_tx_ring(adapter);
2304 vfree(tx_ring->buffer_info);
2305 tx_ring->buffer_info = NULL;
2307 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2309 tx_ring->desc = NULL;
2313 * e1000e_free_rx_resources - Free Rx Resources
2314 * @adapter: board private structure
2316 * Free all receive software resources
2319 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2321 struct pci_dev *pdev = adapter->pdev;
2322 struct e1000_ring *rx_ring = adapter->rx_ring;
2325 e1000_clean_rx_ring(adapter);
2327 for (i = 0; i < rx_ring->count; i++)
2328 kfree(rx_ring->buffer_info[i].ps_pages);
2330 vfree(rx_ring->buffer_info);
2331 rx_ring->buffer_info = NULL;
2333 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2335 rx_ring->desc = NULL;
2339 * e1000_update_itr - update the dynamic ITR value based on statistics
2340 * @adapter: pointer to adapter
2341 * @itr_setting: current adapter->itr
2342 * @packets: the number of packets during this measurement interval
2343 * @bytes: the number of bytes during this measurement interval
2345 * Stores a new ITR value based on packets and byte
2346 * counts during the last interrupt. The advantage of per interrupt
2347 * computation is faster updates and more accurate ITR for the current
2348 * traffic pattern. Constants in this function were computed
2349 * based on theoretical maximum wire speed and thresholds were set based
2350 * on testing data as well as attempting to minimize response time
2351 * while increasing bulk throughput. This functionality is controlled
2352 * by the InterruptThrottleRate module parameter.
2354 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2355 u16 itr_setting, int packets,
2358 unsigned int retval = itr_setting;
2361 goto update_itr_done;
2363 switch (itr_setting) {
2364 case lowest_latency:
2365 /* handle TSO and jumbo frames */
2366 if (bytes/packets > 8000)
2367 retval = bulk_latency;
2368 else if ((packets < 5) && (bytes > 512))
2369 retval = low_latency;
2371 case low_latency: /* 50 usec aka 20000 ints/s */
2372 if (bytes > 10000) {
2373 /* this branch handles the TSO accounting */
2374 if (bytes/packets > 8000)
2375 retval = bulk_latency;
2376 else if ((packets < 10) || ((bytes/packets) > 1200))
2377 retval = bulk_latency;
2378 else if (packets > 35)
2379 retval = lowest_latency;
2380 } else if (bytes/packets > 2000) {
2381 retval = bulk_latency;
2382 } else if (packets <= 2 && bytes < 512) {
2383 retval = lowest_latency;
2386 case bulk_latency: /* 250 usec aka 4000 ints/s */
2387 if (bytes > 25000) {
2389 retval = low_latency;
2390 } else if (bytes < 6000) {
2391 retval = low_latency;
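/*
 * Worked classification example for e1000_update_itr (illustrative):
 * from low_latency, an interval of 30 packets / 45000 bytes has
 * bytes > 10000 and bytes/packets = 1500 > 1200, so it moves to
 * bulk_latency; 40 packets / 4000 bytes (100 bytes/packet) matches
 * none of the thresholds and stays in low_latency.
 */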
2400 static void e1000_set_itr(struct e1000_adapter *adapter)
2402 struct e1000_hw *hw = &adapter->hw;
2404 u32 new_itr = adapter->itr;
2406 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2407 if (adapter->link_speed != SPEED_1000) {
2413 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2418 adapter->tx_itr = e1000_update_itr(adapter,
2420 adapter->total_tx_packets,
2421 adapter->total_tx_bytes);
2422 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2423 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2424 adapter->tx_itr = low_latency;
2426 adapter->rx_itr = e1000_update_itr(adapter,
2428 adapter->total_rx_packets,
2429 adapter->total_rx_bytes);
2430 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2431 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2432 adapter->rx_itr = low_latency;
2434 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2436 switch (current_itr) {
2437 /* counts and packets in update_itr are dependent on these numbers */
2438 case lowest_latency:
2442 new_itr = 20000; /* aka hwitr = ~200 */
2452 if (new_itr != adapter->itr) {
2454 * this attempts to bias the interrupt rate towards Bulk
2455 * by adding intermediate steps when the interrupt rate is increasing
2458 new_itr = new_itr > adapter->itr ?
2459 min(adapter->itr + (new_itr >> 2), new_itr) :
2461 adapter->itr = new_itr;
2462 adapter->rx_ring->itr_val = new_itr;
2463 if (adapter->msix_entries)
2464 adapter->rx_ring->set_itr = 1;
2467 ew32(ITR, 1000000000 / (new_itr * 256));
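/*
 * Worked example (illustrative): new_itr is in interrupts/sec while
 * the ITR register holds the interval in 256 ns units, hence the
 * 1000000000 / (new_itr * 256) conversion; 20000 ints/s -> 50000 ns
 * -> a register value of 195. The damping step above also means a
 * jump from itr = 4000 toward 20000 first programs
 * min(4000 + (20000 >> 2), 20000) = 9000 ints/s.
 */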
2474 * e1000_alloc_queues - Allocate memory for all rings
2475 * @adapter: board private structure to initialize
2477 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2479 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2480 if (!adapter->tx_ring)
2483 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2484 if (!adapter->rx_ring)
2489 e_err("Unable to allocate memory for queues\n");
2490 kfree(adapter->rx_ring);
2491 kfree(adapter->tx_ring);
2496 * e1000_clean - NAPI Rx polling callback
2497 * @napi: struct associated with this polling callback
2498 * @budget: number of packets the driver is allowed to process this poll
2500 static int e1000_clean(struct napi_struct *napi, int budget)
2502 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2503 struct e1000_hw *hw = &adapter->hw;
2504 struct net_device *poll_dev = adapter->netdev;
2505 int tx_cleaned = 1, work_done = 0;
2507 adapter = netdev_priv(poll_dev);
2509 if (adapter->msix_entries &&
2510 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2513 tx_cleaned = e1000_clean_tx_irq(adapter);
2516 adapter->clean_rx(adapter, &work_done, budget);
2521 /* If budget not fully consumed, exit the polling mode */
2522 if (work_done < budget) {
2523 if (adapter->itr_setting & 3)
2524 e1000_set_itr(adapter);
2525 napi_complete(napi);
2526 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2527 if (adapter->msix_entries)
2528 ew32(IMS, adapter->rx_ring->ims_val);
2530 e1000_irq_enable(adapter);
2537 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2539 struct e1000_adapter *adapter = netdev_priv(netdev);
2540 struct e1000_hw *hw = &adapter->hw;
2543 /* don't update vlan cookie if already programmed */
2544 if ((adapter->hw.mng_cookie.status &
2545 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2546 (vid == adapter->mng_vlan_id))
2549 /* add VID to filter table */
2550 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2551 index = (vid >> 5) & 0x7F;
2552 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2553 vfta |= (1 << (vid & 0x1F));
2554 hw->mac.ops.write_vfta(hw, index, vfta);
2557 set_bit(vid, adapter->active_vlans);
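/*
 * Illustrative VFTA math: the 4096 possible VLAN IDs map onto 128
 * 32-bit words, so for vid = 100 the word index is 100 >> 5 = 3 and
 * the bit is 100 & 0x1F = 4, i.e. bit 4 of VFTA[3] is set.
 */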
2560 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2562 struct e1000_adapter *adapter = netdev_priv(netdev);
2563 struct e1000_hw *hw = &adapter->hw;
2566 if ((adapter->hw.mng_cookie.status &
2567 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2568 (vid == adapter->mng_vlan_id)) {
2569 /* release control to f/w */
2570 e1000e_release_hw_control(adapter);
2574 /* remove VID from filter table */
2575 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2576 index = (vid >> 5) & 0x7F;
2577 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2578 vfta &= ~(1 << (vid & 0x1F));
2579 hw->mac.ops.write_vfta(hw, index, vfta);
2582 clear_bit(vid, adapter->active_vlans);
2586 * e1000e_vlan_filter_disable - helper to disable HW VLAN filtering
2587 * @adapter: board private structure to initialize
2589 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2591 struct net_device *netdev = adapter->netdev;
2592 struct e1000_hw *hw = &adapter->hw;
2595 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2596 /* disable VLAN receive filtering */
2598 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2601 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2602 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2603 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2609 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2610 * @adapter: board private structure to initialize
2612 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2614 struct e1000_hw *hw = &adapter->hw;
2617 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2618 /* enable VLAN receive filtering */
2620 rctl |= E1000_RCTL_VFE;
2621 rctl &= ~E1000_RCTL_CFIEN;
2627 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2628 * @adapter: board private structure to initialize
2630 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2632 struct e1000_hw *hw = &adapter->hw;
2635 /* disable VLAN tag insert/strip */
2637 ctrl &= ~E1000_CTRL_VME;
2642 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2643 * @adapter: board private structure to initialize
2645 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2647 struct e1000_hw *hw = &adapter->hw;
2650 /* enable VLAN tag insert/strip */
2652 ctrl |= E1000_CTRL_VME;
2656 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2658 struct net_device *netdev = adapter->netdev;
2659 u16 vid = adapter->hw.mng_cookie.vlan_id;
2660 u16 old_vid = adapter->mng_vlan_id;
2662 if (adapter->hw.mng_cookie.status &
2663 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2664 e1000_vlan_rx_add_vid(netdev, vid);
2665 adapter->mng_vlan_id = vid;
2668 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2669 e1000_vlan_rx_kill_vid(netdev, old_vid);
2672 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2676 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2678 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2679 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2682 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2684 struct e1000_hw *hw = &adapter->hw;
2685 u32 manc, manc2h, mdef, i, j;
2687 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2693 * enable receiving management packets to the host. This will probably
2694 * generate destination unreachable messages from the host OS, but
2695 * the packets will be handled on SMBUS
2697 manc |= E1000_MANC_EN_MNG2HOST;
2698 manc2h = er32(MANC2H);
2700 switch (hw->mac.type) {
2702 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2707 * Check if IPMI pass-through decision filter already exists; if so, enable it.
2710 for (i = 0, j = 0; i < 8; i++) {
2711 mdef = er32(MDEF(i));
2713 /* Ignore filters with anything other than IPMI ports */
2714 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2717 /* Enable this decision filter in MANC2H */
2724 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2727 /* Create new decision filter in an empty filter */
2728 for (i = 0, j = 0; i < 8; i++)
2729 if (er32(MDEF(i)) == 0) {
2730 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2731 E1000_MDEF_PORT_664));
2738 e_warn("Unable to create IPMI pass-through filter\n");
2742 ew32(MANC2H, manc2h);
2747 * e1000_configure_tx - Configure Transmit Unit after Reset
2748 * @adapter: board private structure
2750 * Configure the Tx unit of the MAC after a reset.
2752 static void e1000_configure_tx(struct e1000_adapter *adapter)
2754 struct e1000_hw *hw = &adapter->hw;
2755 struct e1000_ring *tx_ring = adapter->tx_ring;
2757 u32 tdlen, tctl, tipg, tarc;
2760 /* Setup the HW Tx Head and Tail descriptor pointers */
2761 tdba = tx_ring->dma;
2762 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2763 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2764 ew32(TDBAH, (tdba >> 32));
2768 tx_ring->head = E1000_TDH;
2769 tx_ring->tail = E1000_TDT;
2771 /* Set the default values for the Tx Inter Packet Gap timer */
2772 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2773 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2774 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2776 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2777 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2779 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2780 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
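/*
 * Illustrative encoding, assuming the usual IPGR1/IPGR2 field offsets
 * of 10 and 20: with the copper defaults above (IPGT = 8, IPGR1 = 8,
 * IPGR2 = 6), TIPG is programmed to 8 | (8 << 10) | (6 << 20)
 * = 0x00602008.
 */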
2783 /* Set the Tx Interrupt Delay register */
2784 ew32(TIDV, adapter->tx_int_delay);
2785 /* Tx irq moderation */
2786 ew32(TADV, adapter->tx_abs_int_delay);
2788 if (adapter->flags2 & FLAG2_DMA_BURST) {
2789 u32 txdctl = er32(TXDCTL(0));
2790 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2791 E1000_TXDCTL_WTHRESH);
2793 * set up some performance-related parameters to encourage the
2794 * hardware to use the bus more efficiently in bursts; this depends
2795 * on tx_int_delay being enabled:
2796 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
2797 * hthresh = 1 ==> prefetch when one or more available
2798 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2799 * BEWARE: this seems to work, but it should be the first thing
2800 * suspected if there are Tx hangs or other Tx-related bugs
2802 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2803 ew32(TXDCTL(0), txdctl);
2804 /* erratum workaround: set TXDCTL the same for both queues */
2805 ew32(TXDCTL(1), txdctl);
2808 /* Program the Transmit Control Register */
2810 tctl &= ~E1000_TCTL_CT;
2811 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2812 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2814 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2815 tarc = er32(TARC(0));
2817 * set the speed mode bit; we'll clear it later if we're
2818 * not at gigabit link
2820 #define SPEED_MODE_BIT (1 << 21)
2821 tarc |= SPEED_MODE_BIT;
2822 ew32(TARC(0), tarc);
2825 /* errata: program both queues to unweighted RR */
2826 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2827 tarc = er32(TARC(0));
2829 ew32(TARC(0), tarc);
2830 tarc = er32(TARC(1));
2832 ew32(TARC(1), tarc);
2835 /* Setup Transmit Descriptor Settings for eop descriptor */
2836 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2838 /* only set IDE if we are delaying interrupts using the timers */
2839 if (adapter->tx_int_delay)
2840 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2842 /* enable Report Status bit */
2843 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2847 e1000e_config_collision_dist(hw);
2851 * e1000_setup_rctl - configure the receive control registers
2852 * @adapter: Board private structure
2854 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2855 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
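/*
 * Worked example (illustrative): with 4K pages, PAGE_USE_COUNT(9000)
 * = (9000 >> 12) + 1 = 3, since 9000 = 2 * 4096 + 808 spills into a
 * third, partially-used page.
 */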
2856 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2858 struct e1000_hw *hw = &adapter->hw;
2862 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2863 if (hw->mac.type == e1000_pch2lan) {
2866 if (adapter->netdev->mtu > ETH_DATA_LEN)
2867 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2869 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2872 e_dbg("failed to enable jumbo frame workaround mode\n");
2875 /* Program MC offset vector base */
2877 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2878 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2879 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2880 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2882 /* Do not store bad packets */
2883 rctl &= ~E1000_RCTL_SBP;
2885 /* Enable Long Packet receive */
2886 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2887 rctl &= ~E1000_RCTL_LPE;
2889 rctl |= E1000_RCTL_LPE;
2891 /* Some systems expect that the CRC is included in SMBUS traffic. The
2892 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2893 * host memory when this is enabled
2895 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2896 rctl |= E1000_RCTL_SECRC;
2898 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2899 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2902 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2904 phy_data |= (1 << 2);
2905 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2907 e1e_rphy(hw, 22, &phy_data);
2909 phy_data |= (1 << 14);
2910 e1e_wphy(hw, 0x10, 0x2823);
2911 e1e_wphy(hw, 0x11, 0x0003);
2912 e1e_wphy(hw, 22, phy_data);
2915 /* Setup buffer sizes */
2916 rctl &= ~E1000_RCTL_SZ_4096;
2917 rctl |= E1000_RCTL_BSEX;
2918 switch (adapter->rx_buffer_len) {
2921 rctl |= E1000_RCTL_SZ_2048;
2922 rctl &= ~E1000_RCTL_BSEX;
2925 rctl |= E1000_RCTL_SZ_4096;
2928 rctl |= E1000_RCTL_SZ_8192;
2931 rctl |= E1000_RCTL_SZ_16384;
2935 /* Enable Extended Status in all Receive Descriptors */
2936 rfctl = er32(RFCTL);
2937 rfctl |= E1000_RFCTL_EXTEN;
2940 * 82571 and greater support packet-split where the protocol
2941 * header is placed in skb->data and the packet data is
2942 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2943 * In the case of a non-split, skb->data is linearly filled,
2944 * followed by the page buffers. Therefore, skb->data is
2945 * sized to hold the largest protocol header.
2947 * allocations using alloc_page take too long for regular MTU
2948 * so only enable packet split for jumbo frames
2950 * Using pages when the page size is greater than 16k wastes
2951 * a lot of memory, since we allocate 3 pages at all times
2954 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2955 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
2956 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2957 adapter->rx_ps_pages = pages;
2959 adapter->rx_ps_pages = 0;
2961 if (adapter->rx_ps_pages) {
2965 * disable packet split support for IPv6 extension headers,
2966 * because some malformed IPv6 headers can hang the Rx
2968 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2969 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2971 /* Enable Packet split descriptors */
2972 rctl |= E1000_RCTL_DTYP_PS;
2974 psrctl |= adapter->rx_ps_bsize0 >>
2975 E1000_PSRCTL_BSIZE0_SHIFT;
2977 switch (adapter->rx_ps_pages) {
2979 psrctl |= PAGE_SIZE <<
2980 E1000_PSRCTL_BSIZE3_SHIFT;
2982 psrctl |= PAGE_SIZE <<
2983 E1000_PSRCTL_BSIZE2_SHIFT;
2985 psrctl |= PAGE_SIZE >>
2986 E1000_PSRCTL_BSIZE1_SHIFT;
2990 ew32(PSRCTL, psrctl);
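/*
 * Illustrative PSRCTL encoding, assuming the usual field layout
 * (BSIZE0 in 128-byte units at bits 6:0, BSIZE1 in 1KB units at bits
 * 13:8): rx_ps_bsize0 = 128 is written as 128 >> 7 = 1, and a 4K page
 * for BSIZE1 as 4096 >> 2 = 0x400, i.e. 4 in the field.
 */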
2995 /* just started the receive unit, no need to restart */
2996 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3000 * e1000_configure_rx - Configure Receive Unit after Reset
3001 * @adapter: board private structure
3003 * Configure the Rx unit of the MAC after a reset.
3005 static void e1000_configure_rx(struct e1000_adapter *adapter)
3007 struct e1000_hw *hw = &adapter->hw;
3008 struct e1000_ring *rx_ring = adapter->rx_ring;
3010 u32 rdlen, rctl, rxcsum, ctrl_ext;
3012 if (adapter->rx_ps_pages) {
3013 /* this is a 32 byte descriptor */
3014 rdlen = rx_ring->count *
3015 sizeof(union e1000_rx_desc_packet_split);
3016 adapter->clean_rx = e1000_clean_rx_irq_ps;
3017 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3018 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3019 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3020 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3021 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3023 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3024 adapter->clean_rx = e1000_clean_rx_irq;
3025 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3028 /* disable receives while setting up the descriptors */
3030 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3031 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3033 usleep_range(10000, 20000);
3035 if (adapter->flags2 & FLAG2_DMA_BURST) {
3037 * set the writeback threshold (only takes effect if the RDTR
3038 * is set). Set GRAN=1 and write back up to 0x4 descriptors'
3039 * worth, and enable prefetching of 0x20 Rx descriptors
3045 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3046 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3049 * override the delay timers for enabling bursting, only if
3050 * the value was not set by the user via module options
3052 if (adapter->rx_int_delay == DEFAULT_RDTR)
3053 adapter->rx_int_delay = BURST_RDTR;
3054 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3055 adapter->rx_abs_int_delay = BURST_RADV;
3058 /* set the Receive Delay Timer Register */
3059 ew32(RDTR, adapter->rx_int_delay);
3061 /* irq moderation */
3062 ew32(RADV, adapter->rx_abs_int_delay);
3063 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3064 ew32(ITR, 1000000000 / (adapter->itr * 256));
3066 ctrl_ext = er32(CTRL_EXT);
3067 /* Auto-Mask interrupts upon ICR access */
3068 ctrl_ext |= E1000_CTRL_EXT_IAME;
3069 ew32(IAM, 0xffffffff);
3070 ew32(CTRL_EXT, ctrl_ext);
3074 * Setup the HW Rx Head and Tail Descriptor Pointers and
3075 * the Base and Length of the Rx Descriptor Ring
3077 rdba = rx_ring->dma;
3078 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
3079 ew32(RDBAH, (rdba >> 32));
3083 rx_ring->head = E1000_RDH;
3084 rx_ring->tail = E1000_RDT;
3086 /* Enable Receive Checksum Offload for TCP and UDP */
3087 rxcsum = er32(RXCSUM);
3088 if (adapter->netdev->features & NETIF_F_RXCSUM) {
3089 rxcsum |= E1000_RXCSUM_TUOFL;
3092 * IPv4 payload checksum for UDP fragments must be
3093 * used in conjunction with packet-split.
3095 if (adapter->rx_ps_pages)
3096 rxcsum |= E1000_RXCSUM_IPPCSE;
3098 rxcsum &= ~E1000_RXCSUM_TUOFL;
3099 /* no need to clear IPPCSE as it defaults to 0 */
3101 ew32(RXCSUM, rxcsum);
3104 * Enable early receives on supported devices; only takes effect when
3105 * the packet size is equal to or larger than the specified value (in
3106 * 8-byte units), e.g. using jumbo frames when set to E1000_ERT_2048
3108 if ((adapter->flags & FLAG_HAS_ERT) ||
3109 (adapter->hw.mac.type == e1000_pch2lan)) {
3110 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3111 u32 rxdctl = er32(RXDCTL(0));
3112 ew32(RXDCTL(0), rxdctl | 0x3);
3113 if (adapter->flags & FLAG_HAS_ERT)
3114 ew32(ERT, E1000_ERT_2048 | (1 << 13));
3116 * With jumbo frames and early-receive enabled,
3117 * excessive C-state transition latencies result in
3118 * dropped transactions.
3120 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3122 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3123 PM_QOS_DEFAULT_VALUE);
3127 /* Enable Receives */
3132 * e1000_update_mc_addr_list - Update Multicast addresses
3133 * @hw: pointer to the HW structure
3134 * @mc_addr_list: array of multicast addresses to program
3135 * @mc_addr_count: number of multicast addresses to program
3137 * Updates the Multicast Table Array.
3138 * The caller must have a packed mc_addr_list of multicast addresses.
3140 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
3143 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
3147 * e1000_set_multi - Multicast and Promiscuous mode set
3148 * @netdev: network interface device structure
3150 * The set_multi entry point is called whenever the multicast address
3151 * list or the network interface flags are updated. This routine is
3152 * responsible for configuring the hardware for proper multicast,
3153 * promiscuous mode, and all-multi behavior.
3155 static void e1000_set_multi(struct net_device *netdev)
3157 struct e1000_adapter *adapter = netdev_priv(netdev);
3158 struct e1000_hw *hw = &adapter->hw;
3159 struct netdev_hw_addr *ha;
3163 /* Check for Promiscuous and All Multicast modes */
3167 if (netdev->flags & IFF_PROMISC) {
3168 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3169 rctl &= ~E1000_RCTL_VFE;
3170 /* Do not hardware filter VLANs in promisc mode */
3171 e1000e_vlan_filter_disable(adapter);
3173 if (netdev->flags & IFF_ALLMULTI) {
3174 rctl |= E1000_RCTL_MPE;
3175 rctl &= ~E1000_RCTL_UPE;
3177 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3179 e1000e_vlan_filter_enable(adapter);
3184 if (!netdev_mc_empty(netdev)) {
3187 mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3191 /* prepare a packed array of only addresses. */
3192 netdev_for_each_mc_addr(ha, netdev)
3193 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3195 e1000_update_mc_addr_list(hw, mta_list, i);
3199 * if we're called from probe, we might not have
3200 * anything to do here, so clear out the list
3202 e1000_update_mc_addr_list(hw, NULL, 0);
3205 if (netdev->features & NETIF_F_HW_VLAN_RX)
3206 e1000e_vlan_strip_enable(adapter);
3208 e1000e_vlan_strip_disable(adapter);
3212 * e1000_configure - configure the hardware for Rx and Tx
3213 * @adapter: private board structure
3215 static void e1000_configure(struct e1000_adapter *adapter)
3217 e1000_set_multi(adapter->netdev);
3219 e1000_restore_vlan(adapter);
3220 e1000_init_manageability_pt(adapter);
3222 e1000_configure_tx(adapter);
3223 e1000_setup_rctl(adapter);
3224 e1000_configure_rx(adapter);
3225 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
3230 * e1000e_power_up_phy - restore link in case the phy was powered down
3231 * @adapter: address of board private structure
3233 * The phy may be powered down to save power and turn off link when the
3234 * driver is unloaded and Wake on LAN is not enabled (among others)
3235 * *** this routine MUST be followed by a call to e1000e_reset ***
3237 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3239 if (adapter->hw.phy.ops.power_up)
3240 adapter->hw.phy.ops.power_up(&adapter->hw);
3242 adapter->hw.mac.ops.setup_link(&adapter->hw);
3246 * e1000_power_down_phy - Power down the PHY
3248 * Power down the PHY so no link is implied when interface is down.
3249 * The PHY cannot be powered down if management or WoL is active.
3251 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3253 /* WoL is enabled */
3257 if (adapter->hw.phy.ops.power_down)
3258 adapter->hw.phy.ops.power_down(&adapter->hw);
3262 * e1000e_reset - bring the hardware into a known good state
3264 * This function boots the hardware and enables some settings that
3265 * require a configuration cycle of the hardware - those cannot be
3266 * set/changed during runtime. After reset the device needs to be
3267 * properly configured for Rx, Tx etc.
3269 void e1000e_reset(struct e1000_adapter *adapter)
3271 struct e1000_mac_info *mac = &adapter->hw.mac;
3272 struct e1000_fc_info *fc = &adapter->hw.fc;
3273 struct e1000_hw *hw = &adapter->hw;
3274 u32 tx_space, min_tx_space, min_rx_space;
3275 u32 pba = adapter->pba;
3278 /* reset Packet Buffer Allocation to default */
3281 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3283 * To maintain wire speed transmits, the Tx FIFO should be
3284 * large enough to accommodate two full transmit packets,
3285 * rounded up to the next 1KB and expressed in KB. Likewise,
3286 * the Rx FIFO should be large enough to accommodate at least
3287 * one full receive packet and is similarly rounded up and
3288 * expressed in KB.
3291 /* the upper 16 bits hold the Tx packet buffer allocation size in KB */
3292 tx_space = pba >> 16;
3293 /* the lower 16 bits hold the Rx packet buffer allocation size in KB */
3296 * the Tx FIFO also stores 16 bytes of information about the Tx packet,
3297 * but don't count the Ethernet FCS because hardware appends it
3299 min_tx_space = (adapter->max_frame_size +
3300 sizeof(struct e1000_tx_desc) -
3302 min_tx_space = ALIGN(min_tx_space, 1024);
3303 min_tx_space >>= 10;
3304 /* software strips receive CRC, so leave room for it */
3305 min_rx_space = adapter->max_frame_size;
3306 min_rx_space = ALIGN(min_rx_space, 1024);
3307 min_rx_space >>= 10;
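/*
 * Worked example (illustrative): for a 9018-byte jumbo frame,
 * min_tx_space = ALIGN(9018 + 16 - 4, 1024) >> 10 = 9 KB and
 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.
 */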
3310 * If current Tx allocation is less than the min Tx FIFO size,
3311 * and the min Tx FIFO size is less than the current Rx FIFO
3312 * allocation, take space away from current Rx allocation
3314 if ((tx_space < min_tx_space) &&
3315 ((min_tx_space - tx_space) < pba)) {
3316 pba -= min_tx_space - tx_space;
3319 * if short on Rx space, Rx wins and must trump Tx
3320 * adjustment or use Early Receive if available
3322 if ((pba < min_rx_space) &&
3323 (!(adapter->flags & FLAG_HAS_ERT)))
3324 /* ERT enabled in e1000_configure_rx */
3332 * flow control settings
3334 * The high water mark must be low enough to fit one full frame
3335 * (or the size used for early receive) above it in the Rx FIFO.
3336 * Set it to the lower of:
3337 * - 90% of the Rx FIFO size, and
3338 * - the full Rx FIFO size minus the early receive size (for parts
3339 * with ERT support, assuming ERT is set to E1000_ERT_2048), or
3340 * - the full Rx FIFO size minus one full frame
3342 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3343 fc->pause_time = 0xFFFF;
3345 fc->pause_time = E1000_FC_PAUSE_TIME;
3347 fc->current_mode = fc->requested_mode;
3349 switch (hw->mac.type) {
3351 if ((adapter->flags & FLAG_HAS_ERT) &&
3352 (adapter->netdev->mtu > ETH_DATA_LEN))
3353 hwm = min(((pba << 10) * 9 / 10),
3354 ((pba << 10) - (E1000_ERT_2048 << 3)));
3356 hwm = min(((pba << 10) * 9 / 10),
3357 ((pba << 10) - adapter->max_frame_size));
3359 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3360 fc->low_water = fc->high_water - 8;
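/*
 * Illustrative watermark math (the pba value is hypothetical): with
 * pba = 20 KB and a 1522-byte max frame, hwm = min(20480 * 9 / 10,
 * 20480 - 1522) = min(18432, 18958) = 18432; the RTH mask only keeps
 * the 8-byte granularity, and low_water trails high_water by 8 bytes.
 */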
3364 * Workaround PCH LOM adapter hangs with certain network
3365 * loads. If hangs persist, try disabling Tx flow control.
3367 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3368 fc->high_water = 0x3500;
3369 fc->low_water = 0x1500;
3371 fc->high_water = 0x5000;
3372 fc->low_water = 0x3000;
3374 fc->refresh_time = 0x1000;
3377 fc->high_water = 0x05C20;
3378 fc->low_water = 0x05048;
3379 fc->pause_time = 0x0650;
3380 fc->refresh_time = 0x0400;
3381 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3389 * Alignment of Tx data is on an arbitrary byte boundary. The
3390 * maximum size per Tx descriptor is limited only by the transmit
3391 * packet buffer allocation minus 96 bytes, with an upper limit
3392 * of 24KB due to receive synchronization limitations.
3394 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3395 24 << 10);
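/*
 * Illustrative (PBA contents hypothetical): if the upper 16 bits of
 * PBA read 20 (KB), tx_fifo_limit = min((20 << 10) - 96, 24 << 10)
 * = 20384 bytes.
 */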
3398 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3399 * fit in the receive buffer and early receive is not supported.
3401 if (adapter->itr_setting & 0x3) {
3402 if (((adapter->max_frame_size * 2) > (pba << 10)) &&
3403 !(adapter->flags & FLAG_HAS_ERT)) {
3404 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3405 dev_info(&adapter->pdev->dev,
3406 "Interrupt Throttle Rate turned off\n");
3407 adapter->flags2 |= FLAG2_DISABLE_AIM;
3410 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3411 dev_info(&adapter->pdev->dev,
3412 "Interrupt Throttle Rate turned on\n");
3413 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3414 adapter->itr = 20000;
3415 ew32(ITR, 1000000000 / (adapter->itr * 256));
3419 /* Allow time for pending master requests to run */
3420 mac->ops.reset_hw(hw);
3423 * For parts with AMT enabled, let the firmware know
3424 * that the network interface is in control
3426 if (adapter->flags & FLAG_HAS_AMT)
3427 e1000e_get_hw_control(adapter);
3431 if (mac->ops.init_hw(hw))
3432 e_err("Hardware Error\n");
3434 e1000_update_mng_vlan(adapter);
3436 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3437 ew32(VET, ETH_P_8021Q);
3439 e1000e_reset_adaptive(hw);
3441 if (!netif_running(adapter->netdev) &&
3442 !test_bit(__E1000_TESTING, &adapter->state)) {
3443 e1000_power_down_phy(adapter);
3447 e1000_get_phy_info(hw);
3449 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3450 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3453 * speed up time to link by disabling smart power down; ignore
3454 * the return value because there is nothing we would do
3455 * differently if it failed
3457 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3458 phy_data &= ~IGP02E1000_PM_SPD;
3459 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3463 int e1000e_up(struct e1000_adapter *adapter)
3465 struct e1000_hw *hw = &adapter->hw;
3467 /* hardware has been reset, we need to reload some things */
3468 e1000_configure(adapter);
3470 clear_bit(__E1000_DOWN, &adapter->state);
3472 napi_enable(&adapter->napi);
3473 if (adapter->msix_entries)
3474 e1000_configure_msix(adapter);
3475 e1000_irq_enable(adapter);
3477 netif_start_queue(adapter->netdev);
3479 /* fire a link change interrupt to start the watchdog */
3480 if (adapter->msix_entries)
3481 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3483 ew32(ICS, E1000_ICS_LSC);
3488 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3490 struct e1000_hw *hw = &adapter->hw;
3492 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3495 /* flush pending descriptor writebacks to memory */
3496 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3497 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3499 /* execute the writes immediately */
3503 static void e1000e_update_stats(struct e1000_adapter *adapter);
3505 void e1000e_down(struct e1000_adapter *adapter)
3507 struct net_device *netdev = adapter->netdev;
3508 struct e1000_hw *hw = &adapter->hw;
3512 * signal that we're down so the interrupt handler does not
3513 * reschedule our watchdog timer
3515 set_bit(__E1000_DOWN, &adapter->state);
3517 /* disable receives in the hardware */
3519 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3520 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3521 /* flush and sleep below */
3523 netif_stop_queue(netdev);
3525 /* disable transmits in the hardware */
3527 tctl &= ~E1000_TCTL_EN;
3530 /* flush both disables and wait for them to finish */
3532 usleep_range(10000, 20000);
3534 napi_disable(&adapter->napi);
3535 e1000_irq_disable(adapter);
3537 del_timer_sync(&adapter->watchdog_timer);
3538 del_timer_sync(&adapter->phy_info_timer);
3540 netif_carrier_off(netdev);
3542 spin_lock(&adapter->stats64_lock);
3543 e1000e_update_stats(adapter);
3544 spin_unlock(&adapter->stats64_lock);
3546 e1000e_flush_descriptors(adapter);
3547 e1000_clean_tx_ring(adapter);
3548 e1000_clean_rx_ring(adapter);
3550 adapter->link_speed = 0;
3551 adapter->link_duplex = 0;
3553 if (!pci_channel_offline(adapter->pdev))
3554 e1000e_reset(adapter);
3557 * TODO: for power management, we could drop the link and
3558 * pci_disable_device here.
3562 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3565 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3566 usleep_range(1000, 2000);
3567 e1000e_down(adapter);
3569 clear_bit(__E1000_RESETTING, &adapter->state);
3573 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3574 * @adapter: board private structure to initialize
3576 * e1000_sw_init initializes the Adapter private data structure.
3577 * Fields are initialized based on PCI device information and
3578 * OS network device settings (MTU size).
3580 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3582 struct net_device *netdev = adapter->netdev;
3584 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3585 adapter->rx_ps_bsize0 = 128;
3586 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3587 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3589 spin_lock_init(&adapter->stats64_lock);
3591 e1000e_set_interrupt_capability(adapter);
3593 if (e1000_alloc_queues(adapter))
3596 /* Explicitly disable IRQ since the NIC can be in any state. */
3597 e1000_irq_disable(adapter);
3599 set_bit(__E1000_DOWN, &adapter->state);
3604 * e1000_intr_msi_test - Interrupt Handler
3605 * @irq: interrupt number
3606 * @data: pointer to a network interface device structure
3608 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3610 struct net_device *netdev = data;
3611 struct e1000_adapter *adapter = netdev_priv(netdev);
3612 struct e1000_hw *hw = &adapter->hw;
3613 u32 icr = er32(ICR);
3615 e_dbg("icr is %08X\n", icr);
3616 if (icr & E1000_ICR_RXSEQ) {
3617 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3625 * e1000_test_msi_interrupt - Returns 0 for successful test
3626 * @adapter: board private struct
3628 * code flow taken from tg3.c
3630 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3632 struct net_device *netdev = adapter->netdev;
3633 struct e1000_hw *hw = &adapter->hw;
3636 /* poll_enable hasn't been called yet, so don't need disable */
3637 /* clear any pending events */
3640 /* free the real vector and request a test handler */
3641 e1000_free_irq(adapter);
3642 e1000e_reset_interrupt_capability(adapter);
3644 /* Assume that the test fails; if it succeeds, the test
3645 * MSI irq handler will clear this flag */
3646 adapter->flags |= FLAG_MSI_TEST_FAILED;
3648 err = pci_enable_msi(adapter->pdev);
3650 goto msi_test_failed;
3652 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3653 netdev->name, netdev);
3655 pci_disable_msi(adapter->pdev);
3656 goto msi_test_failed;
3661 e1000_irq_enable(adapter);
3663 /* fire an unusual interrupt on the test handler */
3664 ew32(ICS, E1000_ICS_RXSEQ);
3668 e1000_irq_disable(adapter);
3672 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3673 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3674 e_info("MSI interrupt test failed, using legacy interrupt.\n");
3676 e_dbg("MSI interrupt test succeeded!\n");
3678 free_irq(adapter->pdev->irq, netdev);
3679 pci_disable_msi(adapter->pdev);
3682 e1000e_set_interrupt_capability(adapter);
3683 return e1000_request_irq(adapter);
3687 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3688 * @adapter: board private struct
3690 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3692 static int e1000_test_msi(struct e1000_adapter *adapter)
3697 if (!(adapter->flags & FLAG_MSI_ENABLED))
3700 /* disable SERR in case the MSI write causes a master abort */
3701 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3702 if (pci_cmd & PCI_COMMAND_SERR)
3703 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3704 pci_cmd & ~PCI_COMMAND_SERR);
3706 err = e1000_test_msi_interrupt(adapter);
3708 /* re-enable SERR */
3709 if (pci_cmd & PCI_COMMAND_SERR) {
3710 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3711 pci_cmd |= PCI_COMMAND_SERR;
3712 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3719 * e1000_open - Called when a network interface is made active
3720 * @netdev: network interface device structure
3722 * Returns 0 on success, negative value on failure
3724 * The open entry point is called when a network interface is made
3725 * active by the system (IFF_UP). At this point all resources needed
3726 * for transmit and receive operations are allocated, the interrupt
3727 * handler is registered with the OS, the watchdog timer is started,
3728 * and the stack is notified that the interface is ready.
3730 static int e1000_open(struct net_device *netdev)
3732 struct e1000_adapter *adapter = netdev_priv(netdev);
3733 struct e1000_hw *hw = &adapter->hw;
3734 struct pci_dev *pdev = adapter->pdev;
3737 /* disallow open during test */
3738 if (test_bit(__E1000_TESTING, &adapter->state))
3741 pm_runtime_get_sync(&pdev->dev);
3743 netif_carrier_off(netdev);
3745 /* allocate transmit descriptors */
3746 err = e1000e_setup_tx_resources(adapter);
3750 /* allocate receive descriptors */
3751 err = e1000e_setup_rx_resources(adapter);
3756 * If AMT is enabled, let the firmware know that the network
3757 * interface is now open and reset the part to a known state.
3759 if (adapter->flags & FLAG_HAS_AMT) {
3760 e1000e_get_hw_control(adapter);
3761 e1000e_reset(adapter);
3764 e1000e_power_up_phy(adapter);
3766 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3767 if ((adapter->hw.mng_cookie.status &
3768 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3769 e1000_update_mng_vlan(adapter);
3771 /* DMA latency requirement to work around early-receive/jumbo issue */
3772 if ((adapter->flags & FLAG_HAS_ERT) ||
3773 (adapter->hw.mac.type == e1000_pch2lan))
3774 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3775 PM_QOS_CPU_DMA_LATENCY,
3776 PM_QOS_DEFAULT_VALUE);
3779 * before we allocate an interrupt, we must be ready to handle it.
3780 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3781 * as soon as we call pci_request_irq, so we have to set up our
3782 * clean_rx handler before we do so.
3784 e1000_configure(adapter);
3786 err = e1000_request_irq(adapter);
3791 * Work around PCIe errata with MSI interrupts causing some chipsets to
3792 * ignore e1000e MSI messages, which means we need to test our MSI
3793 * interrupt now
3795 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3796 err = e1000_test_msi(adapter);
3798 e_err("Interrupt allocation failed\n");
3803 /* From here on the code is the same as e1000e_up() */
3804 clear_bit(__E1000_DOWN, &adapter->state);
3806 napi_enable(&adapter->napi);
3808 e1000_irq_enable(adapter);
3810 adapter->tx_hang_recheck = false;
3811 netif_start_queue(netdev);
3813 adapter->idle_check = true;
3814 pm_runtime_put(&pdev->dev);
3816 /* fire a link status change interrupt to start the watchdog */
3817 if (adapter->msix_entries)
3818 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3820 ew32(ICS, E1000_ICS_LSC);
3825 e1000e_release_hw_control(adapter);
3826 e1000_power_down_phy(adapter);
3827 e1000e_free_rx_resources(adapter);
3829 e1000e_free_tx_resources(adapter);
3831 e1000e_reset(adapter);
3832 pm_runtime_put_sync(&pdev->dev);
3838 * e1000_close - Disables a network interface
3839 * @netdev: network interface device structure
3841 * Returns 0, this is not allowed to fail
3843 * The close entry point is called when an interface is de-activated
3844 * by the OS. The hardware is still under the driver's control, but
3845 * needs to be disabled. A global MAC reset is issued to stop the
3846 * hardware, and all transmit and receive resources are freed.
3848 static int e1000_close(struct net_device *netdev)
3850 struct e1000_adapter *adapter = netdev_priv(netdev);
3851 struct pci_dev *pdev = adapter->pdev;
3853 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3855 pm_runtime_get_sync(&pdev->dev);
3857 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3858 e1000e_down(adapter);
3859 e1000_free_irq(adapter);
3861 e1000_power_down_phy(adapter);
3863 e1000e_free_tx_resources(adapter);
3864 e1000e_free_rx_resources(adapter);
3867 * kill manageability vlan ID if supported, but not if a vlan with
3868 * the same ID is registered on the host OS (let 8021q kill it)
3870 if (adapter->hw.mng_cookie.status &
3871 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
3872 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3875 * If AMT is enabled, let the firmware know that the network
3876 * interface is now closed
3878 if ((adapter->flags & FLAG_HAS_AMT) &&
3879 !test_bit(__E1000_TESTING, &adapter->state))
3880 e1000e_release_hw_control(adapter);
3882 if ((adapter->flags & FLAG_HAS_ERT) ||
3883 (adapter->hw.mac.type == e1000_pch2lan))
3884 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3886 pm_runtime_put_sync(&pdev->dev);
3891 * e1000_set_mac - Change the Ethernet Address of the NIC
3892 * @netdev: network interface device structure
3893 * @p: pointer to an address structure
3895 * Returns 0 on success, negative on failure
3897 static int e1000_set_mac(struct net_device *netdev, void *p)
3899 struct e1000_adapter *adapter = netdev_priv(netdev);
3900 struct sockaddr *addr = p;
3902 if (!is_valid_ether_addr(addr->sa_data))
3903 return -EADDRNOTAVAIL;
3905 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3906 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3908 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3910 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3911 /* activate the workaround */
3912 e1000e_set_laa_state_82571(&adapter->hw, 1);
3915 * Hold a copy of the LAA in RAR[14]. This is done so that
3916 * between the time RAR[0] gets clobbered and the time it
3917 * gets fixed (in e1000_watchdog), the actual LAA is in one
3918 * of the RARs and no incoming packets directed to this port
3919 * are dropped. Eventually the LAA will be in RAR[0] and RAR[14].
3922 e1000e_rar_set(&adapter->hw,
3923 adapter->hw.mac.addr,
3924 adapter->hw.mac.rar_entry_count - 1);
3931 * e1000e_update_phy_task - work thread to update phy
3932 * @work: pointer to our work struct
3934 * this worker thread exists because we must acquire a
3935 * semaphore to read the phy; acquiring that semaphore may
3936 * msleep, and we can't msleep in a timer.
3938 static void e1000e_update_phy_task(struct work_struct *work)
3940 struct e1000_adapter *adapter = container_of(work,
3941 struct e1000_adapter, update_phy_task);
3943 if (test_bit(__E1000_DOWN, &adapter->state))
3946 e1000_get_phy_info(&adapter->hw);
3950 * Need to wait a few seconds after link up to get diagnostic information from
3951 * the phy
3953 static void e1000_update_phy_info(unsigned long data)
3955 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3957 if (test_bit(__E1000_DOWN, &adapter->state))
3960 schedule_work(&adapter->update_phy_task);
3964 * e1000e_update_phy_stats - Update the PHY statistics counters
3965 * @adapter: board private structure
3967 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
3969 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
3971 struct e1000_hw *hw = &adapter->hw;
3975 ret_val = hw->phy.ops.acquire(hw);
3980 * A page set is expensive so check if already on desired page.
3981 * If not, set to the page with the PHY status registers.
3984 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3988 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
3989 ret_val = hw->phy.ops.set_page(hw,
3990 HV_STATS_PAGE << IGP_PAGE_SHIFT);
3995 /* Single Collision Count */
3996 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
3997 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
3999 adapter->stats.scc += phy_data;
4001 /* Excessive Collision Count */
4002 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4003 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4005 adapter->stats.ecol += phy_data;
4007 /* Multiple Collision Count */
4008 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4009 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4011 adapter->stats.mcc += phy_data;
4013 /* Late Collision Count */
4014 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4015 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4017 adapter->stats.latecol += phy_data;
4019 /* Collision Count - also used for adaptive IFS */
4020 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4021 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4023 hw->mac.collision_delta = phy_data;
4025 /* Defer Count */
4026 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4027 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4029 adapter->stats.dc += phy_data;
4031 /* Transmit with no CRS */
4032 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4033 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4035 adapter->stats.tncrs += phy_data;
4038 hw->phy.ops.release(hw);
4042 * e1000e_update_stats - Update the board statistics counters
4043 * @adapter: board private structure
4045 static void e1000e_update_stats(struct e1000_adapter *adapter)
4047 struct net_device *netdev = adapter->netdev;
4048 struct e1000_hw *hw = &adapter->hw;
4049 struct pci_dev *pdev = adapter->pdev;
4052 * Prevent stats update while adapter is being reset, or if the pci
4053 * connection is down.
4055 if (adapter->link_speed == 0)
4057 if (pci_channel_offline(pdev))
4060 adapter->stats.crcerrs += er32(CRCERRS);
4061 adapter->stats.gprc += er32(GPRC);
4062 adapter->stats.gorc += er32(GORCL);
4063 er32(GORCH); /* Clear gorc */
4064 adapter->stats.bprc += er32(BPRC);
4065 adapter->stats.mprc += er32(MPRC);
4066 adapter->stats.roc += er32(ROC);
4068 adapter->stats.mpc += er32(MPC);
4070 /* Half-duplex statistics */
4071 if (adapter->link_duplex == HALF_DUPLEX) {
4072 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4073 e1000e_update_phy_stats(adapter);
4075 adapter->stats.scc += er32(SCC);
4076 adapter->stats.ecol += er32(ECOL);
4077 adapter->stats.mcc += er32(MCC);
4078 adapter->stats.latecol += er32(LATECOL);
4079 adapter->stats.dc += er32(DC);
4081 hw->mac.collision_delta = er32(COLC);
4083 if ((hw->mac.type != e1000_82574) &&
4084 (hw->mac.type != e1000_82583))
4085 adapter->stats.tncrs += er32(TNCRS);
4087 adapter->stats.colc += hw->mac.collision_delta;
4090 adapter->stats.xonrxc += er32(XONRXC);
4091 adapter->stats.xontxc += er32(XONTXC);
4092 adapter->stats.xoffrxc += er32(XOFFRXC);
4093 adapter->stats.xofftxc += er32(XOFFTXC);
4094 adapter->stats.gptc += er32(GPTC);
4095 adapter->stats.gotc += er32(GOTCL);
4096 er32(GOTCH); /* Clear gotc */
4097 adapter->stats.rnbc += er32(RNBC);
4098 adapter->stats.ruc += er32(RUC);
4100 adapter->stats.mptc += er32(MPTC);
4101 adapter->stats.bptc += er32(BPTC);
4103 /* used for adaptive IFS */
4105 hw->mac.tx_packet_delta = er32(TPT);
4106 adapter->stats.tpt += hw->mac.tx_packet_delta;
4108 adapter->stats.algnerrc += er32(ALGNERRC);
4109 adapter->stats.rxerrc += er32(RXERRC);
4110 adapter->stats.cexterr += er32(CEXTERR);
4111 adapter->stats.tsctc += er32(TSCTC);
4112 adapter->stats.tsctfc += er32(TSCTFC);
4114 /* Fill out the OS statistics structure */
4115 netdev->stats.multicast = adapter->stats.mprc;
4116 netdev->stats.collisions = adapter->stats.colc;
4121 * RLEC on some newer hardware can be incorrect so build
4122 * our own version based on RUC and ROC
4124 netdev->stats.rx_errors = adapter->stats.rxerrc +
4125 adapter->stats.crcerrs + adapter->stats.algnerrc +
4126 adapter->stats.ruc + adapter->stats.roc +
4127 adapter->stats.cexterr;
4128 netdev->stats.rx_length_errors = adapter->stats.ruc +
4130 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4131 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4132 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4135 netdev->stats.tx_errors = adapter->stats.ecol +
4136 adapter->stats.latecol;
4137 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4138 netdev->stats.tx_window_errors = adapter->stats.latecol;
4139 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4141 /* Tx Dropped needs to be maintained elsewhere */
4143 /* Management Stats */
4144 adapter->stats.mgptc += er32(MGTPTC);
4145 adapter->stats.mgprc += er32(MGTPRC);
4146 adapter->stats.mgpdc += er32(MGTPDC);
4150 * e1000_phy_read_status - Update the PHY register status snapshot
4151 * @adapter: board private structure
4153 static void e1000_phy_read_status(struct e1000_adapter *adapter)
4155 struct e1000_hw *hw = &adapter->hw;
4156 struct e1000_phy_regs *phy = &adapter->phy_regs;
4158 if ((er32(STATUS) & E1000_STATUS_LU) &&
4159 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4162 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4163 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4164 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4165 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4166 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4167 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4168 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4169 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4171 e_warn("Error reading PHY register\n");
4174 * Do not read PHY registers if link is not up.
4175 * Set values to typical power-on defaults.
4177 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4178 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4179 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4181 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4182 ADVERTISE_ALL | ADVERTISE_CSMA);
4184 phy->expansion = EXPANSION_ENABLENPAGE;
4185 phy->ctrl1000 = ADVERTISE_1000FULL;
4187 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4191 static void e1000_print_link_info(struct e1000_adapter *adapter)
4193 struct e1000_hw *hw = &adapter->hw;
4194 u32 ctrl = er32(CTRL);
4196 /* Link status message must follow this format for user tools */
4197 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
4198 "Flow Control: %s\n",
4199 adapter->netdev->name,
4200 adapter->link_speed,
4201 (adapter->link_duplex == FULL_DUPLEX) ?
4202 "Full Duplex" : "Half Duplex",
4203 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
4205 ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
4206 ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
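/*
 * Example of the resulting message (interface name illustrative):
 *   e1000e: eth0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: Rx/Tx
 */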
4209 static bool e1000e_has_link(struct e1000_adapter *adapter)
4211 struct e1000_hw *hw = &adapter->hw;
4212 bool link_active = false;
4216 * get_link_status is set on LSC (link status) interrupt or
4217 * Rx sequence error interrupt. get_link_status will stay
4218 * false until check_for_link establishes link
4219 * for copper adapters ONLY
4221 switch (hw->phy.media_type) {
4222 case e1000_media_type_copper:
4223 if (hw->mac.get_link_status) {
4224 ret_val = hw->mac.ops.check_for_link(hw);
4225 link_active = !hw->mac.get_link_status;
4230 case e1000_media_type_fiber:
4231 ret_val = hw->mac.ops.check_for_link(hw);
4232 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4234 case e1000_media_type_internal_serdes:
4235 ret_val = hw->mac.ops.check_for_link(hw);
4236 link_active = adapter->hw.mac.serdes_has_link;
4239 case e1000_media_type_unknown:
4243 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4244 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4245 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4246 e_info("Gigabit has been disabled, downgrading speed\n");
4252 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4254 /* make sure the receive unit is started */
4255 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4256 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4257 struct e1000_hw *hw = &adapter->hw;
4258 u32 rctl = er32(RCTL);
4259 ew32(RCTL, rctl | E1000_RCTL_EN);
4260 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4264 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4266 struct e1000_hw *hw = &adapter->hw;
4269 * With 82574 controllers, the PHY needs to be checked periodically
4270 * for a hung state and reset if two successive calls return true
4272 if (e1000_check_phy_82574(hw))
4273 adapter->phy_hang_count++;
4275 adapter->phy_hang_count = 0;
4277 if (adapter->phy_hang_count > 1) {
4278 adapter->phy_hang_count = 0;
4279 schedule_work(&adapter->reset_task);
4280 }
4281 }
4283 /**
4284 * e1000_watchdog - Timer Call-back
4285 * @data: pointer to adapter cast into an unsigned long
4286 **/
4287 static void e1000_watchdog(unsigned long data)
4289 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4291 /* Do the rest outside of interrupt context */
4292 schedule_work(&adapter->watchdog_task);
4294 /* TODO: make this use queue_delayed_work() */
4295 }
4297 static void e1000_watchdog_task(struct work_struct *work)
4299 struct e1000_adapter *adapter = container_of(work,
4300 struct e1000_adapter, watchdog_task);
4301 struct net_device *netdev = adapter->netdev;
4302 struct e1000_mac_info *mac = &adapter->hw.mac;
4303 struct e1000_phy_info *phy = &adapter->hw.phy;
4304 struct e1000_ring *tx_ring = adapter->tx_ring;
4305 struct e1000_hw *hw = &adapter->hw;
4306 u32 link, tctl;
4308 if (test_bit(__E1000_DOWN, &adapter->state))
4309 return;
4311 link = e1000e_has_link(adapter);
4312 if ((netif_carrier_ok(netdev)) && link) {
4313 /* Cancel scheduled suspend requests. */
4314 pm_runtime_resume(netdev->dev.parent);
4316 e1000e_enable_receives(adapter);
4317 goto link_up;
4318 }
4320 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4321 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4322 e1000_update_mng_vlan(adapter);
4325 if (!netif_carrier_ok(netdev)) {
4326 bool txb2b = 1;
4328 /* Cancel scheduled suspend requests. */
4329 pm_runtime_resume(netdev->dev.parent);
4331 /* update snapshot of PHY registers on LSC */
4332 e1000_phy_read_status(adapter);
4333 mac->ops.get_link_up_info(&adapter->hw,
4334 &adapter->link_speed,
4335 &adapter->link_duplex);
4336 e1000_print_link_info(adapter);
4337 /*
4338 * On supported PHYs, check for duplex mismatch only
4339 * if link has autonegotiated at 10/100 half
4340 */
4341 if ((hw->phy.type == e1000_phy_igp_3 ||
4342 hw->phy.type == e1000_phy_bm) &&
4343 hw->mac.autoneg &&
4344 (adapter->link_speed == SPEED_10 ||
4345 adapter->link_speed == SPEED_100) &&
4346 (adapter->link_duplex == HALF_DUPLEX)) {
4347 u16 autoneg_exp;
4349 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4351 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4352 e_info("Autonegotiated half duplex but"
4353 " link partner cannot autoneg."
4354 " Try forcing full duplex if"
4355 " link gets many collisions.\n");
4356 }
4358 /* adjust timeout factor according to speed/duplex */
4359 adapter->tx_timeout_factor = 1;
4360 switch (adapter->link_speed) {
4361 case SPEED_10:
4362 txb2b = 0;
4363 adapter->tx_timeout_factor = 16;
4364 break;
4365 case SPEED_100:
4366 txb2b = 0;
4367 adapter->tx_timeout_factor = 10;
4368 break;
4369 }
4371 /*
4372 * workaround: re-program speed mode bit after
4373 * link-up event
4374 */
4375 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4376 !txb2b) {
4377 u32 tarc0;
4378 tarc0 = er32(TARC(0));
4379 tarc0 &= ~SPEED_MODE_BIT;
4380 ew32(TARC(0), tarc0);
4381 }
4383 /*
4384 * disable TSO for pcie and 10/100 speeds, to avoid
4385 * some hardware issues
4386 */
4387 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4388 switch (adapter->link_speed) {
4389 case SPEED_10:
4390 case SPEED_100:
4391 e_info("10/100 speed: disabling TSO\n")
4392 netdev->features &= ~NETIF_F_TSO;
4393 netdev->features &= ~NETIF_F_TSO6;
4394 break;
4395 default:
4396 netdev->features |= NETIF_F_TSO;
4397 netdev->features |= NETIF_F_TSO6;
4398 break;
4399 }
4400 }
4405 /*
4406 * enable transmits in the hardware, need to do this
4407 * after setting TARC(0)
4408 */
4409 tctl = er32(TCTL);
4410 tctl |= E1000_TCTL_EN;
4411 ew32(TCTL, tctl);
4413 /*
4414 * Perform any post-link-up configuration before
4415 * reporting link up.
4416 */
4417 if (phy->ops.cfg_on_link_up)
4418 phy->ops.cfg_on_link_up(hw);
4420 netif_carrier_on(netdev);
4422 if (!test_bit(__E1000_DOWN, &adapter->state))
4423 mod_timer(&adapter->phy_info_timer,
4424 round_jiffies(jiffies + 2 * HZ));
4425 }
4426 } else {
4427 if (netif_carrier_ok(netdev)) {
4428 adapter->link_speed = 0;
4429 adapter->link_duplex = 0;
4430 /* Link status message must follow this format */
4431 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4432 adapter->netdev->name);
4433 netif_carrier_off(netdev);
4434 if (!test_bit(__E1000_DOWN, &adapter->state))
4435 mod_timer(&adapter->phy_info_timer,
4436 round_jiffies(jiffies + 2 * HZ));
4438 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4439 schedule_work(&adapter->reset_task);
4440 else
4441 pm_schedule_suspend(netdev->dev.parent,
4442 LINK_TIMEOUT);
4443 }
4444 }
4446 link_up:
4447 spin_lock(&adapter->stats64_lock);
4448 e1000e_update_stats(adapter);
4450 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4451 adapter->tpt_old = adapter->stats.tpt;
4452 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4453 adapter->colc_old = adapter->stats.colc;
4455 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4456 adapter->gorc_old = adapter->stats.gorc;
4457 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4458 adapter->gotc_old = adapter->stats.gotc;
4459 spin_unlock(&adapter->stats64_lock);
4461 e1000e_update_adaptive(&adapter->hw);
4463 if (!netif_carrier_ok(netdev) &&
4464 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4465 /*
4466 * We've lost link, so the controller stops DMA,
4467 * but we've got queued Tx work that's never going
4468 * to get done, so reset controller to flush Tx.
4469 * (Do the reset outside of interrupt context).
4470 */
4471 schedule_work(&adapter->reset_task);
4472 /* return immediately since reset is imminent */
4473 return;
4474 }
4476 /* Simple mode for Interrupt Throttle Rate (ITR) */
4477 if (adapter->itr_setting == 4) {
4478 /*
4479 * Symmetric Tx/Rx gets a reduced ITR=2000;
4480 * Total asymmetrical Tx or Rx gets ITR=8000;
4481 * everyone else is between 2000-8000.
4482 */
4483 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4484 u32 dif = (adapter->gotc > adapter->gorc ?
4485 adapter->gotc - adapter->gorc :
4486 adapter->gorc - adapter->gotc) / 10000;
4487 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4489 ew32(ITR, 1000000000 / (itr * 256));
4490 }
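/*
 * Editor's note -- worked example with illustrative numbers (not taken
 * from the driver): if gotc = 800000 and gorc = 200000 bytes moved since
 * the last watchdog run, then goc = (800000 + 200000) / 10000 = 100 and
 * dif = (800000 - 200000) / 10000 = 60, giving
 * itr = 60 * 6000 / 100 + 2000 = 5600 interrupts/s. The ITR register is
 * programmed in 256 ns units of inter-interrupt interval, hence the
 * 1000000000 / (5600 * 256) ~= 697 written above.
 */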
4492 /* Cause software interrupt to ensure Rx ring is cleaned */
4493 if (adapter->msix_entries)
4494 ew32(ICS, adapter->rx_ring->ims_val);
4495 else
4496 ew32(ICS, E1000_ICS_RXDMT0);
4498 /* flush pending descriptors to memory before detecting Tx hang */
4499 e1000e_flush_descriptors(adapter);
4501 /* Force detection of hung controller every watchdog period */
4502 adapter->detect_tx_hung = 1;
4504 /*
4505 * With 82571 controllers, LAA may be overwritten due to controller
4506 * reset from the other port. Set the appropriate LAA in RAR[0]
4507 */
4508 if (e1000e_get_laa_state_82571(hw))
4509 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4511 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4512 e1000e_check_82574_phy_workaround(adapter);
4514 /* Reset the timer */
4515 if (!test_bit(__E1000_DOWN, &adapter->state))
4516 mod_timer(&adapter->watchdog_timer,
4517 round_jiffies(jiffies + 2 * HZ));
4518 }
4520 #define E1000_TX_FLAGS_CSUM 0x00000001
4521 #define E1000_TX_FLAGS_VLAN 0x00000002
4522 #define E1000_TX_FLAGS_TSO 0x00000004
4523 #define E1000_TX_FLAGS_IPV4 0x00000008
4524 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4525 #define E1000_TX_FLAGS_VLAN_SHIFT 16
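/*
 * Editor's note: the lower bits of tx_flags carry the boolean offload
 * flags above, while the 802.1Q tag lives in the upper 16 bits, e.g.
 *   tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
 * e1000_tx_queue() later masks it back out with E1000_TX_FLAGS_VLAN_MASK
 * when filling the descriptor.
 */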
4527 static int e1000_tso(struct e1000_adapter *adapter,
4528 struct sk_buff *skb)
4530 struct e1000_ring *tx_ring = adapter->tx_ring;
4531 struct e1000_context_desc *context_desc;
4532 struct e1000_buffer *buffer_info;
4533 unsigned int i;
4534 u32 cmd_length = 0;
4535 u16 ipcse = 0, tucse, mss;
4536 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4538 if (!skb_is_gso(skb))
4539 return 0;
4541 if (skb_header_cloned(skb)) {
4542 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4544 if (err)
4545 return err;
4546 }
4548 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4549 mss = skb_shinfo(skb)->gso_size;
4550 if (skb->protocol == htons(ETH_P_IP)) {
4551 struct iphdr *iph = ip_hdr(skb);
4552 iph->tot_len = 0;
4553 iph->check = 0;
4554 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4555 0, IPPROTO_TCP, 0);
4556 cmd_length = E1000_TXD_CMD_IP;
4557 ipcse = skb_transport_offset(skb) - 1;
4558 } else if (skb_is_gso_v6(skb)) {
4559 ipv6_hdr(skb)->payload_len = 0;
4560 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4561 &ipv6_hdr(skb)->daddr,
4562 0, IPPROTO_TCP, 0);
4563 }
4565 ipcss = skb_network_offset(skb);
4566 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4567 tucss = skb_transport_offset(skb);
4568 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4571 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4572 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
4574 i = tx_ring->next_to_use;
4575 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4576 buffer_info = &tx_ring->buffer_info[i];
4578 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4579 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4580 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4581 context_desc->upper_setup.tcp_fields.tucss = tucss;
4582 context_desc->upper_setup.tcp_fields.tucso = tucso;
4583 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4584 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4585 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4586 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4588 buffer_info->time_stamp = jiffies;
4589 buffer_info->next_to_watch = i;
4591 i++;
4592 if (i == tx_ring->count)
4593 i = 0;
4594 tx_ring->next_to_use = i;
4596 return 1;
4597 }
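/*
 * Editor's note: the context descriptor written above hands the MAC the
 * byte offsets of the IP and TCP checksum fields (ipcss/ipcso/ipcse,
 * tucss/tucso) plus the MSS and total header length, so the hardware can
 * replicate the headers and fix up length and checksum fields for every
 * MSS-sized segment it carves out of the payload.
 */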
4599 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4601 struct e1000_ring *tx_ring = adapter->tx_ring;
4602 struct e1000_context_desc *context_desc;
4603 struct e1000_buffer *buffer_info;
4604 unsigned int i;
4605 u8 css;
4606 u32 cmd_len = E1000_TXD_CMD_DEXT;
4607 __be16 protocol;
4609 if (skb->ip_summed != CHECKSUM_PARTIAL)
4610 return 0;
4612 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4613 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4615 protocol = skb->protocol;
4617 switch (protocol) {
4618 case cpu_to_be16(ETH_P_IP):
4619 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4620 cmd_len |= E1000_TXD_CMD_TCP;
4621 break;
4622 case cpu_to_be16(ETH_P_IPV6):
4623 /* XXX not handling all IPV6 headers */
4624 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4625 cmd_len |= E1000_TXD_CMD_TCP;
4626 break;
4627 default:
4628 if (unlikely(net_ratelimit()))
4629 e_warn("checksum_partial proto=%x!\n",
4630 be16_to_cpu(protocol));
4631 break;
4632 }
4634 css = skb_checksum_start_offset(skb);
4636 i = tx_ring->next_to_use;
4637 buffer_info = &tx_ring->buffer_info[i];
4638 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4640 context_desc->lower_setup.ip_config = 0;
4641 context_desc->upper_setup.tcp_fields.tucss = css;
4642 context_desc->upper_setup.tcp_fields.tucso =
4643 css + skb->csum_offset;
4644 context_desc->upper_setup.tcp_fields.tucse = 0;
4645 context_desc->tcp_seg_setup.data = 0;
4646 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4648 buffer_info->time_stamp = jiffies;
4649 buffer_info->next_to_watch = i;
4651 i++;
4652 if (i == tx_ring->count)
4653 i = 0;
4654 tx_ring->next_to_use = i;
4656 return 1;
4657 }
4659 static int e1000_tx_map(struct e1000_adapter *adapter,
4660 struct sk_buff *skb, unsigned int first,
4661 unsigned int max_per_txd, unsigned int nr_frags)
4663 struct e1000_ring *tx_ring = adapter->tx_ring;
4664 struct pci_dev *pdev = adapter->pdev;
4665 struct e1000_buffer *buffer_info;
4666 unsigned int len = skb_headlen(skb);
4667 unsigned int offset = 0, size, count = 0, i;
4668 unsigned int f, bytecount, segs;
4670 i = tx_ring->next_to_use;
4672 while (len) {
4673 buffer_info = &tx_ring->buffer_info[i];
4674 size = min(len, max_per_txd);
4676 buffer_info->length = size;
4677 buffer_info->time_stamp = jiffies;
4678 buffer_info->next_to_watch = i;
4679 buffer_info->dma = dma_map_single(&pdev->dev,
4680 skb->data + offset,
4681 size, DMA_TO_DEVICE);
4682 buffer_info->mapped_as_page = false;
4683 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4684 goto dma_error;
4686 len -= size;
4687 offset += size;
4688 count++;
4690 if (len) {
4691 i++;
4692 if (i == tx_ring->count)
4693 i = 0;
4694 }
4695 }
4697 for (f = 0; f < nr_frags; f++) {
4698 const struct skb_frag_struct *frag;
4700 frag = &skb_shinfo(skb)->frags[f];
4701 len = skb_frag_size(frag);
4702 offset = 0;
4704 while (len) {
4705 i++;
4706 if (i == tx_ring->count)
4707 i = 0;
4709 buffer_info = &tx_ring->buffer_info[i];
4710 size = min(len, max_per_txd);
4712 buffer_info->length = size;
4713 buffer_info->time_stamp = jiffies;
4714 buffer_info->next_to_watch = i;
4715 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4716 offset, size, DMA_TO_DEVICE);
4717 buffer_info->mapped_as_page = true;
4718 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4719 goto dma_error;
4721 len -= size;
4722 offset += size;
4723 count++;
4724 }
4725 }
4727 segs = skb_shinfo(skb)->gso_segs ? : 1;
4728 /* multiply data chunks by size of headers */
4729 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4731 tx_ring->buffer_info[i].skb = skb;
4732 tx_ring->buffer_info[i].segs = segs;
4733 tx_ring->buffer_info[i].bytecount = bytecount;
4734 tx_ring->buffer_info[first].next_to_watch = i;
4736 return count;
4738 dma_error:
4739 dev_err(&pdev->dev, "Tx DMA map failed\n");
4740 buffer_info->dma = 0;
4741 if (count)
4742 count--;
4744 while (count--) {
4745 if (i == 0)
4746 i += tx_ring->count;
4747 i--;
4748 buffer_info = &tx_ring->buffer_info[i];
4749 e1000_put_txbuf(adapter, buffer_info);
4750 }
4752 return 0;
4753 }
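/*
 * Editor's note -- illustrative arithmetic: for a TSO skb of
 * skb->len = 3000 with a 100-byte linear header and gso_segs = 2, the
 * bytecount computed above is (2 - 1) * 100 + 3000 = 3100, i.e. the
 * wire bytes including the extra replicated header, which keeps the Tx
 * byte statistics honest.
 */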
4755 static void e1000_tx_queue(struct e1000_adapter *adapter,
4756 int tx_flags, int count)
4758 struct e1000_ring *tx_ring = adapter->tx_ring;
4759 struct e1000_tx_desc *tx_desc = NULL;
4760 struct e1000_buffer *buffer_info;
4761 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4762 unsigned int i;
4764 if (tx_flags & E1000_TX_FLAGS_TSO) {
4765 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4766 E1000_TXD_CMD_TSE;
4767 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4769 if (tx_flags & E1000_TX_FLAGS_IPV4)
4770 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4773 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4774 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4775 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4778 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4779 txd_lower |= E1000_TXD_CMD_VLE;
4780 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4783 i = tx_ring->next_to_use;
4785 do {
4786 buffer_info = &tx_ring->buffer_info[i];
4787 tx_desc = E1000_TX_DESC(*tx_ring, i);
4788 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4789 tx_desc->lower.data =
4790 cpu_to_le32(txd_lower | buffer_info->length);
4791 tx_desc->upper.data = cpu_to_le32(txd_upper);
4793 i++;
4794 if (i == tx_ring->count)
4795 i = 0;
4796 } while (--count > 0);
4798 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4800 /*
4801 * Force memory writes to complete before letting h/w
4802 * know there are new descriptors to fetch. (Only
4803 * applicable for weak-ordered memory model archs,
4804 * such as IA-64).
4805 */
4806 wmb();
4808 tx_ring->next_to_use = i;
4810 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4811 e1000e_update_tdt_wa(adapter, i);
4812 else
4813 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4815 /*
4816 * we need this if more than one processor can write to our tail
4817 * at a time, it synchronizes IO on IA64/Altix systems
4818 */
4819 mmiowb();
4820 }
4822 #define MINIMUM_DHCP_PACKET_SIZE 282
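/*
 * Editor's note: 282 is the smallest frame that can carry a BOOTP/DHCP
 * message: 14 (Ethernet) + 20 (IPv4, no options) + 8 (UDP) + 236 (fixed
 * BOOTP header) + 4 (magic cookie) = 282 bytes.
 */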
4823 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4824 struct sk_buff *skb)
4826 struct e1000_hw *hw = &adapter->hw;
4827 u16 length, offset;
4829 if (vlan_tx_tag_present(skb)) {
4830 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4831 (adapter->hw.mng_cookie.status &
4832 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4833 return 0;
4834 }
4836 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4837 return 0;
4839 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4840 return 0;
4842 {
4843 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4844 struct udphdr *udp;
4846 if (ip->protocol != IPPROTO_UDP)
4847 return 0;
4849 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4850 if (ntohs(udp->dest) != 67)
4851 return 0;
4853 offset = (u8 *)udp + 8 - skb->data;
4854 length = skb->len - offset;
4855 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4856 }
4858 return 0;
4859 }
4861 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4863 struct e1000_adapter *adapter = netdev_priv(netdev);
4865 netif_stop_queue(netdev);
4866 /*
4867 * Herbert's original patch had:
4868 * smp_mb__after_netif_stop_queue();
4869 * but since that doesn't exist yet, just open code it.
4870 */
4871 smp_mb();
4873 /*
4874 * We need to check again in a case another CPU has just
4875 * made room available.
4876 */
4877 if (e1000_desc_unused(adapter->tx_ring) < size)
4878 return -EBUSY;
4880 /* A reprieve! */
4881 netif_start_queue(netdev);
4882 ++adapter->restart_queue;
4883 return 0;
4884 }
4886 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4888 struct e1000_adapter *adapter = netdev_priv(netdev);
4890 BUG_ON(size > adapter->tx_ring->count);
4892 if (e1000_desc_unused(adapter->tx_ring) >= size)
4893 return 0;
4894 return __e1000_maybe_stop_tx(netdev, size);
4895 }
4897 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4898 struct net_device *netdev)
4900 struct e1000_adapter *adapter = netdev_priv(netdev);
4901 struct e1000_ring *tx_ring = adapter->tx_ring;
4902 unsigned int first;
4903 unsigned int tx_flags = 0;
4904 unsigned int len = skb_headlen(skb);
4905 unsigned int nr_frags;
4906 unsigned int mss;
4907 int count = 0;
4908 int tso;
4909 unsigned int f;
4911 if (test_bit(__E1000_DOWN, &adapter->state)) {
4912 dev_kfree_skb_any(skb);
4913 return NETDEV_TX_OK;
4916 if (skb->len <= 0) {
4917 dev_kfree_skb_any(skb);
4918 return NETDEV_TX_OK;
4921 mss = skb_shinfo(skb)->gso_size;
4922 if (mss) {
4923 u8 hdr_len;
4925 /*
4926 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4927 * points to just header, pull a few bytes of payload from
4928 * frags into skb->data
4929 */
4930 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4931 /*
4932 * we do this workaround for ES2LAN, but it is unnecessary,
4933 * avoiding it could save a lot of cycles
4934 */
4935 if (skb->data_len && (hdr_len == len)) {
4936 unsigned int pull_size;
4938 pull_size = min((unsigned int)4, skb->data_len);
4939 if (!__pskb_pull_tail(skb, pull_size)) {
4940 e_err("__pskb_pull_tail failed.\n");
4941 dev_kfree_skb_any(skb);
4942 return NETDEV_TX_OK;
4944 len = skb_headlen(skb);
4945 }
4946 }
4948 /* reserve a descriptor for the offload context */
4949 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
4950 count++;
4951 count++;
4953 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
4955 nr_frags = skb_shinfo(skb)->nr_frags;
4956 for (f = 0; f < nr_frags; f++)
4957 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
4958 adapter->tx_fifo_limit);
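/*
 * Editor's note -- illustrative numbers: a single descriptor covers at
 * most adapter->tx_fifo_limit bytes, so with a 4096-byte limit a
 * 9000-byte fragment costs DIV_ROUND_UP(9000, 4096) = 3 descriptors;
 * the loop above sums that cost over every fragment.
 */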
4960 if (adapter->hw.mac.tx_pkt_filtering)
4961 e1000_transfer_dhcp_info(adapter, skb);
4963 /*
4964 * need: count + 2 desc gap to keep tail from touching
4965 * head, otherwise try next time
4966 */
4967 if (e1000_maybe_stop_tx(netdev, count + 2))
4968 return NETDEV_TX_BUSY;
4970 if (vlan_tx_tag_present(skb)) {
4971 tx_flags |= E1000_TX_FLAGS_VLAN;
4972 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4975 first = tx_ring->next_to_use;
4977 tso = e1000_tso(adapter, skb);
4978 if (tso < 0) {
4979 dev_kfree_skb_any(skb);
4980 return NETDEV_TX_OK;
4981 }
4983 if (tso)
4984 tx_flags |= E1000_TX_FLAGS_TSO;
4985 else if (e1000_tx_csum(adapter, skb))
4986 tx_flags |= E1000_TX_FLAGS_CSUM;
4988 /*
4989 * Old method was to assume IPv4 packet by default if TSO was enabled.
4990 * 82571 hardware supports TSO capabilities for IPv6 as well...
4991 * no longer assume, we must.
4992 */
4993 if (skb->protocol == htons(ETH_P_IP))
4994 tx_flags |= E1000_TX_FLAGS_IPV4;
4996 /* if count is 0 then mapping error has occurred */
4997 count = e1000_tx_map(adapter, skb, first, adapter->tx_fifo_limit,
4998 nr_frags);
4999 if (count) {
5000 e1000_tx_queue(adapter, tx_flags, count);
5001 /* Make sure there is space in the ring for the next send. */
5002 e1000_maybe_stop_tx(netdev,
5003 (MAX_SKB_FRAGS *
5004 DIV_ROUND_UP(PAGE_SIZE,
5005 adapter->tx_fifo_limit) + 2));
5006 } else {
5007 dev_kfree_skb_any(skb);
5008 tx_ring->buffer_info[first].time_stamp = 0;
5009 tx_ring->next_to_use = first;
5010 }
5012 return NETDEV_TX_OK;
5013 }
5015 /**
5016 * e1000_tx_timeout - Respond to a Tx Hang
5017 * @netdev: network interface device structure
5018 **/
5019 static void e1000_tx_timeout(struct net_device *netdev)
5021 struct e1000_adapter *adapter = netdev_priv(netdev);
5023 /* Do the reset outside of interrupt context */
5024 adapter->tx_timeout_count++;
5025 schedule_work(&adapter->reset_task);
5026 }
5028 static void e1000_reset_task(struct work_struct *work)
5030 struct e1000_adapter *adapter;
5031 adapter = container_of(work, struct e1000_adapter, reset_task);
5033 /* don't run the task if already down */
5034 if (test_bit(__E1000_DOWN, &adapter->state))
5035 return;
5037 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5038 (adapter->flags & FLAG_RX_RESTART_NOW))) {
5039 e1000e_dump(adapter);
5040 e_err("Reset adapter\n");
5041 }
5042 e1000e_reinit_locked(adapter);
5043 }
5045 /**
5046 * e1000_get_stats64 - Get System Network Statistics
5047 * @netdev: network interface device structure
5048 * @stats: rtnl_link_stats64 pointer
5049 *
5050 * Returns the address of the device statistics structure.
5051 **/
5052 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5053 struct rtnl_link_stats64 *stats)
5055 struct e1000_adapter *adapter = netdev_priv(netdev);
5057 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5058 spin_lock(&adapter->stats64_lock);
5059 e1000e_update_stats(adapter);
5060 /* Fill out the OS statistics structure */
5061 stats->rx_bytes = adapter->stats.gorc;
5062 stats->rx_packets = adapter->stats.gprc;
5063 stats->tx_bytes = adapter->stats.gotc;
5064 stats->tx_packets = adapter->stats.gptc;
5065 stats->multicast = adapter->stats.mprc;
5066 stats->collisions = adapter->stats.colc;
5068 /* Rx Errors */
5070 /*
5071 * RLEC on some newer hardware can be incorrect so build
5072 * our own version based on RUC and ROC
5073 */
5074 stats->rx_errors = adapter->stats.rxerrc +
5075 adapter->stats.crcerrs + adapter->stats.algnerrc +
5076 adapter->stats.ruc + adapter->stats.roc +
5077 adapter->stats.cexterr;
5078 stats->rx_length_errors = adapter->stats.ruc +
5079 adapter->stats.roc;
5080 stats->rx_crc_errors = adapter->stats.crcerrs;
5081 stats->rx_frame_errors = adapter->stats.algnerrc;
5082 stats->rx_missed_errors = adapter->stats.mpc;
5084 /* Tx Errors */
5085 stats->tx_errors = adapter->stats.ecol +
5086 adapter->stats.latecol;
5087 stats->tx_aborted_errors = adapter->stats.ecol;
5088 stats->tx_window_errors = adapter->stats.latecol;
5089 stats->tx_carrier_errors = adapter->stats.tncrs;
5091 /* Tx Dropped needs to be maintained elsewhere */
5093 spin_unlock(&adapter->stats64_lock);
5094 return stats;
5095 }
5097 /**
5098 * e1000_change_mtu - Change the Maximum Transfer Unit
5099 * @netdev: network interface device structure
5100 * @new_mtu: new value for maximum frame size
5101 *
5102 * Returns 0 on success, negative on failure
5103 **/
5104 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5106 struct e1000_adapter *adapter = netdev_priv(netdev);
5107 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
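/*
 * Editor's note: for the default MTU of 1500 this evaluates to
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 bytes, i.e.
 * ETH_FRAME_LEN + ETH_FCS_LEN, the non-jumbo bound tested below.
 */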
5109 /* Jumbo frame support */
5110 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5111 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5112 e_err("Jumbo Frames not supported.\n");
5113 return -EINVAL;
5114 }
5116 /* Supported frame sizes */
5117 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5118 (max_frame > adapter->max_hw_frame_size)) {
5119 e_err("Unsupported MTU setting\n");
5120 return -EINVAL;
5121 }
5123 /* Jumbo frame workaround on 82579 requires CRC be stripped */
5124 if ((adapter->hw.mac.type == e1000_pch2lan) &&
5125 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5126 (new_mtu > ETH_DATA_LEN)) {
5127 e_err("Jumbo Frames not supported on 82579 when CRC "
5128 "stripping is disabled.\n");
5129 return -EINVAL;
5130 }
5132 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5133 usleep_range(1000, 2000);
5134 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5135 adapter->max_frame_size = max_frame;
5136 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5137 netdev->mtu = new_mtu;
5138 if (netif_running(netdev))
5139 e1000e_down(adapter);
5141 /*
5142 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5143 * means we reserve 2 more, this pushes us to allocate from the next
5144 * larger slab size.
5145 * i.e. RXBUFFER_2048 --> size-4096 slab
5146 * However with the new *_jumbo_rx* routines, jumbo receives will use
5147 * fragmented skbs
5148 */
5150 if (max_frame <= 2048)
5151 adapter->rx_buffer_len = 2048;
5152 else
5153 adapter->rx_buffer_len = 4096;
5155 /* adjust allocation if LPE protects us, and we aren't using SBP */
5156 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5157 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5158 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5159 + ETH_FCS_LEN;
5161 if (netif_running(netdev))
5162 e1000e_up(adapter);
5163 else
5164 e1000e_reset(adapter);
5166 clear_bit(__E1000_RESETTING, &adapter->state);
5168 return 0;
5169 }
5171 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5174 struct e1000_adapter *adapter = netdev_priv(netdev);
5175 struct mii_ioctl_data *data = if_mii(ifr);
5177 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5178 return -EOPNOTSUPP;
5180 switch (cmd) {
5181 case SIOCGMIIPHY:
5182 data->phy_id = adapter->hw.phy.addr;
5183 break;
5184 case SIOCGMIIREG:
5185 e1000_phy_read_status(adapter);
5187 switch (data->reg_num & 0x1F) {
5189 case MII_BMCR: data->val_out = adapter->phy_regs.bmcr; break;
5192 case MII_BMSR: data->val_out = adapter->phy_regs.bmsr; break;
5195 case MII_PHYSID1: data->val_out = (adapter->hw.phy.id >> 16); break;
5198 case MII_PHYSID2: data->val_out = (adapter->hw.phy.id & 0xFFFF); break;
5201 case MII_ADVERTISE: data->val_out = adapter->phy_regs.advertise; break;
5204 case MII_LPA: data->val_out = adapter->phy_regs.lpa; break;
5207 case MII_EXPANSION: data->val_out = adapter->phy_regs.expansion; break;
5210 case MII_CTRL1000: data->val_out = adapter->phy_regs.ctrl1000; break;
5213 case MII_STAT1000: data->val_out = adapter->phy_regs.stat1000; break;
5216 case MII_ESTATUS: data->val_out = adapter->phy_regs.estatus; break;
5218 default:
5219 return -EIO;
5220 }
5221 break;
5222 case SIOCSMIIREG:
5223 default:
5224 return -EOPNOTSUPP;
5225 }
5226 return 0;
5227 }
5229 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5231 switch (cmd) {
5232 case SIOCGMIIPHY:
5233 case SIOCGMIIREG:
5234 case SIOCSMIIREG:
5235 return e1000_mii_ioctl(netdev, ifr, cmd);
5236 default:
5237 return -EOPNOTSUPP;
5238 }
5239 }
5241 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5243 struct e1000_hw *hw = &adapter->hw;
5244 u32 i, mac_reg;
5245 u16 phy_reg, wuc_enable;
5246 int retval = 0;
5248 /* copy MAC RARs to PHY RARs */
5249 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5251 retval = hw->phy.ops.acquire(hw);
5252 if (retval) {
5253 e_err("Could not acquire PHY\n");
5254 return retval;
5255 }
5257 /* Enable access to the wakeup registers and set page to BM_WUC_PAGE */
5258 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5259 if (retval)
5260 goto release;
5262 /* copy MAC MTA to PHY MTA - only needed for pchlan */
5263 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5264 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5265 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5266 (u16)(mac_reg & 0xFFFF));
5267 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5268 (u16)((mac_reg >> 16) & 0xFFFF));
5271 /* configure PHY Rx Control register */
5272 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5273 mac_reg = er32(RCTL);
5274 if (mac_reg & E1000_RCTL_UPE)
5275 phy_reg |= BM_RCTL_UPE;
5276 if (mac_reg & E1000_RCTL_MPE)
5277 phy_reg |= BM_RCTL_MPE;
5278 phy_reg &= ~(BM_RCTL_MO_MASK);
5279 if (mac_reg & E1000_RCTL_MO_3)
5280 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5281 << BM_RCTL_MO_SHIFT);
5282 if (mac_reg & E1000_RCTL_BAM)
5283 phy_reg |= BM_RCTL_BAM;
5284 if (mac_reg & E1000_RCTL_PMCF)
5285 phy_reg |= BM_RCTL_PMCF;
5286 mac_reg = er32(CTRL);
5287 if (mac_reg & E1000_CTRL_RFCE)
5288 phy_reg |= BM_RCTL_RFCE;
5289 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5291 /* enable PHY wakeup in MAC register */
5292 ew32(WUFC, wufc);
5293 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5295 /* configure and enable PHY wakeup in PHY registers */
5296 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5297 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5299 /* activate PHY wakeup */
5300 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5301 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5302 if (retval)
5303 e_err("Could not set PHY Host Wakeup bit\n");
5304 release:
5305 hw->phy.ops.release(hw);
5307 return retval;
5308 }
5310 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5313 struct net_device *netdev = pci_get_drvdata(pdev);
5314 struct e1000_adapter *adapter = netdev_priv(netdev);
5315 struct e1000_hw *hw = &adapter->hw;
5316 u32 ctrl, ctrl_ext, rctl, status;
5317 /* Runtime suspend should only enable wakeup for link changes */
5318 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5319 int retval = 0;
5321 netif_device_detach(netdev);
5323 if (netif_running(netdev)) {
5324 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5325 e1000e_down(adapter);
5326 e1000_free_irq(adapter);
5328 e1000e_reset_interrupt_capability(adapter);
5330 retval = pci_save_state(pdev);
5331 if (retval)
5332 return retval;
5334 status = er32(STATUS);
5335 if (status & E1000_STATUS_LU)
5336 wufc &= ~E1000_WUFC_LNKC;
5338 if (wufc) {
5339 e1000_setup_rctl(adapter);
5340 e1000_set_multi(netdev);
5342 /* turn on all-multi mode if wake on multicast is enabled */
5343 if (wufc & E1000_WUFC_MC) {
5344 rctl = er32(RCTL);
5345 rctl |= E1000_RCTL_MPE;
5346 ew32(RCTL, rctl);
5347 }
5349 ctrl = er32(CTRL);
5350 /* advertise wake from D3Cold */
5351 #define E1000_CTRL_ADVD3WUC 0x00100000
5352 /* phy power management enable */
5353 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5354 ctrl |= E1000_CTRL_ADVD3WUC;
5355 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5356 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5357 ew32(CTRL, ctrl);
5359 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5360 adapter->hw.phy.media_type ==
5361 e1000_media_type_internal_serdes) {
5362 /* keep the laser running in D3 */
5363 ctrl_ext = er32(CTRL_EXT);
5364 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5365 ew32(CTRL_EXT, ctrl_ext);
5368 if (adapter->flags & FLAG_IS_ICH)
5369 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5371 /* Allow time for pending master requests to run */
5372 e1000e_disable_pcie_master(&adapter->hw);
5374 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5375 /* enable wakeup by the PHY */
5376 retval = e1000_init_phy_wakeup(adapter, wufc);
5377 if (retval)
5378 return retval;
5379 } else {
5380 /* enable wakeup by the MAC */
5381 ew32(WUFC, wufc);
5382 ew32(WUC, E1000_WUC_PME_EN);
5383 }
5384 } else {
5385 ew32(WUC, 0);
5386 ew32(WUFC, 0);
5387 }
5389 *enable_wake = !!wufc;
5391 /* make sure adapter isn't asleep if manageability is enabled */
5392 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5393 (hw->mac.ops.check_mng_mode(hw)))
5394 *enable_wake = true;
5396 if (adapter->hw.phy.type == e1000_phy_igp_3)
5397 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5400 * Release control of h/w to f/w. If f/w is AMT enabled, this
5401 * would have already happened in close and is redundant.
5403 e1000e_release_hw_control(adapter);
5405 pci_clear_master(pdev);
5407 return 0;
5408 }
5410 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5412 if (sleep && wake) {
5413 pci_prepare_to_sleep(pdev);
5414 return;
5415 }
5417 pci_wake_from_d3(pdev, wake);
5418 pci_set_power_state(pdev, PCI_D3hot);
5421 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5424 struct net_device *netdev = pci_get_drvdata(pdev);
5425 struct e1000_adapter *adapter = netdev_priv(netdev);
5428 * The pci-e switch on some quad port adapters will report a
5429 * correctable error when the MAC transitions from D0 to D3. To
5430 * prevent this we need to mask off the correctable errors on the
5431 * downstream port of the pci-e switch.
5433 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5434 struct pci_dev *us_dev = pdev->bus->self;
5435 int pos = pci_pcie_cap(us_dev);
5436 u16 devctl;
5438 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
5439 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
5440 (devctl & ~PCI_EXP_DEVCTL_CERE));
5442 e1000_power_off(pdev, sleep, wake);
5444 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
5445 } else {
5446 e1000_power_off(pdev, sleep, wake);
5447 }
5448 }
5450 #ifdef CONFIG_PCIEASPM
5451 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5453 pci_disable_link_state_locked(pdev, state);
5454 }
5455 #else
5456 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5458 int pos;
5459 u16 reg16;
5461 /*
5462 * Both device and parent should have the same ASPM setting.
5463 * Disable ASPM in downstream component first and then upstream.
5464 */
5465 pos = pci_pcie_cap(pdev);
5466 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5467 reg16 &= ~state;
5468 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5470 if (!pdev->bus->self)
5471 return;
5473 pos = pci_pcie_cap(pdev->bus->self);
5474 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5475 reg16 &= ~state;
5476 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5477 }
5478 #endif /* CONFIG_PCIEASPM */
5479 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5481 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5482 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5483 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5485 __e1000e_disable_aspm(pdev, state);
5489 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5491 return !!adapter->tx_ring->buffer_info;
5494 static int __e1000_resume(struct pci_dev *pdev)
5496 struct net_device *netdev = pci_get_drvdata(pdev);
5497 struct e1000_adapter *adapter = netdev_priv(netdev);
5498 struct e1000_hw *hw = &adapter->hw;
5499 u16 aspm_disable_flag = 0;
5502 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5503 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5504 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5505 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5506 if (aspm_disable_flag)
5507 e1000e_disable_aspm(pdev, aspm_disable_flag);
5509 pci_set_power_state(pdev, PCI_D0);
5510 pci_restore_state(pdev);
5511 pci_save_state(pdev);
5513 e1000e_set_interrupt_capability(adapter);
5514 if (netif_running(netdev)) {
5515 err = e1000_request_irq(adapter);
5516 if (err)
5517 return err;
5518 }
5520 if (hw->mac.type == e1000_pch2lan)
5521 e1000_resume_workarounds_pchlan(&adapter->hw);
5523 e1000e_power_up_phy(adapter);
5525 /* report the system wakeup cause from S3/S4 */
5526 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5527 u16 phy_data;
5529 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5530 if (phy_data) {
5531 e_info("PHY Wakeup cause - %s\n",
5532 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5533 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5534 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5535 phy_data & E1000_WUS_MAG ? "Magic Packet" :
5536 phy_data & E1000_WUS_LNKC ?
5537 "Link Status Change" : "other");
5538 }
5539 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5540 } else {
5541 u32 wus = er32(WUS);
5542 if (wus) {
5543 e_info("MAC Wakeup cause - %s\n",
5544 wus & E1000_WUS_EX ? "Unicast Packet" :
5545 wus & E1000_WUS_MC ? "Multicast Packet" :
5546 wus & E1000_WUS_BC ? "Broadcast Packet" :
5547 wus & E1000_WUS_MAG ? "Magic Packet" :
5548 wus & E1000_WUS_LNKC ? "Link Status Change" :
5549 "other");
5550 }
5551 ew32(WUS, ~0);
5552 }
5554 e1000e_reset(adapter);
5556 e1000_init_manageability_pt(adapter);
5558 if (netif_running(netdev))
5559 e1000e_up(adapter);
5561 netif_device_attach(netdev);
5564 * If the controller has AMT, do not set DRV_LOAD until the interface
5565 * is up. For all other cases, let the f/w know that the h/w is now
5566 * under the control of the driver.
5568 if (!(adapter->flags & FLAG_HAS_AMT))
5569 e1000e_get_hw_control(adapter);
5571 return 0;
5572 }
5574 #ifdef CONFIG_PM_SLEEP
5575 static int e1000_suspend(struct device *dev)
5577 struct pci_dev *pdev = to_pci_dev(dev);
5578 int retval;
5579 bool wake;
5581 retval = __e1000_shutdown(pdev, &wake, false);
5582 if (!retval)
5583 e1000_complete_shutdown(pdev, true, wake);
5585 return retval;
5586 }
5588 static int e1000_resume(struct device *dev)
5590 struct pci_dev *pdev = to_pci_dev(dev);
5591 struct net_device *netdev = pci_get_drvdata(pdev);
5592 struct e1000_adapter *adapter = netdev_priv(netdev);
5594 if (e1000e_pm_ready(adapter))
5595 adapter->idle_check = true;
5597 return __e1000_resume(pdev);
5599 #endif /* CONFIG_PM_SLEEP */
5601 #ifdef CONFIG_PM_RUNTIME
5602 static int e1000_runtime_suspend(struct device *dev)
5604 struct pci_dev *pdev = to_pci_dev(dev);
5605 struct net_device *netdev = pci_get_drvdata(pdev);
5606 struct e1000_adapter *adapter = netdev_priv(netdev);
5608 if (e1000e_pm_ready(adapter)) {
5609 bool wake;
5611 __e1000_shutdown(pdev, &wake, true);
5612 }
5614 return 0;
5615 }
5617 static int e1000_idle(struct device *dev)
5619 struct pci_dev *pdev = to_pci_dev(dev);
5620 struct net_device *netdev = pci_get_drvdata(pdev);
5621 struct e1000_adapter *adapter = netdev_priv(netdev);
5623 if (!e1000e_pm_ready(adapter))
5624 return 0;
5626 if (adapter->idle_check) {
5627 adapter->idle_check = false;
5628 if (!e1000e_has_link(adapter))
5629 pm_schedule_suspend(dev, MSEC_PER_SEC);
5630 }
5632 return -EBUSY;
5633 }
5635 static int e1000_runtime_resume(struct device *dev)
5637 struct pci_dev *pdev = to_pci_dev(dev);
5638 struct net_device *netdev = pci_get_drvdata(pdev);
5639 struct e1000_adapter *adapter = netdev_priv(netdev);
5641 if (!e1000e_pm_ready(adapter))
5642 return 0;
5644 adapter->idle_check = !dev->power.runtime_auto;
5645 return __e1000_resume(pdev);
5647 #endif /* CONFIG_PM_RUNTIME */
5648 #endif /* CONFIG_PM */
5650 static void e1000_shutdown(struct pci_dev *pdev)
5652 bool wake = false;
5654 __e1000_shutdown(pdev, &wake, false);
5656 if (system_state == SYSTEM_POWER_OFF)
5657 e1000_complete_shutdown(pdev, false, wake);
5658 }
5660 #ifdef CONFIG_NET_POLL_CONTROLLER
5662 static irqreturn_t e1000_intr_msix(int irq, void *data)
5664 struct net_device *netdev = data;
5665 struct e1000_adapter *adapter = netdev_priv(netdev);
5667 if (adapter->msix_entries) {
5668 int vector, msix_irq;
5670 vector = 0;
5671 msix_irq = adapter->msix_entries[vector].vector;
5672 disable_irq(msix_irq);
5673 e1000_intr_msix_rx(msix_irq, netdev);
5674 enable_irq(msix_irq);
5676 vector++;
5677 msix_irq = adapter->msix_entries[vector].vector;
5678 disable_irq(msix_irq);
5679 e1000_intr_msix_tx(msix_irq, netdev);
5680 enable_irq(msix_irq);
5682 vector++;
5683 msix_irq = adapter->msix_entries[vector].vector;
5684 disable_irq(msix_irq);
5685 e1000_msix_other(msix_irq, netdev);
5686 enable_irq(msix_irq);
5687 }
5689 return IRQ_HANDLED;
5690 }
5692 /*
5693 * Polling 'interrupt' - used by things like netconsole to send skbs
5694 * without having to re-enable interrupts. It's not called while
5695 * the interrupt routine is executing.
5696 */
5697 static void e1000_netpoll(struct net_device *netdev)
5699 struct e1000_adapter *adapter = netdev_priv(netdev);
5701 switch (adapter->int_mode) {
5702 case E1000E_INT_MODE_MSIX:
5703 e1000_intr_msix(adapter->pdev->irq, netdev);
5704 break;
5705 case E1000E_INT_MODE_MSI:
5706 disable_irq(adapter->pdev->irq);
5707 e1000_intr_msi(adapter->pdev->irq, netdev);
5708 enable_irq(adapter->pdev->irq);
5709 break;
5710 default: /* E1000E_INT_MODE_LEGACY */
5711 disable_irq(adapter->pdev->irq);
5712 e1000_intr(adapter->pdev->irq, netdev);
5713 enable_irq(adapter->pdev->irq);
5714 break;
5715 }
5716 }
5717 #endif /* CONFIG_NET_POLL_CONTROLLER */
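/*
 * Editor's note: netconsole may invoke the poll controller from nearly
 * any context, so each handler call above is bracketed by
 * disable_irq()/enable_irq() on the corresponding vector to avoid racing
 * the real interrupt path.
 */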
5719 /**
5720 * e1000_io_error_detected - called when PCI error is detected
5721 * @pdev: Pointer to PCI device
5722 * @state: The current pci connection state
5723 *
5724 * This function is called after a PCI bus error affecting
5725 * this device has been detected.
5726 **/
5727 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5728 pci_channel_state_t state)
5730 struct net_device *netdev = pci_get_drvdata(pdev);
5731 struct e1000_adapter *adapter = netdev_priv(netdev);
5733 netif_device_detach(netdev);
5735 if (state == pci_channel_io_perm_failure)
5736 return PCI_ERS_RESULT_DISCONNECT;
5738 if (netif_running(netdev))
5739 e1000e_down(adapter);
5740 pci_disable_device(pdev);
5742 /* Request a slot reset. */
5743 return PCI_ERS_RESULT_NEED_RESET;
5746 /**
5747 * e1000_io_slot_reset - called after the pci bus has been reset.
5748 * @pdev: Pointer to PCI device
5749 *
5750 * Restart the card from scratch, as if from a cold-boot. Implementation
5751 * resembles the first-half of the e1000_resume routine.
5752 **/
5753 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5755 struct net_device *netdev = pci_get_drvdata(pdev);
5756 struct e1000_adapter *adapter = netdev_priv(netdev);
5757 struct e1000_hw *hw = &adapter->hw;
5758 u16 aspm_disable_flag = 0;
5759 int err;
5760 pci_ers_result_t result;
5762 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5763 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5764 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5765 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5766 if (aspm_disable_flag)
5767 e1000e_disable_aspm(pdev, aspm_disable_flag);
5769 err = pci_enable_device_mem(pdev);
5770 if (err) {
5771 dev_err(&pdev->dev,
5772 "Cannot re-enable PCI device after reset.\n");
5773 result = PCI_ERS_RESULT_DISCONNECT;
5774 } else {
5775 pci_set_master(pdev);
5776 pdev->state_saved = true;
5777 pci_restore_state(pdev);
5779 pci_enable_wake(pdev, PCI_D3hot, 0);
5780 pci_enable_wake(pdev, PCI_D3cold, 0);
5782 e1000e_reset(adapter);
5783 ew32(WUS, ~0);
5784 result = PCI_ERS_RESULT_RECOVERED;
5785 }
5787 pci_cleanup_aer_uncorrect_error_status(pdev);
5789 return result;
5790 }
5792 /**
5793 * e1000_io_resume - called when traffic can start flowing again.
5794 * @pdev: Pointer to PCI device
5795 *
5796 * This callback is called when the error recovery driver tells us that
5797 * it's OK to resume normal operation. Implementation resembles the
5798 * second-half of the e1000_resume routine.
5799 **/
5800 static void e1000_io_resume(struct pci_dev *pdev)
5802 struct net_device *netdev = pci_get_drvdata(pdev);
5803 struct e1000_adapter *adapter = netdev_priv(netdev);
5805 e1000_init_manageability_pt(adapter);
5807 if (netif_running(netdev)) {
5808 if (e1000e_up(adapter)) {
5809 dev_err(&pdev->dev,
5810 "can't bring device back up after reset\n");
5811 return;
5812 }
5813 }
5815 netif_device_attach(netdev);
5818 * If the controller has AMT, do not set DRV_LOAD until the interface
5819 * is up. For all other cases, let the f/w know that the h/w is now
5820 * under the control of the driver.
5822 if (!(adapter->flags & FLAG_HAS_AMT))
5823 e1000e_get_hw_control(adapter);
5824 }
5827 static void e1000_print_device_info(struct e1000_adapter *adapter)
5829 struct e1000_hw *hw = &adapter->hw;
5830 struct net_device *netdev = adapter->netdev;
5831 u32 ret_val;
5832 u8 pba_str[E1000_PBANUM_LENGTH];
5834 /* print bus type/speed/width info */
5835 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5836 /* bus width */
5837 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5838 "Width x1"),
5839 netdev->dev_addr);
5841 e_info("Intel(R) PRO/%s Network Connection\n",
5842 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5843 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5844 E1000_PBANUM_LENGTH);
5845 if (ret_val)
5846 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
5847 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5848 hw->mac.type, hw->phy.type, pba_str);
5849 }
5851 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5853 struct e1000_hw *hw = &adapter->hw;
5854 s32 ret_val;
5855 __le16 buf;
5857 if (hw->mac.type != e1000_82573)
5858 return;
5860 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5861 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
5862 /* Deep Smart Power Down (DSPD) */
5863 dev_warn(&adapter->pdev->dev,
5864 "Warning: detected DSPD enabled in EEPROM\n");
5865 }
5866 }
5868 static int e1000_set_features(struct net_device *netdev, u32 features)
5870 struct e1000_adapter *adapter = netdev_priv(netdev);
5871 u32 changed = features ^ netdev->features;
5873 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
5874 adapter->flags |= FLAG_TSO_FORCE;
5876 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
5877 NETIF_F_RXCSUM)))
5878 return 0;
5880 if (netif_running(netdev))
5881 e1000e_reinit_locked(adapter);
5882 else
5883 e1000e_reset(adapter);
5885 return 0;
5886 }
5888 static const struct net_device_ops e1000e_netdev_ops = {
5889 .ndo_open = e1000_open,
5890 .ndo_stop = e1000_close,
5891 .ndo_start_xmit = e1000_xmit_frame,
5892 .ndo_get_stats64 = e1000e_get_stats64,
5893 .ndo_set_rx_mode = e1000_set_multi,
5894 .ndo_set_mac_address = e1000_set_mac,
5895 .ndo_change_mtu = e1000_change_mtu,
5896 .ndo_do_ioctl = e1000_ioctl,
5897 .ndo_tx_timeout = e1000_tx_timeout,
5898 .ndo_validate_addr = eth_validate_addr,
5900 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5901 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5902 #ifdef CONFIG_NET_POLL_CONTROLLER
5903 .ndo_poll_controller = e1000_netpoll,
5904 #endif
5905 .ndo_set_features = e1000_set_features,
5906 };
5908 /**
5909 * e1000_probe - Device Initialization Routine
5910 * @pdev: PCI device information struct
5911 * @ent: entry in e1000_pci_tbl
5912 *
5913 * Returns 0 on success, negative on failure
5914 *
5915 * e1000_probe initializes an adapter identified by a pci_dev structure.
5916 * The OS initialization, configuring of the adapter private structure,
5917 * and a hardware reset occur.
5918 **/
5919 static int __devinit e1000_probe(struct pci_dev *pdev,
5920 const struct pci_device_id *ent)
5922 struct net_device *netdev;
5923 struct e1000_adapter *adapter;
5924 struct e1000_hw *hw;
5925 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
5926 resource_size_t mmio_start, mmio_len;
5927 resource_size_t flash_start, flash_len;
5929 static int cards_found;
5930 u16 aspm_disable_flag = 0;
5931 int i, err, pci_using_dac;
5932 u16 eeprom_data = 0;
5933 u16 eeprom_apme_mask = E1000_EEPROM_APME;
5935 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
5936 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5937 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5938 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5939 if (aspm_disable_flag)
5940 e1000e_disable_aspm(pdev, aspm_disable_flag);
5942 err = pci_enable_device_mem(pdev);
5943 if (err)
5944 return err;
5946 pci_using_dac = 0;
5947 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5948 if (!err) {
5949 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5950 if (!err)
5951 pci_using_dac = 1;
5952 } else {
5953 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5954 if (err) {
5955 err = dma_set_coherent_mask(&pdev->dev,
5956 DMA_BIT_MASK(32));
5957 if (err) {
5958 dev_err(&pdev->dev, "No usable DMA "
5959 "configuration, aborting\n");
5960 goto err_dma;
5961 }
5962 }
5963 }
5965 err = pci_request_selected_regions_exclusive(pdev,
5966 pci_select_bars(pdev, IORESOURCE_MEM),
5967 e1000e_driver_name);
5968 if (err)
5969 goto err_pci_reg;
5971 /* AER (Advanced Error Reporting) hooks */
5972 pci_enable_pcie_error_reporting(pdev);
5974 pci_set_master(pdev);
5975 /* PCI config space info */
5976 err = pci_save_state(pdev);
5977 if (err)
5978 goto err_alloc_etherdev;
5981 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
5982 if (!netdev)
5983 goto err_alloc_etherdev;
5985 SET_NETDEV_DEV(netdev, &pdev->dev);
5987 netdev->irq = pdev->irq;
5989 pci_set_drvdata(pdev, netdev);
5990 adapter = netdev_priv(netdev);
5991 hw = &adapter->hw;
5992 adapter->netdev = netdev;
5993 adapter->pdev = pdev;
5995 adapter->pba = ei->pba;
5996 adapter->flags = ei->flags;
5997 adapter->flags2 = ei->flags2;
5998 adapter->hw.adapter = adapter;
5999 adapter->hw.mac.type = ei->mac;
6000 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6001 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
6003 mmio_start = pci_resource_start(pdev, 0);
6004 mmio_len = pci_resource_len(pdev, 0);
6006 err = -EIO;
6007 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6008 if (!adapter->hw.hw_addr)
6009 goto err_ioremap;
6011 if ((adapter->flags & FLAG_HAS_FLASH) &&
6012 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6013 flash_start = pci_resource_start(pdev, 1);
6014 flash_len = pci_resource_len(pdev, 1);
6015 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6016 if (!adapter->hw.flash_address)
6017 goto err_flashmap;
6018 }
6020 /* construct the net_device struct */
6021 netdev->netdev_ops = &e1000e_netdev_ops;
6022 e1000e_set_ethtool_ops(netdev);
6023 netdev->watchdog_timeo = 5 * HZ;
6024 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
6025 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
6027 netdev->mem_start = mmio_start;
6028 netdev->mem_end = mmio_start + mmio_len;
6030 adapter->bd_number = cards_found++;
6032 e1000e_check_options(adapter);
6034 /* setup adapter struct */
6035 err = e1000_sw_init(adapter);
6036 if (err)
6037 goto err_sw_init;
6039 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6040 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6041 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6043 err = ei->get_variants(adapter);
6044 if (err)
6045 goto err_hw_init;
6047 if ((adapter->flags & FLAG_IS_ICH) &&
6048 (adapter->flags & FLAG_READ_ONLY_NVM))
6049 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6051 hw->mac.ops.get_bus_info(&adapter->hw);
6053 adapter->hw.phy.autoneg_wait_to_complete = 0;
6055 /* Copper options */
6056 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6057 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6058 adapter->hw.phy.disable_polarity_correction = 0;
6059 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6062 if (e1000_check_reset_block(&adapter->hw))
6063 e_info("PHY reset is blocked due to SOL/IDER session.\n");
6065 /* Set initial default active device features */
6066 netdev->features = (NETIF_F_SG |
6067 NETIF_F_HW_VLAN_RX |
6068 NETIF_F_HW_VLAN_TX |
6069 NETIF_F_TSO |
6070 NETIF_F_TSO6 |
6071 NETIF_F_RXCSUM |
6072 NETIF_F_HW_CSUM);
6074 /* Set user-changeable features (subset of all device features) */
6075 netdev->hw_features = netdev->features;
6077 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6078 netdev->features |= NETIF_F_HW_VLAN_FILTER;
6080 netdev->vlan_features |= (NETIF_F_SG |
6081 NETIF_F_TSO |
6082 NETIF_F_TSO6 |
6083 NETIF_F_HW_CSUM);
6085 if (pci_using_dac) {
6086 netdev->features |= NETIF_F_HIGHDMA;
6087 netdev->vlan_features |= NETIF_F_HIGHDMA;
6090 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6091 adapter->flags |= FLAG_MNG_PT_ENABLED;
6094 * before reading the NVM, reset the controller to
6095 * put the device in a known good starting state
6097 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6099 /*
6100 * systems with ASPM and others may see the checksum fail on the first
6101 * attempt. Let's give it a few tries
6102 */
6103 for (i = 0;; i++) {
6104 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6105 break;
6106 if (i == 2) {
6107 e_err("The NVM Checksum Is Not Valid\n");
6108 err = -EIO;
6109 goto err_eeprom;
6110 }
6111 }
6113 e1000_eeprom_checks(adapter);
6115 /* copy the MAC address */
6116 if (e1000e_read_mac_addr(&adapter->hw))
6117 e_err("NVM Read Error while reading MAC address\n");
6119 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6120 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6122 if (!is_valid_ether_addr(netdev->perm_addr)) {
6123 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
6124 err = -EIO;
6125 goto err_eeprom;
6126 }
6128 init_timer(&adapter->watchdog_timer);
6129 adapter->watchdog_timer.function = e1000_watchdog;
6130 adapter->watchdog_timer.data = (unsigned long) adapter;
6132 init_timer(&adapter->phy_info_timer);
6133 adapter->phy_info_timer.function = e1000_update_phy_info;
6134 adapter->phy_info_timer.data = (unsigned long) adapter;
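/*
 * Editor's note: both timers fire in softirq context and therefore only
 * schedule deferred work (see e1000_watchdog() above); the actual link
 * and PHY housekeeping runs in process context via the work items
 * initialized below.
 */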
6136 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6137 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6138 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6139 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6140 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6142 /* Initialize link parameters. User can change them with ethtool */
6143 adapter->hw.mac.autoneg = 1;
6144 adapter->fc_autoneg = 1;
6145 adapter->hw.fc.requested_mode = e1000_fc_default;
6146 adapter->hw.fc.current_mode = e1000_fc_default;
6147 adapter->hw.phy.autoneg_advertised = 0x2f;
6149 /* ring size defaults */
6150 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6151 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6154 * Initial Wake on LAN setting - If APM wake is enabled in
6155 * the EEPROM, enable the ACPI Magic Packet filter
6157 if (adapter->flags & FLAG_APME_IN_WUC) {
6158 /* APME bit in EEPROM is mapped to WUC.APME */
6159 eeprom_data = er32(WUC);
6160 eeprom_apme_mask = E1000_WUC_APME;
6161 if ((hw->mac.type > e1000_ich10lan) &&
6162 (eeprom_data & E1000_WUC_PHY_WAKE))
6163 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6164 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6165 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6166 (adapter->hw.bus.func == 1))
6167 e1000_read_nvm(&adapter->hw,
6168 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
6169 else
6170 e1000_read_nvm(&adapter->hw,
6171 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6172 }
6174 /* fetch WoL from EEPROM */
6175 if (eeprom_data & eeprom_apme_mask)
6176 adapter->eeprom_wol |= E1000_WUFC_MAG;
6178 /*
6179 * now that we have the eeprom settings, apply the special cases
6180 * where the eeprom may be wrong or the board simply won't support
6181 * wake on lan on a particular port
6182 */
6183 if (!(adapter->flags & FLAG_HAS_WOL))
6184 adapter->eeprom_wol = 0;
6186 /* initialize the wol settings based on the eeprom settings */
6187 adapter->wol = adapter->eeprom_wol;
6188 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6190 /* save off EEPROM version number */
6191 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6193 /* reset the hardware with the new settings */
6194 e1000e_reset(adapter);
6197 * If the controller has AMT, do not set DRV_LOAD until the interface
6198 * is up. For all other cases, let the f/w know that the h/w is now
6199 * under the control of the driver.
6201 if (!(adapter->flags & FLAG_HAS_AMT))
6202 e1000e_get_hw_control(adapter);
6204 strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
6205 err = register_netdev(netdev);
6206 if (err)
6207 goto err_register;
6209 /* carrier off reporting is important to ethtool even BEFORE open */
6210 netif_carrier_off(netdev);
6212 e1000_print_device_info(adapter);
6214 if (pci_dev_run_wake(pdev))
6215 pm_runtime_put_noidle(&pdev->dev);
6217 return 0;
6219 err_register:
6220 if (!(adapter->flags & FLAG_HAS_AMT))
6221 e1000e_release_hw_control(adapter);
6222 err_eeprom:
6223 if (!e1000_check_reset_block(&adapter->hw))
6224 e1000_phy_hw_reset(&adapter->hw);
6225 err_hw_init:
6226 kfree(adapter->tx_ring);
6227 kfree(adapter->rx_ring);
6228 err_sw_init:
6229 if (adapter->hw.flash_address)
6230 iounmap(adapter->hw.flash_address);
6231 e1000e_reset_interrupt_capability(adapter);
6232 err_flashmap:
6233 iounmap(adapter->hw.hw_addr);
6234 err_ioremap:
6235 free_netdev(netdev);
6236 err_alloc_etherdev:
6237 pci_release_selected_regions(pdev,
6238 pci_select_bars(pdev, IORESOURCE_MEM));
6239 err_pci_reg:
6240 err_dma:
6241 pci_disable_device(pdev);
6242 return err;
6243 }
6245 /**
6246 * e1000_remove - Device Removal Routine
6247 * @pdev: PCI device information struct
6248 *
6249 * e1000_remove is called by the PCI subsystem to alert the driver
6250 * that it should release a PCI device. This could be caused by a
6251 * Hot-Plug event, or because the driver is going to be removed from
6252 * memory.
6253 **/
6254 static void __devexit e1000_remove(struct pci_dev *pdev)
6256 struct net_device *netdev = pci_get_drvdata(pdev);
6257 struct e1000_adapter *adapter = netdev_priv(netdev);
6258 bool down = test_bit(__E1000_DOWN, &adapter->state);
6260 /*
6261 * The timers may be rescheduled, so explicitly disable them
6262 * from being rescheduled.
6263 */
6264 if (!down)
6265 set_bit(__E1000_DOWN, &adapter->state);
6266 del_timer_sync(&adapter->watchdog_timer);
6267 del_timer_sync(&adapter->phy_info_timer);
6269 cancel_work_sync(&adapter->reset_task);
6270 cancel_work_sync(&adapter->watchdog_task);
6271 cancel_work_sync(&adapter->downshift_task);
6272 cancel_work_sync(&adapter->update_phy_task);
6273 cancel_work_sync(&adapter->print_hang_task);
6275 if (!(netdev->flags & IFF_UP))
6276 e1000_power_down_phy(adapter);
6278 /* Don't lie to e1000_close() down the road. */
6279 if (!down)
6280 clear_bit(__E1000_DOWN, &adapter->state);
6281 unregister_netdev(netdev);
6283 if (pci_dev_run_wake(pdev))
6284 pm_runtime_get_noresume(&pdev->dev);
6287 * Release control of h/w to f/w. If f/w is AMT enabled, this
6288 * would have already happened in close and is redundant.
6290 e1000e_release_hw_control(adapter);
6292 e1000e_reset_interrupt_capability(adapter);
6293 kfree(adapter->tx_ring);
6294 kfree(adapter->rx_ring);
6296 iounmap(adapter->hw.hw_addr);
6297 if (adapter->hw.flash_address)
6298 iounmap(adapter->hw.flash_address);
6299 pci_release_selected_regions(pdev,
6300 pci_select_bars(pdev, IORESOURCE_MEM));
6302 free_netdev(netdev);
6305 pci_disable_pcie_error_reporting(pdev);
6307 pci_disable_device(pdev);
6308 }
6310 /* PCI Error Recovery (ERS) */
6311 static struct pci_error_handlers e1000_err_handler = {
6312 .error_detected = e1000_io_error_detected,
6313 .slot_reset = e1000_io_slot_reset,
6314 .resume = e1000_io_resume,
6317 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6318 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6319 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6320 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6321 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
6322 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6323 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6324 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6325 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6326 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6328 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6329 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6330 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6331 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6333 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6334 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6335 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6337 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6338 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6339 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6341 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6342 board_80003es2lan },
6343 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6344 board_80003es2lan },
6345 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6346 board_80003es2lan },
6347 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6348 board_80003es2lan },
6350 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6351 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6352 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6353 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6354 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6355 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6356 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6357 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6359 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6360 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6361 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6362 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6363 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6364 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6366 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6367 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6369 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6370 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6371 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
6373 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6374 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
6375 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6377 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6378 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6379 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6380 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6382 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6383 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6385 { } /* terminate list */
6387 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6389 #ifdef CONFIG_PM
6390 static const struct dev_pm_ops e1000_pm_ops = {
6391 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6392 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6393 e1000_runtime_resume, e1000_idle)
6394 };
6395 #endif
6397 /* PCI Device API Driver */
6398 static struct pci_driver e1000_driver = {
6399 .name = e1000e_driver_name,
6400 .id_table = e1000_pci_tbl,
6401 .probe = e1000_probe,
6402 .remove = __devexit_p(e1000_remove),
6403 #ifdef CONFIG_PM
6404 .driver.pm = &e1000_pm_ops,
6405 #endif
6406 .shutdown = e1000_shutdown,
6407 .err_handler = &e1000_err_handler
6408 };
6410 /**
6411 * e1000_init_module - Driver Registration Routine
6412 *
6413 * e1000_init_module is the first routine called when the driver is
6414 * loaded. All it does is register with the PCI subsystem.
6415 **/
6416 static int __init e1000_init_module(void)
6418 int ret;
6419 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6420 e1000e_driver_version);
6421 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
6422 ret = pci_register_driver(&e1000_driver);
6424 return ret;
6425 }
6426 module_init(e1000_init_module);
6428 /**
6429 * e1000_exit_module - Driver Exit Cleanup Routine
6430 *
6431 * e1000_exit_module is called just before the driver is removed
6432 * from memory.
6433 **/
6434 static void __exit e1000_exit_module(void)
6436 pci_unregister_driver(&e1000_driver);
6437 }
6438 module_exit(e1000_exit_module);
6441 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6442 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6443 MODULE_LICENSE("GPL");
6444 MODULE_VERSION(DRV_VERSION);