drivers/net/igb/igb_main.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>

#include "igb.h"

#define DRV_VERSION "1.0.8-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";


static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_free_tx_resources(struct igb_adapter *, struct igb_ring *);
static void igb_free_rx_resources(struct igb_adapter *, struct igb_ring *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_adapter *, struct igb_ring *);
static void igb_clean_rx_ring(struct igb_adapter *, struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
                                   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static int igb_clean_rx_ring_msix(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_adapter *, struct igb_ring *);
static int igb_clean(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_adapter *,
                                 struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_adapter *,
                                     struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);

static int igb_suspend(struct pci_dev *, pm_message_t);
#ifdef CONFIG_PM
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->adapter = adapter;
                ring->itr_register = E1000_ITR;

                if (!ring->napi.poll)
                        netif_napi_add(adapter->netdev, &ring->napi, igb_clean,
                                       adapter->napi.weight /
                                       adapter->num_rx_queues);
        }
        return 0;
}
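
/* Note (added commentary, not in the original source): the NAPI weight
 * passed to netif_napi_add() above splits the adapter-wide budget across
 * the Rx rings.  With the default weight of 64 registered for
 * adapter->napi in igb_probe(), a hypothetical 4-queue setup would give
 * each ring a per-poll budget of 64 / 4 = 16 descriptors.
 */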

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                              int tx_queue, int msix_vector)
{
        u32 msixbm = 0;
        struct e1000_hw *hw = &adapter->hw;

        /* The 82575 assigns vectors using a bitmask, which matches the
         * bitmask for the EICR/EIMS/EIMC registers.  To assign one or more
         * queues to a vector, we write the appropriate bits into the
         * MSIXBM register for that vector.
         */
        if (rx_queue > IGB_N0_QUEUE) {
                msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                adapter->rx_ring[rx_queue].eims_value = msixbm;
        }
        if (tx_queue > IGB_N0_QUEUE) {
                msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                adapter->tx_ring[tx_queue].eims_value =
                          E1000_EICR_TX_QUEUE0 << tx_queue;
        }
        array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
}
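
/* Worked example (illustrative only): mapping Rx queue 1 and Tx queue 0
 * onto MSI-X vector 2 would amount to
 *
 *      msixbm = (E1000_EICR_RX_QUEUE0 << 1) | (E1000_EICR_TX_QUEUE0 << 0);
 *      array_wr32(E1000_MSIXBM(0), 2, msixbm);
 *
 * after which that one bit pattern identifies both queues in the
 * EICR/EIMS/EIMC registers.
 */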

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *tx_ring = &adapter->tx_ring[i];
                igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
                adapter->eims_enable_mask |= tx_ring->eims_value;
                if (tx_ring->itr_val)
                        writel(1000000000 / (tx_ring->itr_val * 256),
                               hw->hw_addr + tx_ring->itr_register);
                else
                        writel(1, hw->hw_addr + tx_ring->itr_register);
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *rx_ring = &adapter->rx_ring[i];
                igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
                adapter->eims_enable_mask |= rx_ring->eims_value;
                if (rx_ring->itr_val)
                        writel(1000000000 / (rx_ring->itr_val * 256),
                               hw->hw_addr + rx_ring->itr_register);
                else
                        writel(1, hw->hw_addr + rx_ring->itr_register);
        }

        /* set vector for other causes, i.e. link changes */
        array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);

        /* disable IAM for ICR interrupt bits */
        wr32(E1000_IAM, 0);

        tmp = rd32(E1000_CTRL_EXT);
        /* enable MSI-X PBA support */
        tmp |= E1000_CTRL_EXT_PBA_CLR;

        /* Auto-Mask interrupts upon ICR read. */
        tmp |= E1000_CTRL_EXT_EIAME;
        tmp |= E1000_CTRL_EXT_IRCA;

        wr32(E1000_CTRL_EXT, tmp);
        adapter->eims_enable_mask |= E1000_EIMS_OTHER;

        wrfl();
}
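
/* Illustrative EITR arithmetic (assuming itr_val is in interrupts per
 * second, as the writel() expressions above imply): the register takes
 * an interval expressed in 256 ns units, so the value written is
 *
 *      10^9 / (itr_val * 256)
 *
 * e.g. an itr_val of 8000 ints/s works out to roughly 488.
 */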

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, err = 0, vector = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                sprintf(ring->name, "%s-tx%d", netdev->name, i);
                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_tx, 0, ring->name,
                                  &(adapter->tx_ring[i]));
                if (err)
                        goto out;
                ring->itr_register = E1000_EITR(0) + (vector << 2);
                ring->itr_val = adapter->itr;
                vector++;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                if (strlen(netdev->name) < (IFNAMSIZ - 5))
                        sprintf(ring->name, "%s-rx%d", netdev->name, i);
                else
                        memcpy(ring->name, netdev->name, IFNAMSIZ);
                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_rx, 0, ring->name,
                                  &(adapter->rx_ring[i]));
                if (err)
                        goto out;
                ring->itr_register = E1000_EITR(0) + (vector << 2);
                ring->itr_val = adapter->itr;
                vector++;
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &igb_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;

        adapter->napi.poll = igb_clean_rx_ring_msix;
        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i].napi.poll = adapter->napi.poll;
        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}
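
/* Resulting MSI-X vector layout (summarized from the code above): Tx
 * rings occupy vectors 0 .. num_tx_queues - 1 (named "%s-tx%d"), Rx
 * rings follow (named "%s-rx%d"), and the final vector handles link and
 * other causes.  igb_free_irq() below walks the entries in the same
 * order.
 */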

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->msi_enabled)
                pci_disable_msi(adapter->pdev);
}


/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev, adapter->msix_entries, numvecs);
        if (err == 0)
                return;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
        adapter->num_rx_queues = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->msi_enabled = 1;
}
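
/* Summary (added commentary): together with igb_request_irq() below this
 * forms a three-step fallback ladder -- MSI-X with one vector per Tx/Rx
 * queue plus one for link changes; failing that, a single MSI with
 * num_rx_queues forced to 1; failing that too, a shared legacy INTx
 * line.
 */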

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err) {
                        /* enable IAM, auto-mask,
                         * DO NOT USE EIAM or IAM in legacy mode */
                        wr32(E1000_IAM, IMS_ENABLE_MASK);
                        goto request_done;
                }
                /* fall back to MSI */
                igb_reset_interrupt_capability(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->msi_enabled = 1;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_rx_queues = 1;
                igb_alloc_queues(adapter);
        }
        if (adapter->msi_enabled) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, netdev);
                if (!err)
                        goto request_done;
                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->msi_enabled = 0;
        }

        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, netdev);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->msix_entries) {
                int vector = 0, i;

                for (i = 0; i < adapter->num_tx_queues; i++)
                        free_irq(adapter->msix_entries[vector++].vector,
                                &(adapter->tx_ring[i]));
                for (i = 0; i < adapter->num_rx_queues; i++)
                        free_irq(adapter->msix_entries[vector++].vector,
                                &(adapter->rx_ring[i]));

                free_irq(adapter->msix_entries[vector++].vector, netdev);
                return;
        }

        free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                wr32(E1000_EIMC, ~0);
                wr32(E1000_EIAC, 0);
        }
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                wr32(E1000_EIAC, adapter->eims_enable_mask);
                wr32(E1000_IMS, E1000_IMS_LSC);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK);
        }
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (adapter->hw.mng_cookie.status &
                            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                                igb_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

                        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
                            (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                igb_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void igb_init_manageability(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc2h = rd32(E1000_MANC2H);
                u32 manc = rd32(E1000_MANC);

                /* enable receiving management packets to the host; this will
                 * probably generate destination unreachable messages from the
                 * host OS, but the packets will be handled on SMBUS */
                manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
                manc2h |= E1000_MNG2HOST_PORT_623;
                manc2h |= E1000_MNG2HOST_PORT_664;
                wr32(E1000_MANC2H, manc2h);

                wr32(E1000_MANC, manc);
        }
}
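
/* Background note (added commentary): ports 623 and 664 forwarded above
 * are, to the best of our knowledge, the standard RMCP and secure RMCP
 * management ports, which lets ASF/BMC traffic keep flowing to the
 * management controller while the driver owns the device.
 */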

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_multi(netdev);

        igb_restore_vlan(adapter);
        igb_init_manageability(adapter);

        igb_configure_tx(adapter);
        igb_setup_rctl(adapter);
        igb_configure_rx(adapter);
        /* call IGB_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(adapter, ring, IGB_DESC_UNUSED(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}


/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        napi_enable(&adapter->napi);

        if (adapter->msix_entries) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        napi_enable(&adapter->rx_ring[i].napi);
                igb_configure_msix(adapter);
        }

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* Fire a link change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);
        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_stop_queue(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        napi_disable(&adapter->napi);

        if (adapter->msix_entries)
                for (i = 0; i < adapter->num_rx_queues; i++)
                        napi_disable(&adapter->rx_ring[i].napi);
        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_fc_info *fc = &adapter->hw.fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition the PBA for MTUs greater than 9k.
         * CTRL.RST is required for the change to take effect.
         */
        pba = E1000_PBA_34K;

        if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(struct e1000_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
        }
        wr32(E1000_PBA, pba);

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                  ((pba << 10) - adapter->max_frame_size));

        fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
        fc->low_water = fc->high_water - 8;
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->type = fc->original_type;

        /* Allow time for pending master requests to run */
        adapter->hw.mac.ops.reset_hw(&adapter->hw);
        wr32(E1000_WUC, 0);

        if (adapter->hw.mac.ops.init_hw(&adapter->hw))
                dev_err(&adapter->pdev->dev, "Hardware Error\n");

        igb_update_mng_vlan(adapter);

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

        igb_reset_adaptive(&adapter->hw);
        if (adapter->hw.phy.ops.get_phy_info)
                adapter->hw.phy.ops.get_phy_info(&adapter->hw);
}
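
/* Worked PBA example (illustrative, assuming a 9000-byte MTU so that
 * max_frame_size = 9000 + ETH_HLEN + ETH_FCS_LEN = 9018):
 *
 *      min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> ALIGN(1024) -> 18 KB
 *      min_rx_space = 9018                        -> ALIGN(1024) ->  9 KB
 *
 * so a Tx allocation short of 18 KB is grown at the expense of the Rx
 * allocation, but the Rx side is never squeezed below 9 KB.
 */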

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        static int cards_found;
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        err = pci_request_regions(pdev, igb_driver_name);
        if (err)
                goto err_pci_reg;

        pci_set_master(pdev);
        pci_save_state(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct igb_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        err = -EIO;
        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
        if (!adapter->hw.hw_addr)
                goto err_ioremap;

        netdev->open = &igb_open;
        netdev->stop = &igb_close;
        netdev->get_stats = &igb_get_stats;
        netdev->set_multicast_list = &igb_set_multi;
        netdev->set_mac_address = &igb_set_mac;
        netdev->change_mtu = &igb_change_mtu;
        netdev->do_ioctl = &igb_ioctl;
        igb_set_ethtool_ops(netdev);
        netdev->tx_timeout = &igb_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, igb_clean, 64);
        netdev->vlan_rx_register = igb_vlan_rx_register;
        netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = igb_netpoll;
#endif
        netdev->hard_start_xmit = &igb_xmit_frame_adv;

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;

        adapter->bd_number = cards_found;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        /* setup the private structure */
        hw->back = adapter;
        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
                goto err_hw_init;

        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        igb_get_bus_info_pcie(hw);

        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = AUTO_ALL_MODES;
                hw->phy.disable_polarity_correction = false;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        if (igb_check_reset_block(hw))
                dev_info(&pdev->dev,
                        "PHY reset is blocked due to SOL/IDER session.\n");

        netdev->features = NETIF_F_SG |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->features |= NETIF_F_LLTX;
        adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
        hw->mac.ops.reset_hw(hw);

        /* make sure the NVM is good */
        if (igb_validate_nvm_checksum(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the NVM */
        if (hw->mac.ops.read_mac_addr(hw))
                dev_err(&pdev->dev, "NVM Read Error\n");

        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &igb_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &igb_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

        /* Initialize link & ring properties that are user-changeable */
        adapter->tx_ring->count = 256;
        for (i = 0; i < adapter->num_tx_queues; i++)
                adapter->tx_ring[i].count = adapter->tx_ring->count;
        adapter->rx_ring->count = 256;
        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i].count = adapter->rx_ring->count;

        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;

        hw->fc.original_type = e1000_fc_default;
        hw->fc.type = e1000_fc_default;

        adapter->itr_setting = 3;
        adapter->itr = IGB_START_ITR;

        igb_validate_mdi_setting(hw);

        adapter->rx_csum = 1;

        /* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter. */
        if (hw->bus.func == 0 ||
            hw->device_id == E1000_DEV_ID_82575EB_COPPER)
                hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
                                     &eeprom_data);

        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
         * lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;

        /* reset the hardware with the new settings */
        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        /* tell the stack to leave us alone until igb_open() is called */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev,
                 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
                 netdev->name,
                 ((hw->bus.speed == e1000_bus_speed_2500)
                  ? "2.5Gb/s" : "unknown"),
                 ((hw->bus.width == e1000_bus_width_pcie_x4)
                  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
                  ? "Width x1" : "unknown"),
                 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
                 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

        igb_read_part_num(hw, &part_num);
        dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
                (part_num >> 8), (part_num & 0xff));

        dev_info(&pdev->dev,
                "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                adapter->msix_entries ? "MSI-X" :
                adapter->msi_enabled ? "MSI" : "legacy",
                adapter->num_rx_queues, adapter->num_tx_queues);

        cards_found++;
        return 0;

err_register:
        igb_release_hw_control(adapter);
err_eeprom:
        if (!igb_check_reset_block(hw))
                hw->phy.ops.reset_phy(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);

        igb_remove_device(hw);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
err_sw_init:
err_hw_init:
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        /* flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled */
        set_bit(__IGB_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        flush_scheduled_work();

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        unregister_netdev(netdev);

        if (!igb_check_reset_block(&adapter->hw))
                adapter->hw.phy.ops.reset_phy(&adapter->hw);

        igb_remove_device(&adapter->hw);
        igb_reset_interrupt_capability(adapter);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        iounmap(adapter->hw.hw_addr);
        if (adapter->hw.flash_address)
                iounmap(adapter->hw.flash_address);
        pci_release_regions(pdev);

        free_netdev(netdev);

        pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        adapter->rx_ps_hdr_size = 0; /* disable packet split */
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

        /* Number of supported queues; having more queues than CPUs doesn't
         * make sense. */
        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = min(IGB_MAX_RX_QUEUES, num_online_cpus());

        igb_set_interrupt_capability(adapter);

        if (igb_alloc_queues(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);

        set_bit(__IGB_DOWN, &adapter->state);
        return 0;
}
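
/* Illustrative sizing (derived from the assignments above): with the
 * default 1500-byte MTU, max_frame_size = 1500 + 14 (ETH_HLEN) + 4
 * (ETH_FCS_LEN) = 1518 bytes and min_frame_size = 60 (ETH_ZLEN) + 4 =
 * 64 bytes, i.e. the classic Ethernet frame-size limits.
 */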

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;
        int i;

        /* disallow open during test */
        if (test_bit(__IGB_TESTING, &adapter->state))
                return -EBUSY;

        /* allocate transmit descriptors */
        err = igb_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        /* allocate receive descriptors */
        err = igb_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        /* e1000_power_up_phy(adapter); */

        adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
                igb_update_mng_vlan(adapter);

        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call request_irq, so we have to setup our
         * clean_rx handler before we do so. */
        igb_configure(adapter);

        err = igb_request_irq(adapter);
        if (err)
                goto err_req_irq;

        /* From here on the code is the same as igb_up() */
        clear_bit(__IGB_DOWN, &adapter->state);

        napi_enable(&adapter->napi);
        if (adapter->msix_entries)
                for (i = 0; i < adapter->num_rx_queues; i++)
                        napi_enable(&adapter->rx_ring[i].napi);

        igb_irq_enable(adapter);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        /* Fire a link status change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);

        return 0;

err_req_irq:
        igb_release_hw_control(adapter);
        /* e1000_power_down_phy(adapter); */
        igb_free_all_rx_resources(adapter);
err_setup_rx:
        igb_free_all_tx_resources(adapter);
err_setup_tx:
        igb_reset(adapter);

        return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);

        WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
        igb_down(adapter);

        igb_free_irq(adapter);

        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);

        /* kill manageability vlan ID if supported, but not if a vlan with
         * the same ID is registered on the host OS (let 8021q kill it) */
        if ((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
             !(adapter->vlgrp &&
               vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
                igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

        return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
                           struct igb_ring *tx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct igb_buffer) * tx_ring->count;
        tx_ring->buffer_info = vmalloc(size);
        if (!tx_ring->buffer_info)
                goto err;
        memset(tx_ring->buffer_info, 0, size);

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
                        + sizeof(u32);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
                                             &tx_ring->dma);

        if (!tx_ring->desc)
                goto err;

        tx_ring->adapter = adapter;
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        spin_lock_init(&tx_ring->tx_clean_lock);
        spin_lock_init(&tx_ring->tx_lock);
        return 0;

err:
        vfree(tx_ring->buffer_info);
        dev_err(&adapter->pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
}
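
/* Worked sizing example (illustrative, assuming the default ring count
 * of 256 set in igb_probe()): 256 descriptors * 16 bytes each, plus the
 * u32 head write-back word, is 4100 bytes; ALIGN(..., 4096) rounds that
 * up to 8192, i.e. two pages of coherent DMA memory.
 */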

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (err) {
                        dev_err(&adapter->pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
                                igb_free_tx_resources(adapter,
                                                      &adapter->tx_ring[i]);
                        break;
                }
        }

        return err;
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
        u64 tdba, tdwba;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;
        u32 txdctl, txctrl;
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);

                wr32(E1000_TDLEN(i),
                     ring->count * sizeof(struct e1000_tx_desc));
                tdba = ring->dma;
                wr32(E1000_TDBAL(i), tdba & 0x00000000ffffffffULL);
                wr32(E1000_TDBAH(i), tdba >> 32);

                tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
                tdwba |= 1; /* enable head wb */
                wr32(E1000_TDWBAL(i), tdwba & 0x00000000ffffffffULL);
                wr32(E1000_TDWBAH(i), tdwba >> 32);

                ring->head = E1000_TDH(i);
                ring->tail = E1000_TDT(i);
                writel(0, hw->hw_addr + ring->tail);
                writel(0, hw->hw_addr + ring->head);
                txdctl = rd32(E1000_TXDCTL(i));
                txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
                wr32(E1000_TXDCTL(i), txdctl);

                /* Turn off Relaxed Ordering on head write-backs.  The
                 * writebacks MUST be delivered in order or it will
                 * completely screw up our bookkeeping.
                 */
                txctrl = rd32(E1000_DCA_TXCTRL(i));
                txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
                wr32(E1000_DCA_TXCTRL(i), txctrl);
        }

        /* Use the default values for the Tx Inter Packet Gap (IPG) timer */

        /* Program the Transmit Control Register */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

        igb_config_collision_dist(hw);

        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

        /* Enable transmits */
        tctl |= E1000_TCTL_EN;

        wr32(E1000_TCTL, tctl);
}
1471
1472 /**
1473  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1474  * @adapter: board private structure
1475  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
1476  *
1477  * Returns 0 on success, negative on failure
1478  **/
1480 int igb_setup_rx_resources(struct igb_adapter *adapter,
1481                            struct igb_ring *rx_ring)
1482 {
1483         struct pci_dev *pdev = adapter->pdev;
1484         int size, desc_len;
1485
1486         size = sizeof(struct igb_buffer) * rx_ring->count;
1487         rx_ring->buffer_info = vmalloc(size);
1488         if (!rx_ring->buffer_info)
1489                 goto err;
1490         memset(rx_ring->buffer_info, 0, size);
1491
1492         desc_len = sizeof(union e1000_adv_rx_desc);
1493
1494         /* Round up to nearest 4K */
1495         rx_ring->size = rx_ring->count * desc_len;
1496         rx_ring->size = ALIGN(rx_ring->size, 4096);
1497
1498         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1499                                              &rx_ring->dma);
1500
1501         if (!rx_ring->desc)
1502                 goto err;
1503
1504         rx_ring->next_to_clean = 0;
1505         rx_ring->next_to_use = 0;
1506         rx_ring->pending_skb = NULL;
1507
1508         rx_ring->adapter = adapter;
1509         /* FIXME: do we want to setup ring->napi->poll here? */
1510         rx_ring->napi.poll = adapter->napi.poll;
1511
1512         return 0;
1513
1514 err:
1515         vfree(rx_ring->buffer_info);
1516         dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1517                 "the receive descriptor ring\n");
1518         return -ENOMEM;
1519 }
1520
1521 /**
1522  * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1523  *                                (Descriptors) for all queues
1524  * @adapter: board private structure
1525  *
1526  * Returns 0 on success, negative on failure
1527  **/
1528 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1529 {
1530         int i, err = 0;
1531
1532         for (i = 0; i < adapter->num_rx_queues; i++) {
1533                 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1534                 if (err) {
1535                         dev_err(&adapter->pdev->dev,
1536                                 "Allocation for Rx Queue %u failed\n", i);
1537                         for (i--; i >= 0; i--)
1538                                 igb_free_rx_resources(adapter,
1539                                                         &adapter->rx_ring[i]);
1540                         break;
1541                 }
1542         }
1543
1544         return err;
1545 }
1546
1547 /**
1548  * igb_setup_rctl - configure the receive control registers
1549  * @adapter: board private structure
1550  **/
1551 static void igb_setup_rctl(struct igb_adapter *adapter)
1552 {
1553         struct e1000_hw *hw = &adapter->hw;
1554         u32 rctl;
1555         u32 srrctl = 0;
1556         int i;
1557
1558         rctl = rd32(E1000_RCTL);
1559
1560         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1561
1562         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1563                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1564                 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1565
1566         /* Leave CRC stripping disabled (E1000_RCTL_SECRC not set);
1567          * enabling it breaks BMC firmware connected over SMBUS:
1568          * rctl |= E1000_RCTL_SECRC;
1569          */
1570
1571         rctl &= ~E1000_RCTL_SBP;
1572
1573         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1574                 rctl &= ~E1000_RCTL_LPE;
1575         else
1576                 rctl |= E1000_RCTL_LPE;
1577         if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
1578                 /* Setup buffer sizes */
1579                 rctl &= ~E1000_RCTL_SZ_4096;
1580                 rctl |= E1000_RCTL_BSEX;
1581                 switch (adapter->rx_buffer_len) {
1582                 case IGB_RXBUFFER_256:
1583                         rctl |= E1000_RCTL_SZ_256;
1584                         rctl &= ~E1000_RCTL_BSEX;
1585                         break;
1586                 case IGB_RXBUFFER_512:
1587                         rctl |= E1000_RCTL_SZ_512;
1588                         rctl &= ~E1000_RCTL_BSEX;
1589                         break;
1590                 case IGB_RXBUFFER_1024:
1591                         rctl |= E1000_RCTL_SZ_1024;
1592                         rctl &= ~E1000_RCTL_BSEX;
1593                         break;
1594                 case IGB_RXBUFFER_2048:
1595                 default:
1596                         rctl |= E1000_RCTL_SZ_2048;
1597                         rctl &= ~E1000_RCTL_BSEX;
1598                         break;
1599                 case IGB_RXBUFFER_4096:
1600                         rctl |= E1000_RCTL_SZ_4096;
1601                         break;
1602                 case IGB_RXBUFFER_8192:
1603                         rctl |= E1000_RCTL_SZ_8192;
1604                         break;
1605                 case IGB_RXBUFFER_16384:
1606                         rctl |= E1000_RCTL_SZ_16384;
1607                         break;
1608                 }
1609         } else {
1610                 rctl &= ~E1000_RCTL_BSEX;
1611                 srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1612         }
1613
1614         /* 82575 and greater support packet-split where the protocol
1615          * header is placed in skb->data and the packet data is
1616          * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1617          * In the case of a non-split, skb->data is linearly filled,
1618          * followed by the page buffers.  Therefore, skb->data is
1619          * sized to hold the largest protocol header.
1620          */
1621         /* allocations using alloc_page take too long for regular MTU,
1622          * so only enable packet split for jumbo frames */
1623         if (rctl & E1000_RCTL_LPE) {
1624                 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1625                 srrctl = adapter->rx_ps_hdr_size <<
1626                          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1627                 /* buffer size is ALWAYS one page */
1628                 srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1629                 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1630         } else {
1631                 adapter->rx_ps_hdr_size = 0;
1632                 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1633         }
1634
1635         for (i = 0; i < adapter->num_rx_queues; i++)
1636                 wr32(E1000_SRRCTL(i), srrctl);
1637
1638         wr32(E1000_RCTL, rctl);
1639 }
1640
1641 /**
1642  * igb_configure_rx - Configure receive Unit after Reset
1643  * @adapter: board private structure
1644  *
1645  * Configure the Rx unit of the MAC after a reset.
1646  **/
1647 static void igb_configure_rx(struct igb_adapter *adapter)
1648 {
1649         u64 rdba;
1650         struct e1000_hw *hw = &adapter->hw;
1651         u32 rctl, rxcsum;
1652         u32 rxdctl;
1653         int i;
1654
1655         /* disable receives while setting up the descriptors */
1656         rctl = rd32(E1000_RCTL);
1657         wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1658         wrfl();
1659         mdelay(10);
1660
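        /* For reference, adapter->itr is in interrupts/sec while the ITR
         * register counts the inter-interrupt gap in 256 ns units, hence
         * the programmed value is (10^9 / itr) / 256 = 10^9 / (itr * 256);
         * e.g. itr = 8000 ints/s gives 1000000000 / (8000 * 256) ~= 488.
         */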
1661         if (adapter->itr_setting > 3)
1662                 wr32(E1000_ITR,
1663                                 1000000000 / (adapter->itr * 256));
1664
1665         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1666          * the Base and Length of the Rx Descriptor Ring */
1667         for (i = 0; i < adapter->num_rx_queues; i++) {
1668                 struct igb_ring *ring = &(adapter->rx_ring[i]);
1669                 rdba = ring->dma;
1670                 wr32(E1000_RDBAL(i),
1671                                 rdba & 0x00000000ffffffffULL);
1672                 wr32(E1000_RDBAH(i), rdba >> 32);
1673                 wr32(E1000_RDLEN(i),
1674                                ring->count * sizeof(union e1000_adv_rx_desc));
1675
1676                 ring->head = E1000_RDH(i);
1677                 ring->tail = E1000_RDT(i);
1678                 writel(0, hw->hw_addr + ring->tail);
1679                 writel(0, hw->hw_addr + ring->head);
1680
1681                 rxdctl = rd32(E1000_RXDCTL(i));
1682                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1683                 rxdctl &= 0xFFF00000;
1684                 rxdctl |= IGB_RX_PTHRESH;
1685                 rxdctl |= IGB_RX_HTHRESH << 8;
1686                 rxdctl |= IGB_RX_WTHRESH << 16;
1687                 wr32(E1000_RXDCTL(i), rxdctl);
1688         }
1689
1690         if (adapter->num_rx_queues > 1) {
1691                 u32 random[10];
1692                 u32 mrqc;
1693                 u32 j, shift;
1694                 union e1000_reta {
1695                         u32 dword;
1696                         u8  bytes[4];
1697                 } reta;
1698
1699                 get_random_bytes(&random[0], 40);
1700
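                /* The 40 random bytes above are the 10 32-bit RSS key words
                 * written to RSSRK below.  The redirection table is 32
                 * dwords of four one-byte entries (128 entries total); each
                 * entry is filled round-robin with a queue number, shifted
                 * left by 6 so it lands in the bits the hardware reads.
                 */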
1701                 shift = 6;
1702                 for (j = 0; j < (32 * 4); j++) {
1703                         reta.bytes[j & 3] =
1704                                 (j % adapter->num_rx_queues) << shift;
1705                         if ((j & 3) == 3)
1706                                 writel(reta.dword,
1707                                        hw->hw_addr + E1000_RETA(0) + (j & ~3));
1708                 }
1709                 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1710
1711                 /* Fill out hash function seeds */
1712                 for (j = 0; j < 10; j++)
1713                         array_wr32(E1000_RSSRK(0), j, random[j]);
1714
1715                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1716                          E1000_MRQC_RSS_FIELD_IPV4_TCP);
1717                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
1718                          E1000_MRQC_RSS_FIELD_IPV6_TCP);
1719                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
1720                          E1000_MRQC_RSS_FIELD_IPV6_UDP);
1721                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
1722                          E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
1723
1724
1725                 wr32(E1000_MRQC, mrqc);
1726
1727                 /* Multiqueue and raw packet checksumming are mutually
1728                  * exclusive.  Note that this is not the same as TCP/IP
1729                  * checksumming, which works fine. */
1730                 rxcsum = rd32(E1000_RXCSUM);
1731                 rxcsum |= E1000_RXCSUM_PCSD;
1732                 wr32(E1000_RXCSUM, rxcsum);
1733         } else {
1734                 /* Enable Receive Checksum Offload for TCP and UDP */
1735                 rxcsum = rd32(E1000_RXCSUM);
1736                 if (adapter->rx_csum) {
1737                         rxcsum |= E1000_RXCSUM_TUOFL;
1738
1739                         /* Enable IPv4 payload checksum for UDP fragments
1740                          * Must be used in conjunction with packet-split. */
1741                         if (adapter->rx_ps_hdr_size)
1742                                 rxcsum |= E1000_RXCSUM_IPPCSE;
1743                 } else {
1744                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1745                         /* don't need to clear IPPCSE as it defaults to 0 */
1746                 }
1747                 wr32(E1000_RXCSUM, rxcsum);
1748         }
1749
1750         if (adapter->vlgrp)
1751                 wr32(E1000_RLPML,
1752                                 adapter->max_frame_size + VLAN_TAG_SIZE);
1753         else
1754                 wr32(E1000_RLPML, adapter->max_frame_size);
1755
1756         /* Enable Receives */
1757         wr32(E1000_RCTL, rctl);
1758 }
1759
1760 /**
1761  * igb_free_tx_resources - Free Tx Resources per Queue
1762  * @adapter: board private structure
1763  * @tx_ring: Tx descriptor ring for a specific queue
1764  *
1765  * Free all transmit software resources
1766  **/
1767 static void igb_free_tx_resources(struct igb_adapter *adapter,
1768                                   struct igb_ring *tx_ring)
1769 {
1770         struct pci_dev *pdev = adapter->pdev;
1771
1772         igb_clean_tx_ring(adapter, tx_ring);
1773
1774         vfree(tx_ring->buffer_info);
1775         tx_ring->buffer_info = NULL;
1776
1777         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1778
1779         tx_ring->desc = NULL;
1780 }
1781
1782 /**
1783  * igb_free_all_tx_resources - Free Tx Resources for All Queues
1784  * @adapter: board private structure
1785  *
1786  * Free all transmit software resources
1787  **/
1788 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
1789 {
1790         int i;
1791
1792         for (i = 0; i < adapter->num_tx_queues; i++)
1793                 igb_free_tx_resources(adapter, &adapter->tx_ring[i]);
1794 }
1795
1796 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
1797                                            struct igb_buffer *buffer_info)
1798 {
1799         if (buffer_info->dma) {
1800                 pci_unmap_page(adapter->pdev,
1801                                 buffer_info->dma,
1802                                 buffer_info->length,
1803                                 PCI_DMA_TODEVICE);
1804                 buffer_info->dma = 0;
1805         }
1806         if (buffer_info->skb) {
1807                 dev_kfree_skb_any(buffer_info->skb);
1808                 buffer_info->skb = NULL;
1809         }
1810         buffer_info->time_stamp = 0;
1811         /* buffer_info must be completely set up in the transmit path */
1812 }
1813
1814 /**
1815  * igb_clean_tx_ring - Free Tx Buffers
1816  * @adapter: board private structure
1817  * @tx_ring: ring to be cleaned
1818  **/
1819 static void igb_clean_tx_ring(struct igb_adapter *adapter,
1820                               struct igb_ring *tx_ring)
1821 {
1822         struct igb_buffer *buffer_info;
1823         unsigned long size;
1824         unsigned int i;
1825
1826         if (!tx_ring->buffer_info)
1827                 return;
1828         /* Free all the Tx ring sk_buffs */
1829
1830         for (i = 0; i < tx_ring->count; i++) {
1831                 buffer_info = &tx_ring->buffer_info[i];
1832                 igb_unmap_and_free_tx_resource(adapter, buffer_info);
1833         }
1834
1835         size = sizeof(struct igb_buffer) * tx_ring->count;
1836         memset(tx_ring->buffer_info, 0, size);
1837
1838         /* Zero out the descriptor ring */
1839
1840         memset(tx_ring->desc, 0, tx_ring->size);
1841
1842         tx_ring->next_to_use = 0;
1843         tx_ring->next_to_clean = 0;
1844
1845         writel(0, adapter->hw.hw_addr + tx_ring->head);
1846         writel(0, adapter->hw.hw_addr + tx_ring->tail);
1847 }
1848
1849 /**
1850  * igb_clean_all_tx_rings - Free Tx Buffers for all queues
1851  * @adapter: board private structure
1852  **/
1853 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
1854 {
1855         int i;
1856
1857         for (i = 0; i < adapter->num_tx_queues; i++)
1858                 igb_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1859 }
1860
1861 /**
1862  * igb_free_rx_resources - Free Rx Resources
1863  * @adapter: board private structure
1864  * @rx_ring: ring to clean the resources from
1865  *
1866  * Free all receive software resources
1867  **/
1868 static void igb_free_rx_resources(struct igb_adapter *adapter,
1869                                   struct igb_ring *rx_ring)
1870 {
1871         struct pci_dev *pdev = adapter->pdev;
1872
1873         igb_clean_rx_ring(adapter, rx_ring);
1874
1875         vfree(rx_ring->buffer_info);
1876         rx_ring->buffer_info = NULL;
1877
1878         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1879
1880         rx_ring->desc = NULL;
1881 }
1882
1883 /**
1884  * igb_free_all_rx_resources - Free Rx Resources for All Queues
1885  * @adapter: board private structure
1886  *
1887  * Free all receive software resources
1888  **/
1889 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
1890 {
1891         int i;
1892
1893         for (i = 0; i < adapter->num_rx_queues; i++)
1894                 igb_free_rx_resources(adapter, &adapter->rx_ring[i]);
1895 }
1896
1897 /**
1898  * igb_clean_rx_ring - Free Rx Buffers per Queue
1899  * @adapter: board private structure
1900  * @rx_ring: ring to free buffers from
1901  **/
1902 static void igb_clean_rx_ring(struct igb_adapter *adapter,
1903                               struct igb_ring *rx_ring)
1904 {
1905         struct igb_buffer *buffer_info;
1906         struct pci_dev *pdev = adapter->pdev;
1907         unsigned long size;
1908         unsigned int i;
1909
1910         if (!rx_ring->buffer_info)
1911                 return;
1912         /* Free all the Rx ring sk_buffs */
1913         for (i = 0; i < rx_ring->count; i++) {
1914                 buffer_info = &rx_ring->buffer_info[i];
1915                 if (buffer_info->dma) {
1916                         if (adapter->rx_ps_hdr_size)
1917                                 pci_unmap_single(pdev, buffer_info->dma,
1918                                                  adapter->rx_ps_hdr_size,
1919                                                  PCI_DMA_FROMDEVICE);
1920                         else
1921                                 pci_unmap_single(pdev, buffer_info->dma,
1922                                                  adapter->rx_buffer_len,
1923                                                  PCI_DMA_FROMDEVICE);
1924                         buffer_info->dma = 0;
1925                 }
1926
1927                 if (buffer_info->skb) {
1928                         dev_kfree_skb(buffer_info->skb);
1929                         buffer_info->skb = NULL;
1930                 }
1931                 if (buffer_info->page) {
1932                         pci_unmap_page(pdev, buffer_info->page_dma,
1933                                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
1934                         put_page(buffer_info->page);
1935                         buffer_info->page = NULL;
1936                         buffer_info->page_dma = 0;
1937                 }
1938         }
1939
1940         /* there also may be some cached data from a chained receive */
1941         if (rx_ring->pending_skb) {
1942                 dev_kfree_skb(rx_ring->pending_skb);
1943                 rx_ring->pending_skb = NULL;
1944         }
1945
1946         size = sizeof(struct igb_buffer) * rx_ring->count;
1947         memset(rx_ring->buffer_info, 0, size);
1948
1949         /* Zero out the descriptor ring */
1950         memset(rx_ring->desc, 0, rx_ring->size);
1951
1952         rx_ring->next_to_clean = 0;
1953         rx_ring->next_to_use = 0;
1954
1955         writel(0, adapter->hw.hw_addr + rx_ring->head);
1956         writel(0, adapter->hw.hw_addr + rx_ring->tail);
1957 }
1958
1959 /**
1960  * igb_clean_all_rx_rings - Free Rx Buffers for all queues
1961  * @adapter: board private structure
1962  **/
1963 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
1964 {
1965         int i;
1966
1967         for (i = 0; i < adapter->num_rx_queues; i++)
1968                 igb_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1969 }
1970
1971 /**
1972  * igb_set_mac - Change the Ethernet Address of the NIC
1973  * @netdev: network interface device structure
1974  * @p: pointer to an address structure
1975  *
1976  * Returns 0 on success, negative on failure
1977  **/
1978 static int igb_set_mac(struct net_device *netdev, void *p)
1979 {
1980         struct igb_adapter *adapter = netdev_priv(netdev);
1981         struct sockaddr *addr = p;
1982
1983         if (!is_valid_ether_addr(addr->sa_data))
1984                 return -EADDRNOTAVAIL;
1985
1986         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1987         memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
1988
1989         adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1990
1991         return 0;
1992 }
1993
1994 /**
1995  * igb_set_multi - Multicast and Promiscuous mode set
1996  * @netdev: network interface device structure
1997  *
1998  * The set_multi entry point is called whenever the multicast address
1999  * list or the network interface flags are updated.  This routine is
2000  * responsible for configuring the hardware for proper multicast,
2001  * promiscuous mode, and all-multi behavior.
2002  **/
2003 static void igb_set_multi(struct net_device *netdev)
2004 {
2005         struct igb_adapter *adapter = netdev_priv(netdev);
2006         struct e1000_hw *hw = &adapter->hw;
2007         struct e1000_mac_info *mac = &hw->mac;
2008         struct dev_mc_list *mc_ptr;
2009         u8  *mta_list;
2010         u32 rctl;
2011         int i;
2012
2013         /* Check for Promiscuous and All Multicast modes */
2014
2015         rctl = rd32(E1000_RCTL);
2016
2017         if (netdev->flags & IFF_PROMISC)
2018                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2019         else if (netdev->flags & IFF_ALLMULTI) {
2020                 rctl |= E1000_RCTL_MPE;
2021                 rctl &= ~E1000_RCTL_UPE;
2022         } else
2023                 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2024
2025         wr32(E1000_RCTL, rctl);
2026
2027         if (!netdev->mc_count) {
2028                 /* nothing to program, so clear mc list */
2029                 igb_update_mc_addr_list(hw, NULL, 0, 1,
2030                                           mac->rar_entry_count);
2031                 return;
2032         }
2033
2034         mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2035         if (!mta_list)
2036                 return;
2037
2038         /* The shared function expects a packed array of only addresses. */
2039         mc_ptr = netdev->mc_list;
2040
2041         for (i = 0; i < netdev->mc_count; i++) {
2042                 if (!mc_ptr)
2043                         break;
2044                 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2045                 mc_ptr = mc_ptr->next;
2046         }
2047         igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
2048         kfree(mta_list);
2049 }
2050
2051 /* Need to wait a few seconds after link up to get diagnostic information from
2052  * the phy */
2053 static void igb_update_phy_info(unsigned long data)
2054 {
2055         struct igb_adapter *adapter = (struct igb_adapter *) data;
2056         if (adapter->hw.phy.ops.get_phy_info)
2057                 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2058 }
2059
2060 /**
2061  * igb_watchdog - Timer Call-back
2062  * @data: pointer to adapter cast into an unsigned long
2063  **/
2064 static void igb_watchdog(unsigned long data)
2065 {
2066         struct igb_adapter *adapter = (struct igb_adapter *)data;
2067         /* Do the rest outside of interrupt context */
2068         schedule_work(&adapter->watchdog_task);
2069 }
2070
2071 static void igb_watchdog_task(struct work_struct *work)
2072 {
2073         struct igb_adapter *adapter = container_of(work,
2074                                         struct igb_adapter, watchdog_task);
2075         struct e1000_hw *hw = &adapter->hw;
2076
2077         struct net_device *netdev = adapter->netdev;
2078         struct igb_ring *tx_ring = adapter->tx_ring;
2079         struct e1000_mac_info *mac = &adapter->hw.mac;
2080         u32 link;
2081         s32 ret_val;
2082
2083         if ((netif_carrier_ok(netdev)) &&
2084             (rd32(E1000_STATUS) & E1000_STATUS_LU))
2085                 goto link_up;
2086
2087         ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2088         if ((ret_val == E1000_ERR_PHY) &&
2089             (hw->phy.type == e1000_phy_igp_3) &&
2090             (rd32(E1000_CTRL) &
2091              E1000_PHY_CTRL_GBE_DISABLE))
2092                 dev_info(&adapter->pdev->dev,
2093                          "Gigabit has been disabled, downgrading speed\n");
2094
2095         if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2096             !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2097                 link = mac->serdes_has_link;
2098         else
2099                 link = rd32(E1000_STATUS) &
2100                                       E1000_STATUS_LU;
2101
2102         if (link) {
2103                 if (!netif_carrier_ok(netdev)) {
2104                         u32 ctrl;
2105                         hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2106                                                    &adapter->link_speed,
2107                                                    &adapter->link_duplex);
2108
2109                         ctrl = rd32(E1000_CTRL);
2110                         dev_info(&adapter->pdev->dev,
2111                                  "NIC Link is Up %d Mbps %s, "
2112                                  "Flow Control: %s\n",
2113                                  adapter->link_speed,
2114                                  adapter->link_duplex == FULL_DUPLEX ?
2115                                  "Full Duplex" : "Half Duplex",
2116                                  ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2117                                  E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2118                                  E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2119                                  E1000_CTRL_TFCE) ? "TX" : "None")));
2120
2121                         /* tweak tx_queue_len according to speed/duplex and
2122                          * adjust the timeout factor */
2123                         netdev->tx_queue_len = adapter->tx_queue_len;
2124                         adapter->tx_timeout_factor = 1;
2125                         switch (adapter->link_speed) {
2126                         case SPEED_10:
2127                                 netdev->tx_queue_len = 10;
2128                                 adapter->tx_timeout_factor = 14;
2129                                 break;
2130                         case SPEED_100:
2131                                 netdev->tx_queue_len = 100;
2132                                 /* maybe add some timeout factor ? */
2133                                 break;
2134                         }
2135
2136                         netif_carrier_on(netdev);
2137                         netif_wake_queue(netdev);
2138
2139                         if (!test_bit(__IGB_DOWN, &adapter->state))
2140                                 mod_timer(&adapter->phy_info_timer,
2141                                           round_jiffies(jiffies + 2 * HZ));
2142                 }
2143         } else {
2144                 if (netif_carrier_ok(netdev)) {
2145                         adapter->link_speed = 0;
2146                         adapter->link_duplex = 0;
2147                         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2148                         netif_carrier_off(netdev);
2149                         netif_stop_queue(netdev);
2150                         if (!test_bit(__IGB_DOWN, &adapter->state))
2151                                 mod_timer(&adapter->phy_info_timer,
2152                                           round_jiffies(jiffies + 2 * HZ));
2153                 }
2154         }
2155
2156 link_up:
2157         igb_update_stats(adapter);
2158
2159         mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2160         adapter->tpt_old = adapter->stats.tpt;
2161         mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2162         adapter->colc_old = adapter->stats.colc;
2163
2164         adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2165         adapter->gorc_old = adapter->stats.gorc;
2166         adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2167         adapter->gotc_old = adapter->stats.gotc;
2168
2169         igb_update_adaptive(&adapter->hw);
2170
2171         if (!netif_carrier_ok(netdev)) {
2172                 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2173                         /* We've lost link, so the controller stops DMA,
2174                          * but we've got queued Tx work that's never going
2175                          * to get done, so reset controller to flush Tx.
2176                          * (Do the reset outside of interrupt context). */
2177                         adapter->tx_timeout_count++;
2178                         schedule_work(&adapter->reset_task);
2179                 }
2180         }
2181
2182         /* Cause software interrupt to ensure rx ring is cleaned */
2183         wr32(E1000_ICS, E1000_ICS_RXDMT0);
2184
2185         /* Force detection of hung controller every watchdog period */
2186         tx_ring->detect_tx_hung = true;
2187
2188         /* Reset the timer */
2189         if (!test_bit(__IGB_DOWN, &adapter->state))
2190                 mod_timer(&adapter->watchdog_timer,
2191                           round_jiffies(jiffies + 2 * HZ));
2192 }
2193
2194 enum latency_range {
2195         lowest_latency = 0,
2196         low_latency = 1,
2197         bulk_latency = 2,
2198         latency_invalid = 255
2199 };
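
/* The three ranges above drive the adaptive moderation below: igb_set_itr()
 * maps them to roughly 70000, 20000 and 4000 interrupts/sec respectively,
 * and igb_update_itr() moves between them based on the packet and byte
 * counts observed in the last interval.
 */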
2200
2202 static void igb_lower_rx_eitr(struct igb_adapter *adapter,
2203                               struct igb_ring *rx_ring)
2204 {
2205         struct e1000_hw *hw = &adapter->hw;
2206         int new_val;
2207
2208         new_val = rx_ring->itr_val / 2;
2209         if (new_val < IGB_MIN_DYN_ITR)
2210                 new_val = IGB_MIN_DYN_ITR;
2211
2212         if (new_val != rx_ring->itr_val) {
2213                 rx_ring->itr_val = new_val;
2214                 wr32(rx_ring->itr_register,
2215                                 1000000000 / (new_val * 256));
2216         }
2217 }
2218
2219 static void igb_raise_rx_eitr(struct igb_adapter *adapter,
2220                               struct igb_ring *rx_ring)
2221 {
2222         struct e1000_hw *hw = &adapter->hw;
2223         int new_val;
2224
2225         new_val = rx_ring->itr_val * 2;
2226         if (new_val > IGB_MAX_DYN_ITR)
2227                 new_val = IGB_MAX_DYN_ITR;
2228
2229         if (new_val != rx_ring->itr_val) {
2230                 rx_ring->itr_val = new_val;
2231                 wr32(rx_ring->itr_register,
2232                                 1000000000 / (new_val * 256));
2233         }
2234 }
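
/* Note the multiplicative step in both helpers: the per-ring Rx EITR value
 * is halved or doubled per adjustment and clamped to the
 * [IGB_MIN_DYN_ITR, IGB_MAX_DYN_ITR] range, using the same 256 ns register
 * encoding as the E1000_ITR write in igb_configure_rx().
 */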
2235
2236 /**
2237  * igb_update_itr - update the dynamic ITR value based on statistics
2238  * @adapter: pointer to adapter
2239  * @itr_setting: current adapter->itr
2240  * @packets: the number of packets during this measurement interval
2241  * @bytes: the number of bytes during this measurement interval
2242  *
2243  *      Stores a new ITR value based on packets and byte counts during
2244  *      the last interrupt.  The advantage of per-interrupt computation
2245  *      is faster updates and more accurate ITR for the current traffic
2246  *      pattern.  Constants in this function were computed based on
2247  *      theoretical maximum wire speed; thresholds were set based on
2248  *      testing data as well as attempting to minimize response time
2249  *      while increasing bulk throughput.
2250  *      This functionality is controlled by the InterruptThrottleRate
2251  *      module parameter (see igb_param.c).
2252  *      NOTE:  These calculations are only valid when operating in a
2253  *             single-queue environment.
2254  **/
2254 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2255                                    int packets, int bytes)
2256 {
2257         unsigned int retval = itr_setting;
2258
2259         if (packets == 0)
2260                 goto update_itr_done;
2261
2262         switch (itr_setting) {
2263         case lowest_latency:
2264                 /* handle TSO and jumbo frames */
2265                 if (bytes/packets > 8000)
2266                         retval = bulk_latency;
2267                 else if ((packets < 5) && (bytes > 512))
2268                         retval = low_latency;
2269                 break;
2270         case low_latency:  /* 50 usec aka 20000 ints/s */
2271                 if (bytes > 10000) {
2272                         /* this if handles the TSO accounting */
2273                         if (bytes/packets > 8000) {
2274                                 retval = bulk_latency;
2275                         } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2276                                 retval = bulk_latency;
2277                         } else if (packets > 35) {
2278                                 retval = lowest_latency;
2279                         }
2280                 } else if (bytes/packets > 2000) {
2281                         retval = bulk_latency;
2282                 } else if (packets <= 2 && bytes < 512) {
2283                         retval = lowest_latency;
2284                 }
2285                 break;
2286         case bulk_latency: /* 250 usec aka 4000 ints/s */
2287                 if (bytes > 25000) {
2288                         if (packets > 35)
2289                                 retval = low_latency;
2290                 } else if (bytes < 6000) {
2291                         retval = low_latency;
2292                 }
2293                 break;
2294         }
2295
2296 update_itr_done:
2297         return retval;
2298 }
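
/* Worked example, starting from low_latency: 20 packets / 30000 bytes in an
 * interval gives bytes/packets = 1500 > 1200, so we drop to bulk_latency;
 * 50 packets / 12000 bytes (240 bytes/packet) is a stream of small frames,
 * so we step up to lowest_latency; 2 packets / 400 bytes likewise returns
 * lowest_latency.
 */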
2299
2300 static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
2301                         int rx_only)
2302 {
2303         u16 current_itr;
2304         u32 new_itr = adapter->itr;
2305
2306         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2307         if (adapter->link_speed != SPEED_1000) {
2308                 current_itr = 0;
2309                 new_itr = 4000;
2310                 goto set_itr_now;
2311         }
2312
2313         adapter->rx_itr = igb_update_itr(adapter,
2314                                     adapter->rx_itr,
2315                                     adapter->rx_ring->total_packets,
2316                                     adapter->rx_ring->total_bytes);
2317         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2318         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2319                 adapter->rx_itr = low_latency;
2320
2321         if (!rx_only) {
2322                 adapter->tx_itr = igb_update_itr(adapter,
2323                                             adapter->tx_itr,
2324                                             adapter->tx_ring->total_packets,
2325                                             adapter->tx_ring->total_bytes);
2326                 /* conservative mode (itr 3) eliminates the
2327                  * lowest_latency setting */
2328                 if (adapter->itr_setting == 3 &&
2329                     adapter->tx_itr == lowest_latency)
2330                         adapter->tx_itr = low_latency;
2331
2332                 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2333         } else {
2334                 current_itr = adapter->rx_itr;
2335         }
2336
2337         switch (current_itr) {
2338         /* counts and packets in update_itr are dependent on these numbers */
2339         case lowest_latency:
2340                 new_itr = 70000;
2341                 break;
2342         case low_latency:
2343                 new_itr = 20000; /* aka hwitr = ~200 */
2344                 break;
2345         case bulk_latency:
2346                 new_itr = 4000;
2347                 break;
2348         default:
2349                 break;
2350         }
2351
2352 set_itr_now:
2353         if (new_itr != adapter->itr) {
2354                 /* this attempts to bias the interrupt rate towards Bulk
2355                  * by adding intermediate steps when interrupt rate is
2356                  * increasing */
2357                 new_itr = new_itr > adapter->itr ?
2358                              min(adapter->itr + (new_itr >> 2), new_itr) :
2359                              new_itr;
2360                 /* Don't write the value here; it resets the adapter's
2361                  * internal timer, and causes us to delay far longer than
2362                  * we should between interrupts.  Instead, we write the ITR
2363                  * value at the beginning of the next interrupt so the timing
2364                  * ends up being correct.
2365                  */
2366                 adapter->itr = new_itr;
2367                 adapter->set_itr = 1;
2368         }
2371 }
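
/* Example of the damped step-up: moving from itr = 4000 toward a 20000
 * target first selects min(4000 + (20000 >> 2), 20000) = 9000, so the
 * interrupt rate climbs over several intervals rather than jumping.
 */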
2372
2374 #define IGB_TX_FLAGS_CSUM               0x00000001
2375 #define IGB_TX_FLAGS_VLAN               0x00000002
2376 #define IGB_TX_FLAGS_TSO                0x00000004
2377 #define IGB_TX_FLAGS_IPV4               0x00000008
2378 #define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
2379 #define IGB_TX_FLAGS_VLAN_SHIFT 16
2380
2381 static inline int igb_tso_adv(struct igb_adapter *adapter,
2382                               struct igb_ring *tx_ring,
2383                               struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2384 {
2385         struct e1000_adv_tx_context_desc *context_desc;
2386         unsigned int i;
2387         int err;
2388         struct igb_buffer *buffer_info;
2389         u32 info = 0, tu_cmd = 0;
2390         u32 mss_l4len_idx, l4len;
2391         *hdr_len = 0;
2392
2393         if (skb_header_cloned(skb)) {
2394                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2395                 if (err)
2396                         return err;
2397         }
2398
2399         l4len = tcp_hdrlen(skb);
2400         *hdr_len += l4len;
2401
2402         if (skb->protocol == htons(ETH_P_IP)) {
2403                 struct iphdr *iph = ip_hdr(skb);
2404                 iph->tot_len = 0;
2405                 iph->check = 0;
2406                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2407                                                          iph->daddr, 0,
2408                                                          IPPROTO_TCP,
2409                                                          0);
2410         } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2411                 ipv6_hdr(skb)->payload_len = 0;
2412                 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2413                                                        &ipv6_hdr(skb)->daddr,
2414                                                        0, IPPROTO_TCP, 0);
2415         }
2416
2417         i = tx_ring->next_to_use;
2418
2419         buffer_info = &tx_ring->buffer_info[i];
2420         context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2421         /* VLAN MACLEN IPLEN */
2422         if (tx_flags & IGB_TX_FLAGS_VLAN)
2423                 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2424         info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2425         *hdr_len += skb_network_offset(skb);
2426         info |= skb_network_header_len(skb);
2427         *hdr_len += skb_network_header_len(skb);
2428         context_desc->vlan_macip_lens = cpu_to_le32(info);
2429
2430         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2431         tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2432
2433         if (skb->protocol == htons(ETH_P_IP))
2434                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2435         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2436
2437         context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2438
2439         /* MSS L4LEN IDX */
2440         mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2441         mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2442
2443         /* Context index must be unique per ring.  Luckily, so is the interrupt
2444          * mask value. */
2445         mss_l4len_idx |= tx_ring->eims_value >> 4;
2446
2447         context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2448         context_desc->seqnum_seed = 0;
2449
2450         buffer_info->time_stamp = jiffies;
2451         buffer_info->dma = 0;
2452         i++;
2453         if (i == tx_ring->count)
2454                 i = 0;
2455
2456         tx_ring->next_to_use = i;
2457
2458         return true;
2459 }
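
/* On return, *hdr_len is the full MAC + IP + TCP header length;
 * igb_tx_queue_adv() subtracts it from skb->len so that olinfo_status
 * carries only the TSO payload length.
 */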
2460
2461 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2462                                         struct igb_ring *tx_ring,
2463                                         struct sk_buff *skb, u32 tx_flags)
2464 {
2465         struct e1000_adv_tx_context_desc *context_desc;
2466         unsigned int i;
2467         struct igb_buffer *buffer_info;
2468         u32 info = 0, tu_cmd = 0;
2469
2470         if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2471             (tx_flags & IGB_TX_FLAGS_VLAN)) {
2472                 i = tx_ring->next_to_use;
2473                 buffer_info = &tx_ring->buffer_info[i];
2474                 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2475
2476                 if (tx_flags & IGB_TX_FLAGS_VLAN)
2477                         info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2478                 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2479                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2480                         info |= skb_network_header_len(skb);
2481
2482                 context_desc->vlan_macip_lens = cpu_to_le32(info);
2483
2484                 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2485
2486                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2487                         switch (skb->protocol) {
2488                         case __constant_htons(ETH_P_IP):
2489                                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2490                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2491                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2492                                 break;
2493                         case __constant_htons(ETH_P_IPV6):
2494                                 /* XXX what about other V6 headers?? */
2495                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2496                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2497                                 break;
2498                         default:
2499                                 if (unlikely(net_ratelimit()))
2500                                         dev_warn(&adapter->pdev->dev,
2501                                             "partial checksum but proto=%x!\n",
2502                                             skb->protocol);
2503                                 break;
2504                         }
2505                 }
2506
2507                 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2508                 context_desc->seqnum_seed = 0;
2509                 context_desc->mss_l4len_idx =
2510                                           cpu_to_le32(tx_ring->eims_value >> 4);
2511
2512                 buffer_info->time_stamp = jiffies;
2513                 buffer_info->dma = 0;
2514
2515                 i++;
2516                 if (i == tx_ring->count)
2517                         i = 0;
2518                 tx_ring->next_to_use = i;
2519
2520                 return true;
2521         }
2522
2524         return false;
2525 }
2526
2527 #define IGB_MAX_TXD_PWR 16
2528 #define IGB_MAX_DATA_PER_TXD    (1<<IGB_MAX_TXD_PWR)
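/* 1 << 16 = 65536, so one advanced Tx data descriptor covers at most 64KB;
 * the BUG_ON()s in igb_tx_map_adv() assert every mapped chunk stays under
 * this limit. */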
2529
2530 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2531                                  struct igb_ring *tx_ring,
2532                                  struct sk_buff *skb)
2533 {
2534         struct igb_buffer *buffer_info;
2535         unsigned int len = skb_headlen(skb);
2536         unsigned int count = 0, i;
2537         unsigned int f;
2538
2539         i = tx_ring->next_to_use;
2540
2541         buffer_info = &tx_ring->buffer_info[i];
2542         BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2543         buffer_info->length = len;
2544         /* set time_stamp *before* dma to help avoid a possible race */
2545         buffer_info->time_stamp = jiffies;
2546         buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2547                                           PCI_DMA_TODEVICE);
2548         count++;
2549         i++;
2550         if (i == tx_ring->count)
2551                 i = 0;
2552
2553         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2554                 struct skb_frag_struct *frag;
2555
2556                 frag = &skb_shinfo(skb)->frags[f];
2557                 len = frag->size;
2558
2559                 buffer_info = &tx_ring->buffer_info[i];
2560                 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2561                 buffer_info->length = len;
2562                 buffer_info->time_stamp = jiffies;
2563                 buffer_info->dma = pci_map_page(adapter->pdev,
2564                                                 frag->page,
2565                                                 frag->page_offset,
2566                                                 len,
2567                                                 PCI_DMA_TODEVICE);
2568
2569                 count++;
2570                 i++;
2571                 if (i == tx_ring->count)
2572                         i = 0;
2573         }
2574
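        /* Attach the skb to the last descriptor's buffer_info only, so the
         * cleanup path frees it exactly once, after every fragment of the
         * frame has been transmitted. */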
2575         i = (i == 0) ? tx_ring->count - 1 : i - 1;
2576         tx_ring->buffer_info[i].skb = skb;
2577
2578         return count;
2579 }
2580
2581 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2582                                     struct igb_ring *tx_ring,
2583                                     int tx_flags, int count, u32 paylen,
2584                                     u8 hdr_len)
2585 {
2586         union e1000_adv_tx_desc *tx_desc = NULL;
2587         struct igb_buffer *buffer_info;
2588         u32 olinfo_status = 0, cmd_type_len;
2589         unsigned int i;
2590
2591         cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2592                         E1000_ADVTXD_DCMD_DEXT);
2593
2594         if (tx_flags & IGB_TX_FLAGS_VLAN)
2595                 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2596
2597         if (tx_flags & IGB_TX_FLAGS_TSO) {
2598                 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2599
2600                 /* insert tcp checksum */
2601                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2602
2603                 /* insert ip checksum */
2604                 if (tx_flags & IGB_TX_FLAGS_IPV4)
2605                         olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2606
2607         } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
2608                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2609         }
2610
2611         if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
2612                         IGB_TX_FLAGS_VLAN))
2613                 olinfo_status |= tx_ring->eims_value >> 4;
2614
2615         olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2616
2617         i = tx_ring->next_to_use;
2618         while (count--) {
2619                 buffer_info = &tx_ring->buffer_info[i];
2620                 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
2621                 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2622                 tx_desc->read.cmd_type_len =
2623                         cpu_to_le32(cmd_type_len | buffer_info->length);
2624                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2625                 i++;
2626                 if (i == tx_ring->count)
2627                         i = 0;
2628         }
2629
2630         tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2631         /* Force memory writes to complete before letting h/w
2632          * know there are new descriptors to fetch.  (Only
2633          * applicable for weak-ordered memory model archs,
2634          * such as IA-64). */
2635         wmb();
2636
2637         tx_ring->next_to_use = i;
2638         writel(i, adapter->hw.hw_addr + tx_ring->tail);
2639         /* we need this if more than one processor can write to our tail
2640          * at a time; it synchronizes IO on IA64/Altix systems */
2641         mmiowb();
2642 }
2643
2644 static int __igb_maybe_stop_tx(struct net_device *netdev,
2645                                struct igb_ring *tx_ring, int size)
2646 {
2647         struct igb_adapter *adapter = netdev_priv(netdev);
2648
2649         netif_stop_queue(netdev);
2650         /* Herbert's original patch had:
2651          *  smp_mb__after_netif_stop_queue();
2652          * but since that doesn't exist yet, just open code it. */
2653         smp_mb();
2654
2655         /* We need to check again in a case another CPU has just
2656          * made room available. */
2657         if (IGB_DESC_UNUSED(tx_ring) < size)
2658                 return -EBUSY;
2659
2660         /* A reprieve! */
2661         netif_start_queue(netdev);
2662         ++adapter->restart_queue;
2663         return 0;
2664 }
2665
2666 static int igb_maybe_stop_tx(struct net_device *netdev,
2667                              struct igb_ring *tx_ring, int size)
2668 {
2669         if (IGB_DESC_UNUSED(tx_ring) >= size)
2670                 return 0;
2671         return __igb_maybe_stop_tx(netdev, tx_ring, size);
2672 }
2673
2674 #define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
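/* E.g. TXD_USE_COUNT(2048) = (2048 >> 16) + 1 = 1 descriptor, while a full
 * 64KB chunk needs (65536 >> 16) + 1 = 2. */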
2675
2676 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2677                                    struct net_device *netdev,
2678                                    struct igb_ring *tx_ring)
2679 {
2680         struct igb_adapter *adapter = netdev_priv(netdev);
2681         unsigned int tx_flags = 0;
2682         unsigned int len;
2683         unsigned long irq_flags;
2684         u8 hdr_len = 0;
2685         int tso = 0;
2686
2687         len = skb_headlen(skb);
2688
2689         if (test_bit(__IGB_DOWN, &adapter->state)) {
2690                 dev_kfree_skb_any(skb);
2691                 return NETDEV_TX_OK;
2692         }
2693
2694         if (skb->len <= 0) {
2695                 dev_kfree_skb_any(skb);
2696                 return NETDEV_TX_OK;
2697         }
2698
2699         if (!spin_trylock_irqsave(&tx_ring->tx_lock, irq_flags))
2700                 /* Collision - tell upper layer to requeue */
2701                 return NETDEV_TX_LOCKED;
2702
2703         /* need: 1 descriptor per page,
2704          *       + 2 desc gap to keep tail from touching head,
2705          *       + 1 desc for skb->data,
2706          *       + 1 desc for context descriptor,
2707          * otherwise try next time */
2708         if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2709                 /* this is a hard error */
2710                 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2711                 return NETDEV_TX_BUSY;
2712         }
2713
2714         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2715                 tx_flags |= IGB_TX_FLAGS_VLAN;
2716                 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2717         }
2718
2719         tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2720                                               &hdr_len) : 0;
2721
2722         if (tso < 0) {
2723                 dev_kfree_skb_any(skb);
2724                 spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2725                 return NETDEV_TX_OK;
2726         }
2727
2728         if (tso)
2729                 tx_flags |= IGB_TX_FLAGS_TSO;
2730         else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
2731                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2732                         tx_flags |= IGB_TX_FLAGS_CSUM;
2733
2734         if (skb->protocol == htons(ETH_P_IP))
2735                 tx_flags |= IGB_TX_FLAGS_IPV4;
2736
2737         igb_tx_queue_adv(adapter, tx_ring, tx_flags,
2738                          igb_tx_map_adv(adapter, tx_ring, skb),
2739                          skb->len, hdr_len);
2740
2741         netdev->trans_start = jiffies;
2742
2743         /* Make sure there is space in the ring for the next send. */
2744         igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
2745
2746         spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
2747         return NETDEV_TX_OK;
2748 }
2749
2750 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
2751 {
2752         struct igb_adapter *adapter = netdev_priv(netdev);
2753         struct igb_ring *tx_ring = &adapter->tx_ring[0];
2754
2755         /* This goes back to the question of how to logically map a tx queue
2756          * to a flow.  Right now, performance is impacted slightly negatively
2757          * if using multiple tx queues.  If the stack breaks away from a
2758          * single qdisc implementation, we can look at this again. */
2759         return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
2760 }
2761
2762 /**
2763  * igb_tx_timeout - Respond to a Tx Hang
2764  * @netdev: network interface device structure
2765  **/
2766 static void igb_tx_timeout(struct net_device *netdev)
2767 {
2768         struct igb_adapter *adapter = netdev_priv(netdev);
2769         struct e1000_hw *hw = &adapter->hw;
2770
2771         /* Do the reset outside of interrupt context */
2772         adapter->tx_timeout_count++;
2773         schedule_work(&adapter->reset_task);
2774         wr32(E1000_EICS, adapter->eims_enable_mask &
2775                 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
2776 }
2777
2778 static void igb_reset_task(struct work_struct *work)
2779 {
2780         struct igb_adapter *adapter;
2781         adapter = container_of(work, struct igb_adapter, reset_task);
2782
2783         igb_reinit_locked(adapter);
2784 }
2785
2786 /**
2787  * igb_get_stats - Get System Network Statistics
2788  * @netdev: network interface device structure
2789  *
2790  * Returns the address of the device statistics structure.
2791  * The statistics are actually updated from the timer callback.
2792  **/
2793 static struct net_device_stats *
2794 igb_get_stats(struct net_device *netdev)
2795 {
2796         struct igb_adapter *adapter = netdev_priv(netdev);
2797
2798         /* only return the current stats */
2799         return &adapter->net_stats;
2800 }
2801
2802 /**
2803  * igb_change_mtu - Change the Maximum Transfer Unit
2804  * @netdev: network interface device structure
2805  * @new_mtu: new value for maximum frame size
2806  *
2807  * Returns 0 on success, negative on failure
2808  **/
2809 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
2810 {
2811         struct igb_adapter *adapter = netdev_priv(netdev);
2812         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2813
2814         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2815             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2816                 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2817                 return -EINVAL;
2818         }
2819
2820 #define MAX_STD_JUMBO_FRAME_SIZE 9234
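/* 9234 = 9216 bytes of MTU + ETH_HLEN (14) + ETH_FCS_LEN (4), matching the
 * "MTU > 9216" wording below. */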
2821         if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2822                 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2823                 return -EINVAL;
2824         }
2825
2826         while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2827                 msleep(1);
2828         /* igb_down has a dependency on max_frame_size */
2829         adapter->max_frame_size = max_frame;
2830         if (netif_running(netdev))
2831                 igb_down(adapter);
2832
2833         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2834          * means we reserve 2 more; this pushes us to allocate from the next
2835          * larger slab size.
2836          * e.g. RXBUFFER_2048 --> size-4096 slab
2837          */
2838
2839         if (max_frame <= IGB_RXBUFFER_256)
2840                 adapter->rx_buffer_len = IGB_RXBUFFER_256;
2841         else if (max_frame <= IGB_RXBUFFER_512)
2842                 adapter->rx_buffer_len = IGB_RXBUFFER_512;
2843         else if (max_frame <= IGB_RXBUFFER_1024)
2844                 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
2845         else if (max_frame <= IGB_RXBUFFER_2048)
2846                 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
2847         else
2848                 adapter->rx_buffer_len = IGB_RXBUFFER_4096;
2849         /* adjust allocation if LPE protects us, and we aren't using SBP */
2850         if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2851              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
2852                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2853
2854         dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2855                  netdev->mtu, new_mtu);
2856         netdev->mtu = new_mtu;
2857
2858         if (netif_running(netdev))
2859                 igb_up(adapter);
2860         else
2861                 igb_reset(adapter);
2862
2863         clear_bit(__IGB_RESETTING, &adapter->state);
2864
2865         return 0;
2866 }
2867
2868 /**
2869  * igb_update_stats - Update the board statistics counters
2870  * @adapter: board private structure
2871  **/
2872
2873 void igb_update_stats(struct igb_adapter *adapter)
2874 {
2875         struct e1000_hw *hw = &adapter->hw;
2876         struct pci_dev *pdev = adapter->pdev;
2877         u16 phy_tmp;
2878
2879 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
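/* the idle error count is reported in the low byte of PHY_1000T_STATUS */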
2880
2881         /*
2882          * Prevent stats update while adapter is being reset, or if the pci
2883          * connection is down.
2884          */
2885         if (adapter->link_speed == 0)
2886                 return;
2887         if (pci_channel_offline(pdev))
2888                 return;
2889
2890         adapter->stats.crcerrs += rd32(E1000_CRCERRS);
2891         adapter->stats.gprc += rd32(E1000_GPRC);
2892         adapter->stats.gorc += rd32(E1000_GORCL);
2893         rd32(E1000_GORCH); /* clear GORCL */
2894         adapter->stats.bprc += rd32(E1000_BPRC);
2895         adapter->stats.mprc += rd32(E1000_MPRC);
2896         adapter->stats.roc += rd32(E1000_ROC);
2897
2898         adapter->stats.prc64 += rd32(E1000_PRC64);
2899         adapter->stats.prc127 += rd32(E1000_PRC127);
2900         adapter->stats.prc255 += rd32(E1000_PRC255);
2901         adapter->stats.prc511 += rd32(E1000_PRC511);
2902         adapter->stats.prc1023 += rd32(E1000_PRC1023);
2903         adapter->stats.prc1522 += rd32(E1000_PRC1522);
2904         adapter->stats.symerrs += rd32(E1000_SYMERRS);
2905         adapter->stats.sec += rd32(E1000_SEC);
2906
2907         adapter->stats.mpc += rd32(E1000_MPC);
2908         adapter->stats.scc += rd32(E1000_SCC);
2909         adapter->stats.ecol += rd32(E1000_ECOL);
2910         adapter->stats.mcc += rd32(E1000_MCC);
2911         adapter->stats.latecol += rd32(E1000_LATECOL);
2912         adapter->stats.dc += rd32(E1000_DC);
2913         adapter->stats.rlec += rd32(E1000_RLEC);
2914         adapter->stats.xonrxc += rd32(E1000_XONRXC);
2915         adapter->stats.xontxc += rd32(E1000_XONTXC);
2916         adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
2917         adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
2918         adapter->stats.fcruc += rd32(E1000_FCRUC);
2919         adapter->stats.gptc += rd32(E1000_GPTC);
2920         adapter->stats.gotc += rd32(E1000_GOTCL);
2921         rd32(E1000_GOTCH); /* clear GOTCL */
2922         adapter->stats.rnbc += rd32(E1000_RNBC);
2923         adapter->stats.ruc += rd32(E1000_RUC);
2924         adapter->stats.rfc += rd32(E1000_RFC);
2925         adapter->stats.rjc += rd32(E1000_RJC);
2926         adapter->stats.tor += rd32(E1000_TORH);
2927         adapter->stats.tot += rd32(E1000_TOTH);
2928         adapter->stats.tpr += rd32(E1000_TPR);
2929
2930         adapter->stats.ptc64 += rd32(E1000_PTC64);
2931         adapter->stats.ptc127 += rd32(E1000_PTC127);
2932         adapter->stats.ptc255 += rd32(E1000_PTC255);
2933         adapter->stats.ptc511 += rd32(E1000_PTC511);
2934         adapter->stats.ptc1023 += rd32(E1000_PTC1023);
2935         adapter->stats.ptc1522 += rd32(E1000_PTC1522);
2936
2937         adapter->stats.mptc += rd32(E1000_MPTC);
2938         adapter->stats.bptc += rd32(E1000_BPTC);
2939
2940         /* used for adaptive IFS */
2941
2942         hw->mac.tx_packet_delta = rd32(E1000_TPT);
2943         adapter->stats.tpt += hw->mac.tx_packet_delta;
2944         hw->mac.collision_delta = rd32(E1000_COLC);
2945         adapter->stats.colc += hw->mac.collision_delta;
2946
2947         adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
2948         adapter->stats.rxerrc += rd32(E1000_RXERRC);
2949         adapter->stats.tncrs += rd32(E1000_TNCRS);
2950         adapter->stats.tsctc += rd32(E1000_TSCTC);
2951         adapter->stats.tsctfc += rd32(E1000_TSCTFC);
2952
2953         adapter->stats.iac += rd32(E1000_IAC);
2954         adapter->stats.icrxoc += rd32(E1000_ICRXOC);
2955         adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
2956         adapter->stats.icrxatc += rd32(E1000_ICRXATC);
2957         adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
2958         adapter->stats.ictxatc += rd32(E1000_ICTXATC);
2959         adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
2960         adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
2961         adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
2962
2963         /* Fill out the OS statistics structure */
2964         adapter->net_stats.multicast = adapter->stats.mprc;
2965         adapter->net_stats.collisions = adapter->stats.colc;
2966
2967         /* Rx Errors */
2968
2969         /* RLEC on some newer hardware can be incorrect so build
2970          * our own version based on RUC and ROC */
2971         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2972                 adapter->stats.crcerrs + adapter->stats.algnerrc +
2973                 adapter->stats.ruc + adapter->stats.roc +
2974                 adapter->stats.cexterr;
2975         adapter->net_stats.rx_length_errors = adapter->stats.ruc +
2976                                               adapter->stats.roc;
2977         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2978         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2979         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2980
2981         /* Tx Errors */
2982         adapter->net_stats.tx_errors = adapter->stats.ecol +
2983                                        adapter->stats.latecol;
2984         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2985         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2986         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2987
2988         /* Tx Dropped needs to be maintained elsewhere */
2989
2990         /* Phy Stats */
2991         if (hw->phy.media_type == e1000_media_type_copper) {
2992                 if ((adapter->link_speed == SPEED_1000) &&
2993                    (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
2994                                               &phy_tmp))) {
2995                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2996                         adapter->phy_stats.idle_errors += phy_tmp;
2997                 }
2998         }
2999
3000         /* Management Stats */
3001         adapter->stats.mgptc += rd32(E1000_MGTPTC);
3002         adapter->stats.mgprc += rd32(E1000_MGTPRC);
3003         adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3004 }
3005
3006
3007 static irqreturn_t igb_msix_other(int irq, void *data)
3008 {
3009         struct net_device *netdev = data;
3010         struct igb_adapter *adapter = netdev_priv(netdev);
3011         struct e1000_hw *hw = &adapter->hw;
3012         u32 eicr;
3013         /* disable interrupts from the "other" bit, avoid re-entry */
3014         wr32(E1000_EIMC, E1000_EIMS_OTHER);
3015
3016         eicr = rd32(E1000_EICR);
3017
3018         if (eicr & E1000_EIMS_OTHER) {
3019                 u32 icr = rd32(E1000_ICR);
3020                 /* reading ICR causes bit 31 of EICR to be cleared */
3021                 if (!(icr & E1000_ICR_LSC))
3022                         goto no_link_interrupt;
3023                 hw->mac.get_link_status = 1;
3024                 /* guard against interrupt when we're going down */
3025                 if (!test_bit(__IGB_DOWN, &adapter->state))
3026                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3027         }
3028
3029 no_link_interrupt:
3030         wr32(E1000_IMS, E1000_IMS_LSC);
3031         wr32(E1000_EIMS, E1000_EIMS_OTHER);
3032
3033         return IRQ_HANDLED;
3034 }
3035
3036 static irqreturn_t igb_msix_tx(int irq, void *data)
3037 {
3038         struct igb_ring *tx_ring = data;
3039         struct igb_adapter *adapter = tx_ring->adapter;
3040         struct e1000_hw *hw = &adapter->hw;
3041
3042         if (!tx_ring->itr_val)
3043                 wr32(E1000_EIMC, tx_ring->eims_value);
3044
3045         tx_ring->total_bytes = 0;
3046         tx_ring->total_packets = 0;
3047         if (!igb_clean_tx_irq(adapter, tx_ring))
3048                 /* Ring was not completely cleaned, so fire another interrupt */
3049                 wr32(E1000_EICS, tx_ring->eims_value);
3050
3051         if (!tx_ring->itr_val)
3052                 wr32(E1000_EIMS, tx_ring->eims_value);
3053         return IRQ_HANDLED;
3054 }
3055
3056 static irqreturn_t igb_msix_rx(int irq, void *data)
3057 {
3058         struct igb_ring *rx_ring = data;
3059         struct igb_adapter *adapter = rx_ring->adapter;
3060         struct e1000_hw *hw = &adapter->hw;
3061
3062         if (!rx_ring->itr_val)
3063                 wr32(E1000_EIMC, rx_ring->eims_value);
3064
3065         if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) {
3066                 rx_ring->total_bytes = 0;
3067                 rx_ring->total_packets = 0;
3068                 rx_ring->no_itr_adjust = 0;
3069                 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3070         } else {
3071                 if (!rx_ring->no_itr_adjust) {
3072                         igb_lower_rx_eitr(adapter, rx_ring);
3073                         rx_ring->no_itr_adjust = 1;
3074                 }
3075         }
3076
3077         return IRQ_HANDLED;
3078 }
3079
3080
3081 /**
3082  * igb_intr_msi - Interrupt Handler
3083  * @irq: interrupt number
3084  * @data: pointer to a network interface device structure
3085  **/
3086 static irqreturn_t igb_intr_msi(int irq, void *data)
3087 {
3088         struct net_device *netdev = data;
3089         struct igb_adapter *adapter = netdev_priv(netdev);
3090         struct napi_struct *napi = &adapter->napi;
3091         struct e1000_hw *hw = &adapter->hw;
3092         /* read ICR disables interrupts using IAM */
3093         u32 icr = rd32(E1000_ICR);
3094
3095         /* Write the ITR value calculated at the end of the
3096          * previous interrupt.
3097          */
3098         if (adapter->set_itr) {
3099                 wr32(E1000_ITR,
3100                         1000000000 / (adapter->itr * 256));
3101                 adapter->set_itr = 0;
3102         }
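        /* Worked example of the write above (illustrative): with
         * adapter->itr expressed in interrupts/sec, itr == 8000 yields
         * 1000000000 / (8000 * 256) ~= 488.  Assuming the ITR register
         * counts in 256 ns units, that is ~125 us between interrupts,
         * i.e. about 8000 interrupts per second.
         */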
3103
3104         /* LSC or RX sequence error: kick the watchdog to re-check link */
3105         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3106                 hw->mac.get_link_status = 1;
3107                 if (!test_bit(__IGB_DOWN, &adapter->state))
3108                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3109         }
3110
3111         if (netif_rx_schedule_prep(netdev, napi)) {
3112                 adapter->tx_ring->total_bytes = 0;
3113                 adapter->tx_ring->total_packets = 0;
3114                 adapter->rx_ring->total_bytes = 0;
3115                 adapter->rx_ring->total_packets = 0;
3116                 __netif_rx_schedule(netdev, napi);
3117         }
3118
3119         return IRQ_HANDLED;
3120 }
3121
3122 /**
3123  * igb_intr - Interrupt Handler
3124  * @irq: interrupt number
3125  * @data: pointer to a network interface device structure
3126  **/
3127 static irqreturn_t igb_intr(int irq, void *data)
3128 {
3129         struct net_device *netdev = data;
3130         struct igb_adapter *adapter = netdev_priv(netdev);
3131         struct napi_struct *napi = &adapter->napi;
3132         struct e1000_hw *hw = &adapter->hw;
3133         /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
3134          * need for the IMC write */
3135         u32 icr = rd32(E1000_ICR);
3136         u32 eicr = 0;
3137         if (!icr)
3138                 return IRQ_NONE;  /* Not our interrupt */
3139
3140         /* Write the ITR value calculated at the end of the
3141          * previous interrupt.
3142          */
3143         if (adapter->set_itr) {
3144                 wr32(E1000_ITR,
3145                         1000000000 / (adapter->itr * 256));
3146                 adapter->set_itr = 0;
3147         }
3148
3149         /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3150          * not set, then the adapter didn't send an interrupt */
3151         if (!(icr & E1000_ICR_INT_ASSERTED))
3152                 return IRQ_NONE;
3153
3154         eicr = rd32(E1000_EICR);
3155
3156         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3157                 hw->mac.get_link_status = 1;
3158                 /* guard against interrupt when we're going down */
3159                 if (!test_bit(__IGB_DOWN, &adapter->state))
3160                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3161         }
3162
3163         if (netif_rx_schedule_prep(netdev, napi)) {
3164                 adapter->tx_ring->total_bytes = 0;
3165                 adapter->rx_ring->total_bytes = 0;
3166                 adapter->tx_ring->total_packets = 0;
3167                 adapter->rx_ring->total_packets = 0;
3168                 __netif_rx_schedule(netdev, napi);
3169         }
3170
3171         return IRQ_HANDLED;
3172 }
3173
3174 /**
3175  * igb_clean - NAPI Rx polling callback
3176  * @adapter: board private structure
3177  **/
3178 static int igb_clean(struct napi_struct *napi, int budget)
3179 {
3180         struct igb_adapter *adapter = container_of(napi, struct igb_adapter,
3181                                                    napi);
3182         struct net_device *netdev = adapter->netdev;
3183         int tx_clean_complete = 1, work_done = 0;
3184         int i;
3185
3186         /* Must NOT use netdev_priv macro here. */
3187         adapter = netdev->priv;
3188
3189         /* Keep link state information with original netdev */
3190         if (!netif_carrier_ok(netdev))
3191                 goto quit_polling;
3192
3193         /* igb_clean is called per-cpu.  This lock protects tx_ring[i] from
3194          * being cleaned by multiple cpus simultaneously.  A failure obtaining
3195          * the lock means tx_ring[i] is currently being cleaned anyway. */
3196         for (i = 0; i < adapter->num_tx_queues; i++) {
3197                 if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) {
3198                         tx_clean_complete &= igb_clean_tx_irq(adapter,
3199                                                         &adapter->tx_ring[i]);
3200                         spin_unlock(&adapter->tx_ring[i].tx_clean_lock);
3201                 }
3202         }
3203
3204         for (i = 0; i < adapter->num_rx_queues; i++)
3205                 igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done,
3206                                      adapter->rx_ring[i].napi.weight);
3207
3208         /* If no Tx and not enough Rx work done, exit the polling mode */
3209         if ((tx_clean_complete && (work_done < budget)) ||
3210             !netif_running(netdev)) {
3211 quit_polling:
3212                 if (adapter->itr_setting & 3)
3213                         igb_set_itr(adapter, E1000_ITR, false);
3214                 netif_rx_complete(netdev, napi);
3215                 if (!test_bit(__IGB_DOWN, &adapter->state))
3216                         igb_irq_enable(adapter);
3217                 return 0;
3218         }
3219
3220         return 1;
3221 }
3222
3223 static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3224 {
3225         struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3226         struct igb_adapter *adapter = rx_ring->adapter;
3227         struct e1000_hw *hw = &adapter->hw;
3228         struct net_device *netdev = adapter->netdev;
3229         int work_done = 0;
3230
3231         /* Keep link state information with original netdev */
3232         if (!netif_carrier_ok(netdev))
3233                 goto quit_polling;
3234
3235         igb_clean_rx_irq_adv(adapter, rx_ring, &work_done, budget);
3236
3237
3238         /* If not enough Rx work done, exit the polling mode */
3239         if ((work_done == 0) || !netif_running(netdev)) {
3240 quit_polling:
3241                 netif_rx_complete(netdev, napi);
3242
3243                 wr32(E1000_EIMS, rx_ring->eims_value);
3244                 if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust &&
3245                     (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
3246                         int mean_size = rx_ring->total_bytes /
3247                                         rx_ring->total_packets;
3248                         if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
3249                                 igb_raise_rx_eitr(adapter, rx_ring);
3250                         else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
3251                                 igb_lower_rx_eitr(adapter, rx_ring);
3252                 }
3253                 return 0;
3254         }
3255
3256         return 1;
3257 }
3258
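/* Layout sketch for get_head() below (a reading aid, assuming the head
 * write-back feature this driver configures): the ring's DMA area holds
 * tx_ring->count descriptors, and the __le32 immediately past the last
 * descriptor is where hardware writes back the current head index:
 *
 *   desc[0] | desc[1] | ... | desc[count - 1] | head write-back (__le32)
 */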
3259 static inline u32 get_head(struct igb_ring *tx_ring)
3260 {
3261         void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
3262         return le32_to_cpu(*(volatile __le32 *)end);
3263 }
3264
3265 /**
3266  * igb_clean_tx_irq - Reclaim resources after transmit completes
3267  * @adapter: board private structure
3268  * returns true if ring is completely cleaned
3269  **/
3270 static bool igb_clean_tx_irq(struct igb_adapter *adapter,
3271                                   struct igb_ring *tx_ring)
3272 {
3273         struct net_device *netdev = adapter->netdev;
3274         struct e1000_hw *hw = &adapter->hw;
3275         struct e1000_tx_desc *tx_desc;
3276         struct igb_buffer *buffer_info;
3277         struct sk_buff *skb;
3278         unsigned int i;
3279         u32 head, oldhead;
3280         unsigned int count = 0;
3281         bool cleaned = false;
3282         bool retval = true;
3283         unsigned int total_bytes = 0, total_packets = 0;
3284
3285         rmb();
3286         head = get_head(tx_ring);
3287         i = tx_ring->next_to_clean;
3288         while (1) {
3289                 while (i != head) {
3290                         cleaned = true;
3291                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3292                         buffer_info = &tx_ring->buffer_info[i];
3293                         skb = buffer_info->skb;
3294
3295                         if (skb) {
3296                                 unsigned int segs, bytecount;
3297                                 /* gso_segs is currently only valid for tcp */
3298                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
3299                                 /* multiply data chunks by size of headers */
3300                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
3301                                             skb->len;
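                                /* Worked example (illustrative): a TSO skb
                                 * with skb->len == 7240, 40-byte headers
                                 * and gso_segs == 5 counts 4 * 40 + 7240
                                 * = 7400 wire bytes, since every segment
                                 * repeats the headers and skb->len already
                                 * includes one copy.
                                 */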
3302                                 total_packets += segs;
3303                                 total_bytes += bytecount;
3304                         }
3305
3306                         igb_unmap_and_free_tx_resource(adapter, buffer_info);
3307                         tx_desc->upper.data = 0;
3308
3309                         i++;
3310                         if (i == tx_ring->count)
3311                                 i = 0;
3312
3313                         count++;
3314                         if (count == IGB_MAX_TX_CLEAN) {
3315                                 retval = false;
3316                                 goto done_cleaning;
3317                         }
3318                 }
3319                 oldhead = head;
3320                 rmb();
3321                 head = get_head(tx_ring);
3322                 if (head == oldhead)
3323                         goto done_cleaning;
3324         }  /* while (1) */
3325
3326 done_cleaning:
3327         tx_ring->next_to_clean = i;
3328
3329         if (unlikely(cleaned &&
3330                      netif_carrier_ok(netdev) &&
3331                      IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3332                 /* Make sure that anybody stopping the queue after this
3333                  * sees the new next_to_clean.
3334                  */
3335                 smp_mb();
3336                 if (netif_queue_stopped(netdev) &&
3337                     !(test_bit(__IGB_DOWN, &adapter->state))) {
3338                         netif_wake_queue(netdev);
3339                         ++adapter->restart_queue;
3340                 }
3341         }
3342
3343         if (tx_ring->detect_tx_hung) {
3344                 /* Detect a transmit hang in hardware, this serializes the
3345                  * check with the clearing of time_stamp and movement of i */
3346                 tx_ring->detect_tx_hung = false;
3347                 if (tx_ring->buffer_info[i].time_stamp &&
3348                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
3349                                (adapter->tx_timeout_factor * HZ))
3350                     && !(rd32(E1000_STATUS) &
3351                          E1000_STATUS_TXOFF)) {
3352
3353                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3354                         /* detected Tx unit hang */
3355                         dev_err(&adapter->pdev->dev,
3356                                 "Detected Tx Unit Hang\n"
3357                                 "  Tx Queue             <%lu>\n"
3358                                 "  TDH                  <%x>\n"
3359                                 "  TDT                  <%x>\n"
3360                                 "  next_to_use          <%x>\n"
3361                                 "  next_to_clean        <%x>\n"
3362                                 "  head (WB)            <%x>\n"
3363                                 "buffer_info[next_to_clean]\n"
3364                                 "  time_stamp           <%lx>\n"
3365                                 "  jiffies              <%lx>\n"
3366                                 "  desc.status          <%x>\n",
3367                                 /* pointer math already yields elements */
3368                                 (unsigned long)(tx_ring - adapter->tx_ring),
3369                                 readl(adapter->hw.hw_addr + tx_ring->head),
3370                                 readl(adapter->hw.hw_addr + tx_ring->tail),
3371                                 tx_ring->next_to_use,
3372                                 tx_ring->next_to_clean,
3373                                 head,
3374                                 tx_ring->buffer_info[i].time_stamp,
3375                                 jiffies,
3376                                 tx_desc->upper.fields.status);
3377                         netif_stop_queue(netdev);
3378                 }
3379         }
3380         tx_ring->total_bytes += total_bytes;
3381         tx_ring->total_packets += total_packets;
3382         adapter->net_stats.tx_bytes += total_bytes;
3383         adapter->net_stats.tx_packets += total_packets;
3384         return retval;
3385 }
3386
3387
3388 /**
3389  * igb_receive_skb - helper function to handle rx indications
3390  * @adapter: board private structure
3391  * @status: descriptor status field as written by hardware
3392  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3393  * @skb: pointer to sk_buff to be indicated to stack
3394  **/
3395 static void igb_receive_skb(struct igb_adapter *adapter, u8 status, __le16 vlan,
3396                             struct sk_buff *skb)
3397 {
3398         if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
3399                 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3400                                          le16_to_cpu(vlan) &
3401                                          E1000_RXD_SPC_VLAN_MASK);
3402         else
3403                 netif_receive_skb(skb);
3404 }
3405
3406
3407 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3408                                        u32 status_err, struct sk_buff *skb)
3409 {
3410         skb->ip_summed = CHECKSUM_NONE;
3411
3412         /* skip if the Ignore Checksum bit is set or rx checksum is disabled via ethtool */
3413         if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
3414                 return;
3415         /* TCP/UDP checksum error bit is set */
3416         if (status_err &
3417             (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
3418                 /* let the stack verify checksum errors */
3419                 adapter->hw_csum_err++;
3420                 return;
3421         }
3422         /* It must be a TCP or UDP packet with a valid checksum */
3423         if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
3424                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3425
3426         adapter->hw_csum_good++;
3427 }
3428
3429 static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter,
3430                                       struct igb_ring *rx_ring,
3431                                       int *work_done, int budget)
3432 {
3433         struct net_device *netdev = adapter->netdev;
3434         struct pci_dev *pdev = adapter->pdev;
3435         union e1000_adv_rx_desc *rx_desc, *next_rxd;
3436         struct igb_buffer *buffer_info, *next_buffer;
3437         struct sk_buff *skb;
3438         unsigned int i, j;
3439         u32 length, hlen, staterr;
3440         bool cleaned = false;
3441         int cleaned_count = 0;
3442         unsigned int total_bytes = 0, total_packets = 0;
3443
3444         i = rx_ring->next_to_clean;
3445         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3446         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3447
3448         while (staterr & E1000_RXD_STAT_DD) {
3449                 if (*work_done >= budget)
3450                         break;
3451                 (*work_done)++;
3452                 buffer_info = &rx_ring->buffer_info[i];
3453
3454                 /* HW will not DMA in data larger than the given buffer, even
3455                  * if it parses the (NFS, of course) header to be larger.  In
3456                  * that case, it fills the header buffer and spills the rest
3457                  * into the page.
3458                  */
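                /* Illustrative example (values assumed): with
                 * rx_ps_hdr_size == 256, a frame whose parsed header is
                 * reported as 300 bytes still has only 256 bytes in the
                 * header buffer, with the rest spilled into the page;
                 * the clamp below keeps hlen within the real buffer size.
                 */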
3459                 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3460                   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3461                 if (hlen > adapter->rx_ps_hdr_size)
3462                         hlen = adapter->rx_ps_hdr_size;
3463
3464                 length = le16_to_cpu(rx_desc->wb.upper.length);
3465                 cleaned = true;
3466                 cleaned_count++;
3467
3468                 if (rx_ring->pending_skb != NULL) {
3469                         skb = rx_ring->pending_skb;
3470                         rx_ring->pending_skb = NULL;
3471                         j = rx_ring->pending_skb_page;
3472                 } else {
3473                         skb = buffer_info->skb;
3474                         prefetch(skb->data - NET_IP_ALIGN);
3475                         buffer_info->skb = NULL;
3476                         if (hlen) {
3477                                 pci_unmap_single(pdev, buffer_info->dma,
3478                                                  adapter->rx_ps_hdr_size +
3479                                                    NET_IP_ALIGN,
3480                                                  PCI_DMA_FROMDEVICE);
3481                                 skb_put(skb, hlen);
3482                         } else {
3483                                 pci_unmap_single(pdev, buffer_info->dma,
3484                                                  adapter->rx_buffer_len +
3485                                                    NET_IP_ALIGN,
3486                                                  PCI_DMA_FROMDEVICE);
3487                                 skb_put(skb, length);
3488                                 goto send_up;
3489                         }
3490                         j = 0;
3491                 }
3492
3493                 while (length) {
3494                         pci_unmap_page(pdev, buffer_info->page_dma,
3495                                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3496                         buffer_info->page_dma = 0;
3497                         skb_fill_page_desc(skb, j, buffer_info->page,
3498                                                 0, length);
3499                         buffer_info->page = NULL;
3500
3501                         skb->len += length;
3502                         skb->data_len += length;
3503                         skb->truesize += length;
3504                         rx_desc->wb.upper.status_error = 0;
3505                         if (staterr & E1000_RXD_STAT_EOP)
3506                                 break;
3507
3508                         j++;
3509                         cleaned_count++;
3510                         i++;
3511                         if (i == rx_ring->count)
3512                                 i = 0;
3513
3514                         buffer_info = &rx_ring->buffer_info[i];
3515                         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3516                         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3517                         length = le16_to_cpu(rx_desc->wb.upper.length);
3518                         if (!(staterr & E1000_RXD_STAT_DD)) {
3519                                 rx_ring->pending_skb = skb;
3520                                 rx_ring->pending_skb_page = j;
3521                                 goto out;
3522                         }
3523                 }
3524 send_up:
3525                 pskb_trim(skb, skb->len - 4); /* strip the 4-byte Ethernet FCS */
3526                 i++;
3527                 if (i == rx_ring->count)
3528                         i = 0;
3529                 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3530                 prefetch(next_rxd);
3531                 next_buffer = &rx_ring->buffer_info[i];
3532
3533                 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3534                         dev_kfree_skb_irq(skb);
3535                         goto next_desc;
3536                 }
3537                 rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT);
3538
3539                 total_bytes += skb->len;
3540                 total_packets++;
3541
3542                 igb_rx_checksum_adv(adapter, staterr, skb);
3543
3544                 skb->protocol = eth_type_trans(skb, netdev);
3545
3546                 igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb);
3547
3548                 netdev->last_rx = jiffies;
3549
3550 next_desc:
3551                 rx_desc->wb.upper.status_error = 0;
3552
3553                 /* return some buffers to hardware, one at a time is too slow */
3554                 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3555                         igb_alloc_rx_buffers_adv(adapter, rx_ring,
3556                                                  cleaned_count);
3557                         cleaned_count = 0;
3558                 }
3559
3560                 /* use prefetched values */
3561                 rx_desc = next_rxd;
3562                 buffer_info = next_buffer;
3563
3564                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3565         }
3566 out:
3567         rx_ring->next_to_clean = i;
3568         cleaned_count = IGB_DESC_UNUSED(rx_ring);
3569
3570         if (cleaned_count)
3571                 igb_alloc_rx_buffers_adv(adapter, rx_ring, cleaned_count);
3572
3573         rx_ring->total_packets += total_packets;
3574         rx_ring->total_bytes += total_bytes;
3575         rx_ring->rx_stats.packets += total_packets;
3576         rx_ring->rx_stats.bytes += total_bytes;
3577         adapter->net_stats.rx_bytes += total_bytes;
3578         adapter->net_stats.rx_packets += total_packets;
3579         return cleaned;
3580 }
3581
3582
3583 /**
3584  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
3585  * @adapter: address of board private structure
3586  **/
3587 static void igb_alloc_rx_buffers_adv(struct igb_adapter *adapter,
3588                                      struct igb_ring *rx_ring,
3589                                      int cleaned_count)
3590 {
3591         struct net_device *netdev = adapter->netdev;
3592         struct pci_dev *pdev = adapter->pdev;
3593         union e1000_adv_rx_desc *rx_desc;
3594         struct igb_buffer *buffer_info;
3595         struct sk_buff *skb;
3596         unsigned int i;
3597
3598         i = rx_ring->next_to_use;
3599         buffer_info = &rx_ring->buffer_info[i];
3600
3601         while (cleaned_count--) {
3602                 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3603
3604                 if (adapter->rx_ps_hdr_size && !buffer_info->page) {
3605                         buffer_info->page = alloc_page(GFP_ATOMIC);
3606                         if (!buffer_info->page) {
3607                                 adapter->alloc_rx_buff_failed++;
3608                                 goto no_buffers;
3609                         }
3610                         buffer_info->page_dma =
3611                                 pci_map_page(pdev,
3612                                              buffer_info->page,
3613                                              0, PAGE_SIZE,
3614                                              PCI_DMA_FROMDEVICE);
3615                 }
3616
3617                 if (!buffer_info->skb) {
3618                         int bufsz;
3619
3620                         if (adapter->rx_ps_hdr_size)
3621                                 bufsz = adapter->rx_ps_hdr_size;
3622                         else
3623                                 bufsz = adapter->rx_buffer_len;
3624                         bufsz += NET_IP_ALIGN;
3625                         skb = netdev_alloc_skb(netdev, bufsz);
3626
3627                         if (!skb) {
3628                                 adapter->alloc_rx_buff_failed++;
3629                                 goto no_buffers;
3630                         }
3631
3632                         /* Make buffer alignment 2 beyond a 16 byte boundary
3633                          * this will result in a 16 byte aligned IP header after
3634                          * the 14 byte MAC header is removed
3635                          */
3636                         skb_reserve(skb, NET_IP_ALIGN);
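                        /* Worked example of the comment above: with
                         * skb->data 16-byte aligned and NET_IP_ALIGN == 2,
                         * the 14-byte MAC header starts at offset 2, so
                         * the IP header lands at 2 + 14 = 16, back on a
                         * 16-byte boundary.
                         */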
3637
3638                         buffer_info->skb = skb;
3639                         buffer_info->dma = pci_map_single(pdev, skb->data,
3640                                                           bufsz,
3641                                                           PCI_DMA_FROMDEVICE);
3642
3643                 }
3644                 /* Refresh the desc even if buffer_addrs didn't change because
3645                  * each write-back erases this info. */
3646                 if (adapter->rx_ps_hdr_size) {
3647                         rx_desc->read.pkt_addr =
3648                              cpu_to_le64(buffer_info->page_dma);
3649                         rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
3650                 } else {
3651                         rx_desc->read.pkt_addr =
3652                              cpu_to_le64(buffer_info->dma);
3653                         rx_desc->read.hdr_addr = 0;
3654                 }
3655
3656                 i++;
3657                 if (i == rx_ring->count)
3658                         i = 0;
3659                 buffer_info = &rx_ring->buffer_info[i];
3660         }
3661
3662 no_buffers:
3663         if (rx_ring->next_to_use != i) {
3664                 rx_ring->next_to_use = i;
3665                 if (i == 0)
3666                         i = (rx_ring->count - 1);
3667                 else
3668                         i--;
3669
3670                 /* Force memory writes to complete before letting h/w
3671                  * know there are new descriptors to fetch.  (Only
3672                  * applicable for weak-ordered memory model archs,
3673                  * such as IA-64). */
3674                 wmb();
3675                 writel(i, adapter->hw.hw_addr + rx_ring->tail);
3676         }
3677 }
3678
3679 /**
3680  * igb_mii_ioctl - handle MII ioctls on copper PHYs
3681  * @netdev: network interface device structure
3682  * @ifr: pointer to the interface request structure
3683  * @cmd: ioctl command
3684  **/
3685 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3686 {
3687         struct igb_adapter *adapter = netdev_priv(netdev);
3688         struct mii_ioctl_data *data = if_mii(ifr);
3689
3690         if (adapter->hw.phy.media_type != e1000_media_type_copper)
3691                 return -EOPNOTSUPP;
3692
3693         switch (cmd) {
3694         case SIOCGMIIPHY:
3695                 data->phy_id = adapter->hw.phy.addr;
3696                 break;
3697         case SIOCGMIIREG:
3698                 if (!capable(CAP_NET_ADMIN))
3699                         return -EPERM;
3700                 if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
3701                                                      data->reg_num
3702                                                      & 0x1F, &data->val_out))
3703                         return -EIO;
3704                 break;
3705         case SIOCSMIIREG:
3706         default:
3707                 return -EOPNOTSUPP;
3708         }
3709         return 0;
3710 }
3711
3712 /**
3713  * igb_ioctl - dispatch supported ioctl commands
3714  * @netdev: network interface device structure
3715  * @ifr: pointer to the interface request structure
3716  * @cmd: ioctl command
3717  **/
3718 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3719 {
3720         switch (cmd) {
3721         case SIOCGMIIPHY:
3722         case SIOCGMIIREG:
3723         case SIOCSMIIREG:
3724                 return igb_mii_ioctl(netdev, ifr, cmd);
3725         default:
3726                 return -EOPNOTSUPP;
3727         }
3728 }
3729
3730 static void igb_vlan_rx_register(struct net_device *netdev,
3731                                  struct vlan_group *grp)
3732 {
3733         struct igb_adapter *adapter = netdev_priv(netdev);
3734         struct e1000_hw *hw = &adapter->hw;
3735         u32 ctrl, rctl;
3736
3737         igb_irq_disable(adapter);
3738         adapter->vlgrp = grp;
3739
3740         if (grp) {
3741                 /* enable VLAN tag insert/strip */
3742                 ctrl = rd32(E1000_CTRL);
3743                 ctrl |= E1000_CTRL_VME;
3744                 wr32(E1000_CTRL, ctrl);
3745
3746                 /* enable VLAN receive filtering */
3747                 rctl = rd32(E1000_RCTL);
3748                 rctl |= E1000_RCTL_VFE;
3749                 rctl &= ~E1000_RCTL_CFIEN;
3750                 wr32(E1000_RCTL, rctl);
3751                 igb_update_mng_vlan(adapter);
3752                 wr32(E1000_RLPML,
3753                                 adapter->max_frame_size + VLAN_TAG_SIZE);
3754         } else {
3755                 /* disable VLAN tag insert/strip */
3756                 ctrl = rd32(E1000_CTRL);
3757                 ctrl &= ~E1000_CTRL_VME;
3758                 wr32(E1000_CTRL, ctrl);
3759
3760                 /* disable VLAN filtering */
3761                 rctl = rd32(E1000_RCTL);
3762                 rctl &= ~E1000_RCTL_VFE;
3763                 wr32(E1000_RCTL, rctl);
3764                 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
3765                         igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3766                         adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
3767                 }
3768                 wr32(E1000_RLPML,
3769                                 adapter->max_frame_size);
3770         }
3771
3772         if (!test_bit(__IGB_DOWN, &adapter->state))
3773                 igb_irq_enable(adapter);
3774 }
3775
3776 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3777 {
3778         struct igb_adapter *adapter = netdev_priv(netdev);
3779         struct e1000_hw *hw = &adapter->hw;
3780         u32 vfta, index;
3781
3782         if ((adapter->hw.mng_cookie.status &
3783              E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3784             (vid == adapter->mng_vlan_id))
3785                 return;
3786         /* add VID to filter table */
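        /* The VFTA is a 128-entry array of 32-bit words: VID bits 5-11
         * select the word and bits 0-4 select the bit within it.  Worked
         * example (illustrative): vid == 100 gives index = 100 >> 5 = 3
         * and bit = 100 & 0x1F = 4.
         */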
3787         index = (vid >> 5) & 0x7F;
3788         vfta = array_rd32(E1000_VFTA, index);
3789         vfta |= (1 << (vid & 0x1F));
3790         igb_write_vfta(&adapter->hw, index, vfta);
3791 }
3792
3793 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3794 {
3795         struct igb_adapter *adapter = netdev_priv(netdev);
3796         struct e1000_hw *hw = &adapter->hw;
3797         u32 vfta, index;
3798
3799         igb_irq_disable(adapter);
3800         vlan_group_set_device(adapter->vlgrp, vid, NULL);
3801
3802         if (!test_bit(__IGB_DOWN, &adapter->state))
3803                 igb_irq_enable(adapter);
3804
3805         if ((adapter->hw.mng_cookie.status &
3806              E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3807             (vid == adapter->mng_vlan_id)) {
3808                 /* release control to f/w */
3809                 igb_release_hw_control(adapter);
3810                 return;
3811         }
3812
3813         /* remove VID from filter table */
3814         index = (vid >> 5) & 0x7F;
3815         vfta = array_rd32(E1000_VFTA, index);
3816         vfta &= ~(1 << (vid & 0x1F));
3817         igb_write_vfta(&adapter->hw, index, vfta);
3818 }
3819
3820 static void igb_restore_vlan(struct igb_adapter *adapter)
3821 {
3822         igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3823
3824         if (adapter->vlgrp) {
3825                 u16 vid;
3826                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
3827                         if (!vlan_group_get_device(adapter->vlgrp, vid))
3828                                 continue;
3829                         igb_vlan_rx_add_vid(adapter->netdev, vid);
3830                 }
3831         }
3832 }
3833
3834 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
3835 {
3836         struct e1000_mac_info *mac = &adapter->hw.mac;
3837
3838         mac->autoneg = 0;
3839
3840         /* Fiber NICs only allow 1000 Mbps full duplex */
3841         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
3842                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
3843                 dev_err(&adapter->pdev->dev,
3844                         "Unsupported Speed/Duplex configuration\n");
3845                 return -EINVAL;
3846         }
3847
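        /* The switch below relies on the ethtool constants summing to
         * unique values (SPEED_10/100/1000 are 10/100/1000 and
         * DUPLEX_HALF/FULL are 0/1), e.g. SPEED_100 + DUPLEX_FULL == 101,
         * which no other speed/duplex pair can produce.
         */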
3848         switch (spddplx) {
3849         case SPEED_10 + DUPLEX_HALF:
3850                 mac->forced_speed_duplex = ADVERTISE_10_HALF;
3851                 break;
3852         case SPEED_10 + DUPLEX_FULL:
3853                 mac->forced_speed_duplex = ADVERTISE_10_FULL;
3854                 break;
3855         case SPEED_100 + DUPLEX_HALF:
3856                 mac->forced_speed_duplex = ADVERTISE_100_HALF;
3857                 break;
3858         case SPEED_100 + DUPLEX_FULL:
3859                 mac->forced_speed_duplex = ADVERTISE_100_FULL;
3860                 break;
3861         case SPEED_1000 + DUPLEX_FULL:
3862                 mac->autoneg = 1;
3863                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
3864                 break;
3865         case SPEED_1000 + DUPLEX_HALF: /* not supported */
3866         default:
3867                 dev_err(&adapter->pdev->dev,
3868                         "Unsupported Speed/Duplex configuration\n");
3869                 return -EINVAL;
3870         }
3871         return 0;
3872 }
3873
3874
3875 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3876 {
3877         struct net_device *netdev = pci_get_drvdata(pdev);
3878         struct igb_adapter *adapter = netdev_priv(netdev);
3879         struct e1000_hw *hw = &adapter->hw;
3880         u32 ctrl, ctrl_ext, rctl, status;
3881         u32 wufc = adapter->wol;
3882 #ifdef CONFIG_PM
3883         int retval = 0;
3884 #endif
3885
3886         netif_device_detach(netdev);
3887
3888         if (netif_running(netdev)) {
3889                 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3890                 igb_down(adapter);
3891                 igb_free_irq(adapter);
3892         }
3893
3894 #ifdef CONFIG_PM
3895         retval = pci_save_state(pdev);
3896         if (retval)
3897                 return retval;
3898 #endif
3899
3900         status = rd32(E1000_STATUS);
3901         if (status & E1000_STATUS_LU)
3902                 wufc &= ~E1000_WUFC_LNKC;
3903
3904         if (wufc) {
3905                 igb_setup_rctl(adapter);
3906                 igb_set_multi(netdev);
3907
3908                 /* turn on all-multi mode if wake on multicast is enabled */
3909                 if (wufc & E1000_WUFC_MC) {
3910                         rctl = rd32(E1000_RCTL);
3911                         rctl |= E1000_RCTL_MPE;
3912                         wr32(E1000_RCTL, rctl);
3913                 }
3914
3915                 ctrl = rd32(E1000_CTRL);
3916                 /* advertise wake from D3Cold */
3917                 #define E1000_CTRL_ADVD3WUC 0x00100000
3918                 /* phy power management enable */
3919                 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3920                 ctrl |= E1000_CTRL_ADVD3WUC;
3921                 wr32(E1000_CTRL, ctrl);
3922
3923                 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3924                    adapter->hw.phy.media_type ==
3925                                         e1000_media_type_internal_serdes) {
3926                         /* keep the laser running in D3 */
3927                         ctrl_ext = rd32(E1000_CTRL_EXT);
3928                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3929                         wr32(E1000_CTRL_EXT, ctrl_ext);
3930                 }
3931
3932                 /* Allow time for pending master requests to run */
3933                 igb_disable_pcie_master(&adapter->hw);
3934
3935                 wr32(E1000_WUC, E1000_WUC_PME_EN);
3936                 wr32(E1000_WUFC, wufc);
3937                 pci_enable_wake(pdev, PCI_D3hot, 1);
3938                 pci_enable_wake(pdev, PCI_D3cold, 1);
3939         } else {
3940                 wr32(E1000_WUC, 0);
3941                 wr32(E1000_WUFC, 0);
3942                 pci_enable_wake(pdev, PCI_D3hot, 0);
3943                 pci_enable_wake(pdev, PCI_D3cold, 0);
3944         }
3945
3946         /* make sure adapter isn't asleep if manageability is enabled */
3947         if (adapter->en_mng_pt) {
3948                 pci_enable_wake(pdev, PCI_D3hot, 1);
3949                 pci_enable_wake(pdev, PCI_D3cold, 1);
3950         }
3951
3952         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
3953          * would have already happened in close and is redundant. */
3954         igb_release_hw_control(adapter);
3955
3956         pci_disable_device(pdev);
3957
3958         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3959
3960         return 0;
3961 }
3962
3963 #ifdef CONFIG_PM
3964 static int igb_resume(struct pci_dev *pdev)
3965 {
3966         struct net_device *netdev = pci_get_drvdata(pdev);
3967         struct igb_adapter *adapter = netdev_priv(netdev);
3968         struct e1000_hw *hw = &adapter->hw;
3969         int err; /* pci_enable_device() can return a negative errno */
3970
3971         pci_set_power_state(pdev, PCI_D0);
3972         pci_restore_state(pdev);
3973         err = pci_enable_device(pdev);
3974         if (err) {
3975                 dev_err(&pdev->dev,
3976                         "igb: Cannot enable PCI device from suspend\n");
3977                 return err;
3978         }
3979         pci_set_master(pdev);
3980
3981         pci_enable_wake(pdev, PCI_D3hot, 0);
3982         pci_enable_wake(pdev, PCI_D3cold, 0);
3983
3984         if (netif_running(netdev)) {
3985                 err = igb_request_irq(adapter);
3986                 if (err)
3987                         return err;
3988         }
3989
3990         /* e1000_power_up_phy(adapter); */
3991
3992         igb_reset(adapter);
3993         wr32(E1000_WUS, ~0);
3994
3995         igb_init_manageability(adapter);
3996
3997         if (netif_running(netdev))
3998                 igb_up(adapter);
3999
4000         netif_device_attach(netdev);
4001
4002         /* let the f/w know that the h/w is now under the control of the
4003          * driver. */
4004         igb_get_hw_control(adapter);
4005
4006         return 0;
4007 }
4008 #endif
4009
4010 static void igb_shutdown(struct pci_dev *pdev)
4011 {
4012         igb_suspend(pdev, PMSG_SUSPEND);
4013 }
4014
4015 #ifdef CONFIG_NET_POLL_CONTROLLER
4016 /*
4017  * Polling 'interrupt' - used by things like netconsole to send skbs
4018  * without having to re-enable interrupts. It's not called while
4019  * the interrupt routine is executing.
4020  */
4021 static void igb_netpoll(struct net_device *netdev)
4022 {
4023         struct igb_adapter *adapter = netdev_priv(netdev);
4024         int i;
4025         int work_done = 0;
4026
4027         igb_irq_disable(adapter);
4028         for (i = 0; i < adapter->num_tx_queues; i++)
4029                 igb_clean_tx_irq(adapter, &adapter->tx_ring[i]);
4030
4031         for (i = 0; i < adapter->num_rx_queues; i++)
4032                 igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i],
4033                                      &work_done,
4034                                      adapter->rx_ring[i].napi.weight);
4035
4036         igb_irq_enable(adapter);
4037 }
4038 #endif /* CONFIG_NET_POLL_CONTROLLER */
4039
4040 /**
4041  * igb_io_error_detected - called when PCI error is detected
4042  * @pdev: Pointer to PCI device
4043  * @state: The current pci connection state
4044  *
4045  * This function is called after a PCI bus error affecting
4046  * this device has been detected.
4047  */
4048 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
4049                                               pci_channel_state_t state)
4050 {
4051         struct net_device *netdev = pci_get_drvdata(pdev);
4052         struct igb_adapter *adapter = netdev_priv(netdev);
4053
4054         netif_device_detach(netdev);
4055
4056         if (netif_running(netdev))
4057                 igb_down(adapter);
4058         pci_disable_device(pdev);
4059
4060         /* Request a slot reset. */
4061         return PCI_ERS_RESULT_NEED_RESET;
4062 }
4063
4064 /**
4065  * igb_io_slot_reset - called after the pci bus has been reset.
4066  * @pdev: Pointer to PCI device
4067  *
4068  * Restart the card from scratch, as if from a cold-boot. Implementation
4069  * resembles the first-half of the igb_resume routine.
4070  */
4071 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4072 {
4073         struct net_device *netdev = pci_get_drvdata(pdev);
4074         struct igb_adapter *adapter = netdev_priv(netdev);
4075         struct e1000_hw *hw = &adapter->hw;
4076
4077         if (pci_enable_device(pdev)) {
4078                 dev_err(&pdev->dev,
4079                         "Cannot re-enable PCI device after reset.\n");
4080                 return PCI_ERS_RESULT_DISCONNECT;
4081         }
4082         pci_set_master(pdev);
4083         pci_restore_state(pdev);
4084
4085         pci_enable_wake(pdev, PCI_D3hot, 0);
4086         pci_enable_wake(pdev, PCI_D3cold, 0);
4087
4088         igb_reset(adapter);
4089         wr32(E1000_WUS, ~0);
4090
4091         return PCI_ERS_RESULT_RECOVERED;
4092 }
4093
4094 /**
4095  * igb_io_resume - called when traffic can start flowing again.
4096  * @pdev: Pointer to PCI device
4097  *
4098  * This callback is called when the error recovery driver tells us that
4099  * its OK to resume normal operation. Implementation resembles the
4100  * second-half of the igb_resume routine.
4101  */
4102 static void igb_io_resume(struct pci_dev *pdev)
4103 {
4104         struct net_device *netdev = pci_get_drvdata(pdev);
4105         struct igb_adapter *adapter = netdev_priv(netdev);
4106
4107         igb_init_manageability(adapter);
4108
4109         if (netif_running(netdev)) {
4110                 if (igb_up(adapter)) {
4111                         dev_err(&pdev->dev, "igb_up failed after reset\n");
4112                         return;
4113                 }
4114         }
4115
4116         netif_device_attach(netdev);
4117
4118         /* let the f/w know that the h/w is now under the control of the
4119          * driver. */
4120         igb_get_hw_control(adapter);
4121
4122 }
4123
4124 /* igb_main.c */