qlge: Fix receive packets drop.
pandora-kernel.git: drivers/net/ethernet/qlogic/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCoE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
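/* Editor's note: an illustrative sketch (editorial addition, not driver
 * code) of how the three semaphore helpers above are normally paired.
 * The register read in the middle is only a placeholder for real work
 * done while the semaphore is held.
 */
static int ql_example_read_under_flash_sem(struct ql_adapter *qdev, u32 *sts)
{
        int status;

        /* Spins up to 30 * 100us before giving up with -ETIMEDOUT. */
        status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
        if (status)
                return status;
        *sts = ql_read32(qdev, STS);    /* placeholder for flash access */
        ql_sem_unlock(qdev, SEM_FLASH_MASK);    /* always release */
        return 0;
}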
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialization
163  * process, but is also used by kernel thread APIs such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
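/* Editor's note: illustrative sketch (editorial addition) of the calling
 * pattern used throughout this file: poll for the "ready" bit with
 * ql_wait_reg_rdy() before touching an indirect register, and bail out
 * on timeout.
 */
static int ql_example_write_mac_addr_idx(struct ql_adapter *qdev, u32 idx_val)
{
        int status;

        status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        if (status)
                return status;  /* -ETIMEDOUT (no err_bit requested here) */
        ql_write32(qdev, MAC_ADDR_IDX, idx_val);
        return 0;
}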
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 goto lock_failed;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
lock_failed:
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
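/* Editor's note: a hedged usage sketch (editorial addition).  Downloading a
 * completion queue init control block would look roughly like this; struct
 * cqicb and CFG_LCQ are assumed from qlge.h, and the driver's own ring
 * bring-up code is the authoritative reference.
 */
static int ql_example_load_cq_icb(struct ql_adapter *qdev,
                                  struct cqicb *cqicb, u16 cq_id)
{
        /* ql_write_cfg() maps the block, takes SEM_ICB_MASK, points
         * ICB_L/ICB_H at it, kicks CFG and waits for the bit to clear.
         */
        return ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, cq_id);
}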
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
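/* Editor's note: illustrative sketch (editorial addition) of reading back a
 * CAM entry for debug.  A CAM MAC entry is three dwords (lower, upper and
 * the output/routing word), as the function above shows; holding the MAC
 * address semaphore around the access is assumed.
 */
static int ql_example_dump_cam_entry(struct ql_adapter *qdev, u16 index,
                                     u32 entry[3])
{
        int status;

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, index, entry);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}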
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                            and possibly the function id.  Right now we hardcode
413                            the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, the addr actually holds a bit that
429                          * either enables or disables the vlan id we are
430                          * addressing. It's either MAC_ADDR_E on or off.
431                          * That's bit-27 we're talking about.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * especially in a bonding environment.
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init mac address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
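/* Editor's note: illustrative sketch (editorial addition).  Enabling the
 * broadcast slot, for example, pairs the routing-index semaphore with
 * ql_set_routing_reg(); the routing setup elsewhere in the driver is
 * expected to program several slots this way.
 */
static int ql_example_enable_bcast_routing(struct ql_adapter *qdev)
{
        int status;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return status;
        /* Send broadcast frames to the default (slow path) queue. */
        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}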
651
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're MSIX multi interrupts and
676                  * it's not the default (zeroeth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're MSIX multi interrupts and
700          * it's not the default (zeroeth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
716
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does an atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
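/* Editor's note: illustrative sketch (editorial addition) of the
 * disable/enable pairing described above for the legacy/MSI/vector-0 case:
 * every disable bumps irq_cnt, and the enable path re-arms the interrupt
 * only when the count drops back to zero.
 */
static void ql_example_service_vector(struct ql_adapter *qdev, u32 intr)
{
        ql_disable_completion_interrupt(qdev, intr);    /* irq_cnt++ */
        /* ... process the completion queue(s) for this vector ... */
        ql_enable_completion_interrupt(qdev, intr);     /* re-arm at zero */
}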
732
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770          /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu endian
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
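/* Editor's note (worked example, editorial addition): ql_read_flash_word()
 * addresses the flash in dwords, so both helpers above divide their byte
 * offsets by sizeof(u32).  For the 8012 the second port's parameters start
 * sizeof(struct flash_params_8012) / 4 dwords after the first port's; for
 * the 8000 the per-function start is the fixed FUNC0/FUNC1_FLASH_OFFSET,
 * likewise converted to a dword index.
 */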
893
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
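/* Editor's note: illustrative sketch (editorial addition) of reading one
 * 64-bit XGMAC statistic.  "reg" would be the offset of a statistics
 * register from qlge.h, and holding this port's XGMAC semaphore
 * (qdev->xg_sem_mask) around the access is assumed.
 */
static int ql_example_read_xgmac_stat(struct ql_adapter *qdev, u32 reg,
                                      u64 *val)
{
        int status;

        status = ql_sem_spinlock(qdev, qdev->xg_sem_mask);
        if (status)
                return status;
        status = ql_read_xgmac_reg64(qdev, reg, val);   /* lo, then hi dword */
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}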
958
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethtool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable the transmitter and clear its reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable the receiver and clear its reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Turn on jumbo. */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled.        */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
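/* Editor's note (worked example, editorial addition): the "master" page is a
 * compound allocation of ql_lbq_block_size() = PAGE_SIZE << lbq_buf_order
 * bytes that gets carved into lbq_buf_size chunks.  With 4 KB pages,
 * lbq_buf_order = 1 and 2 KB large buffers (the real values depend on MTU),
 * one allocation yields an 8 KB block shared by four descriptors; the
 * get_page() above takes a reference for every chunk handed out except the
 * last, so the page survives until each chunk's consumer releases it.
 */
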
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                         rx_ring->lbq_buf_size);
1197                         *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If it's the zeroeth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there are more than 6 frags,
1314                          * then it's an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
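/* Editor's note (worked example, editorial addition): for a send with ten
 * fragments, map[0] is skb->data, map[1..6] hold frags 0-5 and map[7] holds
 * the OAL itself; the remaining frags' address descriptors live inside the
 * OAL.  That is why only elements 0 and 7 above are released with
 * pci_unmap_single() while everything else is a page mapping.
 */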
1339
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the size
1421                          * of each address descriptor in the OAL.
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
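                             /* From here on tbd points into the OAL, so the
                              * remaining frags land in the external list.  The
                              * OAL mapping is also counted in map_idx so that
                              * ql_unmap_send() will release it as well.
                              */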
1432                 }
1433
1434                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435                                        DMA_TO_DEVICE);
1436
1437                 err = dma_mapping_error(&qdev->pdev->dev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   skb_frag_size(frag));
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then map_idx will be 1
1461          * and only the skb->data area gets unmapped.  Otherwise we
1462          * pass in the number of segments that mapped successfully
1463          * so they can be unmapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct napi_struct *napi = &rx_ring->napi;
1479
1480         napi->dev = qdev->ndev;
1481
1482         skb = napi_get_frags(napi);
1483         if (!skb) {
1484                 netif_err(qdev, drv, qdev->ndev,
1485                           "Couldn't get an skb, exiting.\n");
1486                 rx_ring->rx_dropped++;
1487                 put_page(lbq_desc->p.pg_chunk.page);
1488                 return;
1489         }
1490         prefetch(lbq_desc->p.pg_chunk.va);
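             /* Attach the page chunk as a fragment of the GRO skb and do
              * the length/truesize accounting by hand, since the skb from
              * napi_get_frags() starts out empty.
              */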
1491         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492                              lbq_desc->p.pg_chunk.page,
1493                              lbq_desc->p.pg_chunk.offset,
1494                              length);
1495
1496         skb->len += length;
1497         skb->data_len += length;
1498         skb->truesize += length;
1499         skb_shinfo(skb)->nr_frags++;
1500
1501         rx_ring->rx_packets++;
1502         rx_ring->rx_bytes += length;
1503         skb->ip_summed = CHECKSUM_UNNECESSARY;
1504         skb_record_rx_queue(skb, rx_ring->cq_id);
1505         if (vlan_id != 0xffff)
1506                 __vlan_hwaccel_put_tag(skb, vlan_id);
1507         napi_gro_frags(napi);
1508 }
1509
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512                                         struct rx_ring *rx_ring,
1513                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1514                                         u32 length,
1515                                         u16 vlan_id)
1516 {
1517         struct net_device *ndev = qdev->ndev;
1518         struct sk_buff *skb = NULL;
1519         void *addr;
1520         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521         struct napi_struct *napi = &rx_ring->napi;
1522
1523         skb = netdev_alloc_skb(ndev, length);
1524         if (!skb) {
1525                 netif_err(qdev, drv, qdev->ndev,
1526                           "Couldn't get an skb, need to unwind!\n");
1527                 rx_ring->rx_dropped++;
1528                 put_page(lbq_desc->p.pg_chunk.page);
1529                 return;
1530         }
1531
1532         addr = lbq_desc->p.pg_chunk.va;
1533         prefetch(addr);
1534
1535
1536         /* Frame error, so drop the packet. */
1537         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538                 netif_info(qdev, drv, qdev->ndev,
1539                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540                 rx_ring->rx_errors++;
1541                 goto err_out;
1542         }
1543
1544         /* The max framesize filter on this chip is set higher than
1545          * MTU since FCoE uses 2k frames.
1546          */
1547         if (length > ndev->mtu + ETH_HLEN) {
1548                 netif_err(qdev, drv, qdev->ndev,
1549                           "Segment too large, dropping.\n");
1550                 rx_ring->rx_dropped++;
1551                 goto err_out;
1552         }
1553         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555                      "%d bytes of headers and data in large. Chain page to new skb.\n",
1556                      length);
1557         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559                                 length-ETH_HLEN);
1560         skb->len += length-ETH_HLEN;
1561         skb->data_len += length-ETH_HLEN;
1562         skb->truesize += length-ETH_HLEN;
1563
1564         rx_ring->rx_packets++;
1565         rx_ring->rx_bytes += skb->len;
1566         skb->protocol = eth_type_trans(skb, ndev);
1567         skb_checksum_none_assert(skb);
1568
1569         if ((ndev->features & NETIF_F_RXCSUM) &&
1570                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571                 /* TCP frame. */
1572                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574                                      "TCP checksum done!\n");
1575                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1576                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578                         /* Unfragmented ipv4 UDP frame. */
1579                         struct iphdr *iph = (struct iphdr *) skb->data;
1580                         if (!(iph->frag_off &
1581                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1584                                              qdev->ndev,
1585                                              "UDP checksum done!\n");
1586                         }
1587                 }
1588         }
1589
1590         skb_record_rx_queue(skb, rx_ring->cq_id);
1591         if (vlan_id != 0xffff)
1592                 __vlan_hwaccel_put_tag(skb, vlan_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594                 napi_gro_receive(napi, skb);
1595         else
1596                 netif_receive_skb(skb);
1597         return;
1598 err_out:
1599         dev_kfree_skb_any(skb);
1600         put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605                                         struct rx_ring *rx_ring,
1606                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1607                                         u32 length,
1608                                         u16 vlan_id)
1609 {
1610         struct net_device *ndev = qdev->ndev;
1611         struct sk_buff *skb = NULL;
1612         struct sk_buff *new_skb = NULL;
1613         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615         skb = sbq_desc->p.skb;
1616         /* Allocate new_skb and copy */
1617         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618         if (new_skb == NULL) {
1619                 netif_err(qdev, probe, qdev->ndev,
1620                           "No skb available, drop the packet.\n");
1621                 rx_ring->rx_dropped++;
1622                 return;
1623         }
1624         skb_reserve(new_skb, NET_IP_ALIGN);
1625
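             /* Sync the small buffer for CPU access, copy the frame into
              * the new skb, then hand the buffer back to the device so it
              * can be reused.
              */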
1626         pci_dma_sync_single_for_cpu(qdev->pdev,
1627                                     dma_unmap_addr(sbq_desc, mapaddr),
1628                                     dma_unmap_len(sbq_desc, maplen),
1629                                     PCI_DMA_FROMDEVICE);
1630
1631         memcpy(skb_put(new_skb, length), skb->data, length);
1632
1633         pci_dma_sync_single_for_device(qdev->pdev,
1634                                        dma_unmap_addr(sbq_desc, mapaddr),
1635                                        dma_unmap_len(sbq_desc, maplen),
1636                                        PCI_DMA_FROMDEVICE);
1637         skb = new_skb;
1638
1639         /* Frame error, so drop the packet. */
1640         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1641                 netif_info(qdev, drv, qdev->ndev,
1642                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1643                 dev_kfree_skb_any(skb);
1644                 rx_ring->rx_errors++;
1645                 return;
1646         }
1647
1648         /* loopback self test for ethtool */
1649         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650                 ql_check_lb_frame(qdev, skb);
1651                 dev_kfree_skb_any(skb);
1652                 return;
1653         }
1654
1655         /* The max framesize filter on this chip is set higher than
1656          * MTU since FCoE uses 2k frames.
1657          */
1658         if (skb->len > ndev->mtu + ETH_HLEN) {
1659                 dev_kfree_skb_any(skb);
1660                 rx_ring->rx_dropped++;
1661                 return;
1662         }
1663
1664         prefetch(skb->data);
1665         skb->dev = ndev;
1666         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1667                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668                              "%s Multicast.\n",
1669                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1675         }
1676         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1677                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678                              "Promiscuous Packet.\n");
1679
1680         rx_ring->rx_packets++;
1681         rx_ring->rx_bytes += skb->len;
1682         skb->protocol = eth_type_trans(skb, ndev);
1683         skb_checksum_none_assert(skb);
1684
1685         /* If rx checksum is on, and there are no
1686          * csum or frame errors.
1687          */
1688         if ((ndev->features & NETIF_F_RXCSUM) &&
1689                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690                 /* TCP frame. */
1691                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1692                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693                                      "TCP checksum done!\n");
1694                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1695                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697                         /* Unfragmented ipv4 UDP frame. */
1698                         struct iphdr *iph = (struct iphdr *) skb->data;
1699                         if (!(iph->frag_off &
1700                                 htons(IP_MF|IP_OFFSET))) {
1701                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1703                                              qdev->ndev,
1704                                              "UDP checksum done!\n");
1705                         }
1706                 }
1707         }
1708
1709         skb_record_rx_queue(skb, rx_ring->cq_id);
1710         if (vlan_id != 0xffff)
1711                 __vlan_hwaccel_put_tag(skb, vlan_id);
1712         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1713                 napi_gro_receive(&rx_ring->napi, skb);
1714         else
1715                 netif_receive_skb(skb);
1716 }
1717
1718 static void ql_realign_skb(struct sk_buff *skb, int len)
1719 {
1720         void *temp_addr = skb->data;
1721
1722         /* Undo the skb_reserve(skb,32) we did before
1723          * giving to hardware, and realign data on
1724          * a 2-byte boundary.
1725          */
1726         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1727         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1728         skb_copy_to_linear_data(skb, temp_addr,
1729                 (unsigned int)len);
1730 }
1731
1732 /*
1733  * This function builds an skb for the given inbound
1734  * completion.  It will be rewritten for readability in the near
1735  * future, but for now it works well.
1736  */
1737 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1738                                        struct rx_ring *rx_ring,
1739                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1740 {
1741         struct bq_desc *lbq_desc;
1742         struct bq_desc *sbq_desc;
1743         struct sk_buff *skb = NULL;
1744         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1745         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1746
1747         /*
1748          * Handle the header buffer if present.
1749          */
1750         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1751             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1752                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1753                              "Header of %d bytes in small buffer.\n", hdr_len);
1754                 /*
1755                  * Headers fit nicely into a small buffer.
1756                  */
1757                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758                 pci_unmap_single(qdev->pdev,
1759                                 dma_unmap_addr(sbq_desc, mapaddr),
1760                                 dma_unmap_len(sbq_desc, maplen),
1761                                 PCI_DMA_FROMDEVICE);
1762                 skb = sbq_desc->p.skb;
1763                 ql_realign_skb(skb, hdr_len);
1764                 skb_put(skb, hdr_len);
1765                 sbq_desc->p.skb = NULL;
1766         }
1767
1768         /*
1769          * Handle the data buffer(s).
1770          */
1771         if (unlikely(!length)) {        /* Is there data too? */
1772                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1773                              "No Data buffer in this packet.\n");
1774                 return skb;
1775         }
1776
1777         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1778                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1779                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780                                      "Headers in small, data of %d bytes in small, combine them.\n",
1781                                      length);
1782                         /*
1783                          * Data is less than small buffer size so it's
1784                          * stuffed in a small buffer.
1785                          * For this case we append the data
1786                          * from the "data" small buffer to the "header" small
1787                          * buffer.
1788                          */
1789                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1790                         pci_dma_sync_single_for_cpu(qdev->pdev,
1791                                                     dma_unmap_addr
1792                                                     (sbq_desc, mapaddr),
1793                                                     dma_unmap_len
1794                                                     (sbq_desc, maplen),
1795                                                     PCI_DMA_FROMDEVICE);
1796                         memcpy(skb_put(skb, length),
1797                                sbq_desc->p.skb->data, length);
1798                         pci_dma_sync_single_for_device(qdev->pdev,
1799                                                        dma_unmap_addr
1800                                                        (sbq_desc,
1801                                                         mapaddr),
1802                                                        dma_unmap_len
1803                                                        (sbq_desc,
1804                                                         maplen),
1805                                                        PCI_DMA_FROMDEVICE);
1806                 } else {
1807                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1808                                      "%d bytes in a single small buffer.\n",
1809                                      length);
1810                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1811                         skb = sbq_desc->p.skb;
1812                         ql_realign_skb(skb, length);
1813                         skb_put(skb, length);
1814                         pci_unmap_single(qdev->pdev,
1815                                          dma_unmap_addr(sbq_desc,
1816                                                         mapaddr),
1817                                          dma_unmap_len(sbq_desc,
1818                                                        maplen),
1819                                          PCI_DMA_FROMDEVICE);
1820                         sbq_desc->p.skb = NULL;
1821                 }
1822         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1823                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1824                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1825                                      "Header in small, %d bytes in large. Chain large to small!\n",
1826                                      length);
1827                         /*
1828                          * The data is in a single large buffer.  We
1829                          * chain it to the header buffer's skb and let
1830                          * it rip.
1831                          */
1832                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1833                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1834                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1835                                      lbq_desc->p.pg_chunk.offset, length);
1836                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1837                                                 lbq_desc->p.pg_chunk.offset,
1838                                                 length);
1839                         skb->len += length;
1840                         skb->data_len += length;
1841                         skb->truesize += length;
1842                 } else {
1843                         /*
1844                          * The headers and data are in a single large buffer.  We
1845                          * chain it to a new skb and pull the headers up.  This can
1846                          * happen with jumbo mtu on a non-TCP/UDP frame.
1847                          */
1848                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1849                         skb = netdev_alloc_skb(qdev->ndev, length);
1850                         if (skb == NULL) {
1851                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1852                                              "No skb available, drop the packet.\n");
1853                                 return NULL;
1854                         }
1855                         pci_unmap_page(qdev->pdev,
1856                                        dma_unmap_addr(lbq_desc,
1857                                                       mapaddr),
1858                                        dma_unmap_len(lbq_desc, maplen),
1859                                        PCI_DMA_FROMDEVICE);
1860                         skb_reserve(skb, NET_IP_ALIGN);
1861                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1862                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1863                                      length);
1864                         skb_fill_page_desc(skb, 0,
1865                                                 lbq_desc->p.pg_chunk.page,
1866                                                 lbq_desc->p.pg_chunk.offset,
1867                                                 length);
1868                         skb->len += length;
1869                         skb->data_len += length;
1870                         skb->truesize += length;
1871                         length -= length;
1872                         __pskb_pull_tail(skb,
1873                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1874                                 VLAN_ETH_HLEN : ETH_HLEN);
1875                 }
1876         } else {
1877                 /*
1878                  * The data is in a chain of large buffers
1879                  * pointed to by a small buffer.  We loop
1880                  * through and chain them to our small header
1881                  * buffer's skb.
1882                  * frags:  There are 18 max frags and our small
1883                  *         buffer will hold 32 of them. The thing is,
1884                  *         we'll use 3 max for our 9000 byte jumbo
1885                  *         frames.  If the MTU goes up we could
1886                  *         eventually be in trouble.
1887                  */
1888                 int size, i = 0;
1889                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1890                 pci_unmap_single(qdev->pdev,
1891                                  dma_unmap_addr(sbq_desc, mapaddr),
1892                                  dma_unmap_len(sbq_desc, maplen),
1893                                  PCI_DMA_FROMDEVICE);
1894                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1895                         /*
1896                          * This is a non TCP/UDP IP frame, so
1897                          * the headers aren't split into a small
1898                          * buffer.  We have to use the small buffer
1899                          * that contains our sg list as our skb to
1900                          * send upstairs. Copy the sg list here to
1901                          * a local buffer and use it to find the
1902                          * pages to chain.
1903                          */
1904                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905                                      "%d bytes of headers & data in chain of large.\n",
1906                                      length);
1907                         skb = sbq_desc->p.skb;
1908                         sbq_desc->p.skb = NULL;
1909                         skb_reserve(skb, NET_IP_ALIGN);
1910                 }
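                     /* Chain one large-buffer page chunk per iteration until
                      * the entire frame length has been attached to the skb.
                      */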
1911                 while (length > 0) {
1912                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1913                         size = (length < rx_ring->lbq_buf_size) ? length :
1914                                 rx_ring->lbq_buf_size;
1915
1916                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917                                      "Adding page %d to skb for %d bytes.\n",
1918                                      i, size);
1919                         skb_fill_page_desc(skb, i,
1920                                                 lbq_desc->p.pg_chunk.page,
1921                                                 lbq_desc->p.pg_chunk.offset,
1922                                                 size);
1923                         skb->len += size;
1924                         skb->data_len += size;
1925                         skb->truesize += size;
1926                         length -= size;
1927                         i++;
1928                 }
1929                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1930                                 VLAN_ETH_HLEN : ETH_HLEN);
1931         }
1932         return skb;
1933 }
1934
1935 /* Process an inbound completion from an rx ring. */
1936 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1937                                    struct rx_ring *rx_ring,
1938                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1939                                    u16 vlan_id)
1940 {
1941         struct net_device *ndev = qdev->ndev;
1942         struct sk_buff *skb = NULL;
1943
1944         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1945
1946         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1947         if (unlikely(!skb)) {
1948                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1949                              "No skb available, drop packet.\n");
1950                 rx_ring->rx_dropped++;
1951                 return;
1952         }
1953
1954         /* Frame error, so drop the packet. */
1955         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1956                 netif_info(qdev, drv, qdev->ndev,
1957                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1958                 dev_kfree_skb_any(skb);
1959                 rx_ring->rx_errors++;
1960                 return;
1961         }
1962
1963         /* The max framesize filter on this chip is set higher than
1964          * MTU since FCoE uses 2k frames.
1965          */
1966         if (skb->len > ndev->mtu + ETH_HLEN) {
1967                 dev_kfree_skb_any(skb);
1968                 rx_ring->rx_dropped++;
1969                 return;
1970         }
1971
1972         /* loopback self test for ethtool */
1973         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1974                 ql_check_lb_frame(qdev, skb);
1975                 dev_kfree_skb_any(skb);
1976                 return;
1977         }
1978
1979         prefetch(skb->data);
1980         skb->dev = ndev;
1981         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1982                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1983                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1985                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1987                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1989                 rx_ring->rx_multicast++;
1990         }
1991         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1992                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993                              "Promiscuous Packet.\n");
1994         }
1995
1996         skb->protocol = eth_type_trans(skb, ndev);
1997         skb_checksum_none_assert(skb);
1998
1999         /* If rx checksum is on, and there are no
2000          * csum or frame errors.
2001          */
2002         if ((ndev->features & NETIF_F_RXCSUM) &&
2003                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2004                 /* TCP frame. */
2005                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2006                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2007                                      "TCP checksum done!\n");
2008                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2009                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2010                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2011                         /* Unfragmented ipv4 UDP frame. */
2012                         struct iphdr *iph = (struct iphdr *) skb->data;
2013                         if (!(iph->frag_off &
2014                                 htons(IP_MF|IP_OFFSET))) {
2015                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2016                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2017                                              "UDP checksum done!\n");
2018                         }
2019                 }
2020         }
2021
2022         rx_ring->rx_packets++;
2023         rx_ring->rx_bytes += skb->len;
2024         skb_record_rx_queue(skb, rx_ring->cq_id);
2025         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2026                 __vlan_hwaccel_put_tag(skb, vlan_id);
2027         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2028                 napi_gro_receive(&rx_ring->napi, skb);
2029         else
2030                 netif_receive_skb(skb);
2031 }
2032
2033 /* Process an inbound completion from an rx ring. */
2034 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2035                                         struct rx_ring *rx_ring,
2036                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2037 {
2038         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2039         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2040                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2041                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2042
2043         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2044
2045         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2046                 /* The data and headers are split into
2047                  * separate buffers.
2048                  */
2049                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2050                                                 vlan_id);
2051         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2052                 /* The data fit in a single small buffer.
2053                  * Allocate a new skb, copy the data and
2054                  * return the buffer to the free pool.
2055                  */
2056                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2057                                                 length, vlan_id);
2058         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2059                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2060                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2061                 /* TCP packet in a page chunk that's been checksummed.
2062                  * Tack it on to our GRO skb and let it go.
2063                  */
2064                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2065                                                 length, vlan_id);
2066         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2067                 /* Non-TCP packet in a page chunk. Allocate an
2068                  * skb, tack it on frags, and send it up.
2069                  */
2070                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2071                                                 length, vlan_id);
2072         } else {
2073                 /* Non-TCP/UDP large frames that span multiple buffers
2074                  * can be processed correctly by the split frame logic.
2075                  */
2076                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2077                                                 vlan_id);
2078         }
2079
2080         return (unsigned long)length;
2081 }
2082
2083 /* Process an outbound completion from an rx ring. */
2084 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2085                                    struct ob_mac_iocb_rsp *mac_rsp)
2086 {
2087         struct tx_ring *tx_ring;
2088         struct tx_ring_desc *tx_ring_desc;
2089
2090         QL_DUMP_OB_MAC_RSP(mac_rsp);
2091         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2092         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2093         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2094         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2095         tx_ring->tx_packets++;
2096         dev_kfree_skb(tx_ring_desc->skb);
2097         tx_ring_desc->skb = NULL;
2098
2099         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2100                                         OB_MAC_IOCB_RSP_S |
2101                                         OB_MAC_IOCB_RSP_L |
2102                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2103                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2104                         netif_warn(qdev, tx_done, qdev->ndev,
2105                                    "Total descriptor length did not match transfer length.\n");
2106                 }
2107                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2108                         netif_warn(qdev, tx_done, qdev->ndev,
2109                                    "Frame too short to be valid, not sent.\n");
2110                 }
2111                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2112                         netif_warn(qdev, tx_done, qdev->ndev,
2113                                    "Frame too long, but sent anyway.\n");
2114                 }
2115                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2116                         netif_warn(qdev, tx_done, qdev->ndev,
2117                                    "PCI backplane error. Frame not sent.\n");
2118                 }
2119         }
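             /* Return the completed descriptor slot to the ring's free count. */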
2120         atomic_inc(&tx_ring->tx_count);
2121 }
2122
2123 /* Fire up a handler to reset the MPI processor. */
2124 void ql_queue_fw_error(struct ql_adapter *qdev)
2125 {
2126         ql_link_off(qdev);
2127         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2128 }
2129
2130 void ql_queue_asic_error(struct ql_adapter *qdev)
2131 {
2132         ql_link_off(qdev);
2133         ql_disable_interrupts(qdev);
2134         /* Clear adapter up bit to signal the recovery
2135          * process that it shouldn't kill the reset worker
2136          * thread
2137          */
2138         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2139         /* Set asic recovery bit to indicate to the reset process that
2140          * we are in fatal error recovery rather than a normal close
2141          */
2142         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2143         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2144 }
2145
2146 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2147                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2148 {
2149         switch (ib_ae_rsp->event) {
2150         case MGMT_ERR_EVENT:
2151                 netif_err(qdev, rx_err, qdev->ndev,
2152                           "Management Processor Fatal Error.\n");
2153                 ql_queue_fw_error(qdev);
2154                 return;
2155
2156         case CAM_LOOKUP_ERR_EVENT:
2157                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2158                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2159                 ql_queue_asic_error(qdev);
2160                 return;
2161
2162         case SOFT_ECC_ERROR_EVENT:
2163                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2164                 ql_queue_asic_error(qdev);
2165                 break;
2166
2167         case PCI_ERR_ANON_BUF_RD:
2168                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2169                                         "anonymous buffers from rx_ring %d.\n",
2170                                         ib_ae_rsp->q_id);
2171                 ql_queue_asic_error(qdev);
2172                 break;
2173
2174         default:
2175                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2176                           ib_ae_rsp->event);
2177                 ql_queue_asic_error(qdev);
2178                 break;
2179         }
2180 }
2181
2182 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2183 {
2184         struct ql_adapter *qdev = rx_ring->qdev;
2185         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2186         struct ob_mac_iocb_rsp *net_rsp = NULL;
2187         int count = 0;
2188
2189         struct tx_ring *tx_ring;
2190         /* While there are entries in the completion queue. */
2191         while (prod != rx_ring->cnsmr_idx) {
2192
2193                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2194                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2195                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2196
2197                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
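                     /* Don't read the completion entry contents before the
                      * producer index update that made it valid.
                      */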
2198                 rmb();
2199                 switch (net_rsp->opcode) {
2200
2201                 case OPCODE_OB_MAC_TSO_IOCB:
2202                 case OPCODE_OB_MAC_IOCB:
2203                         ql_process_mac_tx_intr(qdev, net_rsp);
2204                         break;
2205                 default:
2206                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2208                                      net_rsp->opcode);
2209                 }
2210                 count++;
2211                 ql_update_cq(rx_ring);
2212                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2213         }
2214         if (!net_rsp)
2215                 return 0;
2216         ql_write_cq_idx(rx_ring);
2217         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2218         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2219                 if (atomic_read(&tx_ring->queue_stopped) &&
2220                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2221                         /*
2222                          * The queue got stopped because the tx_ring was full.
2223                          * Wake it up, because it's now at least 25% empty.
2224                          */
2225                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2226         }
2227
2228         return count;
2229 }
2230
2231 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2232 {
2233         struct ql_adapter *qdev = rx_ring->qdev;
2234         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2235         struct ql_net_rsp_iocb *net_rsp;
2236         int count = 0;
2237
2238         /* While there are entries in the completion queue. */
2239         while (prod != rx_ring->cnsmr_idx) {
2240
2241                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2242                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2243                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2244
2245                 net_rsp = rx_ring->curr_entry;
2246                 rmb();
2247                 switch (net_rsp->opcode) {
2248                 case OPCODE_IB_MAC_IOCB:
2249                         ql_process_mac_rx_intr(qdev, rx_ring,
2250                                                (struct ib_mac_iocb_rsp *)
2251                                                net_rsp);
2252                         break;
2253
2254                 case OPCODE_IB_AE_IOCB:
2255                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2256                                                 net_rsp);
2257                         break;
2258                 default:
2259                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2260                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2261                                      net_rsp->opcode);
2262                         break;
2263                 }
2264                 count++;
2265                 ql_update_cq(rx_ring);
2266                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2267                 if (count == budget)
2268                         break;
2269         }
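             /* Replenish the small/large receive buffer queues and advance
              * the completion queue consumer index for the chip.
              */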
2270         ql_update_buffer_queues(qdev, rx_ring);
2271         ql_write_cq_idx(rx_ring);
2272         return count;
2273 }
2274
2275 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2276 {
2277         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2278         struct ql_adapter *qdev = rx_ring->qdev;
2279         struct rx_ring *trx_ring;
2280         int i, work_done = 0;
2281         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2282
2283         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2284                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2285
2286         /* Service the TX rings first.  They start
2287          * right after the RSS rings. */
2288         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2289                 trx_ring = &qdev->rx_ring[i];
2290                 /* If this TX completion ring belongs to this vector and
2291                  * it's not empty then service it.
2292                  */
2293                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2294                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2295                                         trx_ring->cnsmr_idx)) {
2296                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2297                                      "%s: Servicing TX completion ring %d.\n",
2298                                      __func__, trx_ring->cq_id);
2299                         ql_clean_outbound_rx_ring(trx_ring);
2300                 }
2301         }
2302
2303         /*
2304          * Now service the RSS ring if it's active.
2305          */
2306         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2307                                         rx_ring->cnsmr_idx) {
2308                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309                              "%s: Servicing RX completion ring %d.\n",
2310                              __func__, rx_ring->cq_id);
2311                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2312         }
2313
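             /* If we didn't use the whole budget, leave polling mode and
              * re-arm this vector's completion interrupt.
              */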
2314         if (work_done < budget) {
2315                 napi_complete(napi);
2316                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2317         }
2318         return work_done;
2319 }
2320
2321 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2322 {
2323         struct ql_adapter *qdev = netdev_priv(ndev);
2324
2325         if (features & NETIF_F_HW_VLAN_RX) {
2326                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327                              "Turning on VLAN in NIC_RCV_CFG.\n");
2328                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2329                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2330         } else {
2331                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2332                              "Turning off VLAN in NIC_RCV_CFG.\n");
2333                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2334         }
2335 }
2336
2337 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2338 {
2339         /*
2340          * Since there is no support for separate rx/tx vlan accel
2341          * enable/disable make sure tx flag is always in same state as rx.
2342          */
2343         if (features & NETIF_F_HW_VLAN_RX)
2344                 features |= NETIF_F_HW_VLAN_TX;
2345         else
2346                 features &= ~NETIF_F_HW_VLAN_TX;
2347
2348         return features;
2349 }
2350
2351 static int qlge_set_features(struct net_device *ndev, u32 features)
2352 {
2353         u32 changed = ndev->features ^ features;
2354
2355         if (changed & NETIF_F_HW_VLAN_RX)
2356                 qlge_vlan_mode(ndev, features);
2357
2358         return 0;
2359 }
2360
2361 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2362 {
2363         u32 enable_bit = MAC_ADDR_E;
2364
2365         if (ql_set_mac_addr_reg
2366             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2367                 netif_err(qdev, ifup, qdev->ndev,
2368                           "Failed to init vlan address.\n");
2369         }
2370 }
2371
2372 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2373 {
2374         struct ql_adapter *qdev = netdev_priv(ndev);
2375         int status;
2376
2377         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2378         if (status)
2379                 return;
2380
2381         __qlge_vlan_rx_add_vid(qdev, vid);
2382         set_bit(vid, qdev->active_vlans);
2383
2384         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2385 }
2386
2387 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2388 {
2389         u32 enable_bit = 0;
2390
2391         if (ql_set_mac_addr_reg
2392             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2393                 netif_err(qdev, ifup, qdev->ndev,
2394                           "Failed to clear vlan address.\n");
2395         }
2396 }
2397
2398 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2399 {
2400         struct ql_adapter *qdev = netdev_priv(ndev);
2401         int status;
2402
2403         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2404         if (status)
2405                 return;
2406
2407         __qlge_vlan_rx_kill_vid(qdev, vid);
2408         clear_bit(vid, qdev->active_vlans);
2409
2410         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2411 }
2412
2413 static void qlge_restore_vlan(struct ql_adapter *qdev)
2414 {
2415         int status;
2416         u16 vid;
2417
2418         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2419         if (status)
2420                 return;
2421
2422         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2423                 __qlge_vlan_rx_add_vid(qdev, vid);
2424
2425         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2426 }
2427
2428 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2429 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2430 {
2431         struct rx_ring *rx_ring = dev_id;
2432         napi_schedule(&rx_ring->napi);
2433         return IRQ_HANDLED;
2434 }
2435
2436 /* This handles a fatal error, MPI activity, and the default
2437  * rx_ring in an MSI-X multiple vector environment.
2438  * In an MSI/Legacy environment it also processes the rest of
2439  * the rx_rings.
2440  */
2441 static irqreturn_t qlge_isr(int irq, void *dev_id)
2442 {
2443         struct rx_ring *rx_ring = dev_id;
2444         struct ql_adapter *qdev = rx_ring->qdev;
2445         struct intr_context *intr_context = &qdev->intr_context[0];
2446         u32 var;
2447         int work_done = 0;
2448
2449         spin_lock(&qdev->hw_lock);
2450         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2451                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2452                              "Shared Interrupt, Not ours!\n");
2453                 spin_unlock(&qdev->hw_lock);
2454                 return IRQ_NONE;
2455         }
2456         spin_unlock(&qdev->hw_lock);
2457
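             /* Disabling the completion interrupt also returns the chip
              * status bits (STS_FE, STS_PI) that are examined below.
              */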
2458         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2459
2460         /*
2461          * Check for fatal error.
2462          */
2463         if (var & STS_FE) {
2464                 ql_queue_asic_error(qdev);
2465                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2466                 var = ql_read32(qdev, ERR_STS);
2467                 netdev_err(qdev->ndev, "Resetting chip. "
2468                                         "Error Status Register = 0x%x\n", var);
2469                 return IRQ_HANDLED;
2470         }
2471
2472         /*
2473          * Check MPI processor activity.
2474          */
2475         if ((var & STS_PI) &&
2476                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2477                 /*
2478                  * We've got an async event or mailbox completion.
2479                  * Handle it and clear the source of the interrupt.
2480                  */
2481                 netif_err(qdev, intr, qdev->ndev,
2482                           "Got MPI processor interrupt.\n");
2483                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2484                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2485                 queue_delayed_work_on(smp_processor_id(),
2486                                 qdev->workqueue, &qdev->mpi_work, 0);
2487                 work_done++;
2488         }
2489
2490         /*
2491          * Get the bit-mask that shows the active queues for this
2492          * pass.  Compare it to the queues that this irq services
2493          * and call napi if there's a match.
2494          */
2495         var = ql_read32(qdev, ISR1);
2496         if (var & intr_context->irq_mask) {
2497                 netif_info(qdev, intr, qdev->ndev,
2498                            "Waking handler for rx_ring[0].\n");
2499                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2500                 napi_schedule(&rx_ring->napi);
2501                 work_done++;
2502         }
2503         ql_enable_completion_interrupt(qdev, intr_context->intr);
2504         return work_done ? IRQ_HANDLED : IRQ_NONE;
2505 }
2506
2507 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2508 {
2509
2510         if (skb_is_gso(skb)) {
2511                 int err;
2512                 if (skb_header_cloned(skb)) {
2513                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2514                         if (err)
2515                                 return err;
2516                 }
2517
2518                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2520                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2521                 mac_iocb_ptr->total_hdrs_len =
2522                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2523                 mac_iocb_ptr->net_trans_offset =
2524                     cpu_to_le16(skb_network_offset(skb) |
2525                                 skb_transport_offset(skb)
2526                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2527                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2528                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
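                     /* Seed the TCP checksum with the pseudo-header checksum;
                      * the hardware completes it for each generated segment.
                      */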
2529                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2530                         struct iphdr *iph = ip_hdr(skb);
2531                         iph->check = 0;
2532                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2534                                                                  iph->daddr, 0,
2535                                                                  IPPROTO_TCP,
2536                                                                  0);
2537                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2538                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2539                         tcp_hdr(skb)->check =
2540                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2541                                              &ipv6_hdr(skb)->daddr,
2542                                              0, IPPROTO_TCP, 0);
2543                 }
2544                 return 1;
2545         }
2546         return 0;
2547 }
2548
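/* Set up hardware checksum offload for a non-TSO IPv4 frame
 * (CHECKSUM_PARTIAL): seed the TCP or UDP checksum field with the
 * pseudo-header sum and let the chip compute the rest.
 */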
2549 static void ql_hw_csum_setup(struct sk_buff *skb,
2550                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2551 {
2552         int len;
2553         struct iphdr *iph = ip_hdr(skb);
2554         __sum16 *check;
2555         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2556         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2557         mac_iocb_ptr->net_trans_offset =
2558                 cpu_to_le16(skb_network_offset(skb) |
2559                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2560
2561         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2562         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2563         if (likely(iph->protocol == IPPROTO_TCP)) {
2564                 check = &(tcp_hdr(skb)->check);
2565                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2566                 mac_iocb_ptr->total_hdrs_len =
2567                     cpu_to_le16(skb_transport_offset(skb) +
2568                                 (tcp_hdr(skb)->doff << 2));
2569         } else {
2570                 check = &(udp_hdr(skb)->check);
2571                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2572                 mac_iocb_ptr->total_hdrs_len =
2573                     cpu_to_le16(skb_transport_offset(skb) +
2574                                 sizeof(struct udphdr));
2575         }
2576         *check = ~csum_tcpudp_magic(iph->saddr,
2577                                     iph->daddr, len, iph->protocol, 0);
2578 }
2579
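/* Main transmit entry point.  Pads runt frames to ETH_ZLEN, builds a MAC
 * IOCB (TSO or checksum-offload variant as needed), maps the skb for DMA
 * and rings the producer-index doorbell of the selected tx ring.  Returns
 * NETDEV_TX_BUSY when the ring is out of slots or the mapping fails.
 */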
2580 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2581 {
2582         struct tx_ring_desc *tx_ring_desc;
2583         struct ob_mac_iocb_req *mac_iocb_ptr;
2584         struct ql_adapter *qdev = netdev_priv(ndev);
2585         int tso;
2586         struct tx_ring *tx_ring;
2587         u32 tx_ring_idx = (u32) skb->queue_mapping;
2588
2589         tx_ring = &qdev->tx_ring[tx_ring_idx];
2590
2591         if (skb_padto(skb, ETH_ZLEN))
2592                 return NETDEV_TX_OK;
2593
2594         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595                 netif_info(qdev, tx_queued, qdev->ndev,
2596                            "%s: shutting down tx queue %d due to lack of resources.\n",
2597                            __func__, tx_ring_idx);
2598                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2599                 atomic_inc(&tx_ring->queue_stopped);
2600                 tx_ring->tx_errors++;
2601                 return NETDEV_TX_BUSY;
2602         }
2603         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2604         mac_iocb_ptr = tx_ring_desc->queue_entry;
2605         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2606
2607         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2608         mac_iocb_ptr->tid = tx_ring_desc->index;
2609         /* We use the upper 32-bits to store the tx queue for this IO.
2610          * When we get the completion we can use it to establish the context.
2611          */
2612         mac_iocb_ptr->txq_idx = tx_ring_idx;
2613         tx_ring_desc->skb = skb;
2614
2615         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2616
2617         if (vlan_tx_tag_present(skb)) {
2618                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2619                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2620                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2621                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2622         }
2623         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2624         if (tso < 0) {
2625                 dev_kfree_skb_any(skb);
2626                 return NETDEV_TX_OK;
2627         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2628                 ql_hw_csum_setup(skb,
2629                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2630         }
2631         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2632                         NETDEV_TX_OK) {
2633                 netif_err(qdev, tx_queued, qdev->ndev,
2634                           "Could not map the segments.\n");
2635                 tx_ring->tx_errors++;
2636                 return NETDEV_TX_BUSY;
2637         }
2638         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2639         tx_ring->prod_idx++;
2640         if (tx_ring->prod_idx == tx_ring->wq_len)
2641                 tx_ring->prod_idx = 0;
2642         wmb();
2643
2644         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2645         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2646                      "tx queued, slot %d, len %d\n",
2647                      tx_ring->prod_idx, skb->len);
2648
2649         atomic_dec(&tx_ring->tx_count);
2650         return NETDEV_TX_OK;
2651 }
2652
2653
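/* Free the coherent pages that back the rx and tx shadow register
 * areas (one page each).
 */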
2654 static void ql_free_shadow_space(struct ql_adapter *qdev)
2655 {
2656         if (qdev->rx_ring_shadow_reg_area) {
2657                 pci_free_consistent(qdev->pdev,
2658                                     PAGE_SIZE,
2659                                     qdev->rx_ring_shadow_reg_area,
2660                                     qdev->rx_ring_shadow_reg_dma);
2661                 qdev->rx_ring_shadow_reg_area = NULL;
2662         }
2663         if (qdev->tx_ring_shadow_reg_area) {
2664                 pci_free_consistent(qdev->pdev,
2665                                     PAGE_SIZE,
2666                                     qdev->tx_ring_shadow_reg_area,
2667                                     qdev->tx_ring_shadow_reg_dma);
2668                 qdev->tx_ring_shadow_reg_area = NULL;
2669         }
2670 }
2671
2672 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2673 {
2674         qdev->rx_ring_shadow_reg_area =
2675             pci_alloc_consistent(qdev->pdev,
2676                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2677         if (qdev->rx_ring_shadow_reg_area == NULL) {
2678                 netif_err(qdev, ifup, qdev->ndev,
2679                           "Allocation of RX shadow space failed.\n");
2680                 return -ENOMEM;
2681         }
2682         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2683         qdev->tx_ring_shadow_reg_area =
2684             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2685                                  &qdev->tx_ring_shadow_reg_dma);
2686         if (qdev->tx_ring_shadow_reg_area == NULL) {
2687                 netif_err(qdev, ifup, qdev->ndev,
2688                           "Allocation of TX shadow space failed.\n");
2689                 goto err_wqp_sh_area;
2690         }
2691         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2692         return 0;
2693
2694 err_wqp_sh_area:
2695         pci_free_consistent(qdev->pdev,
2696                             PAGE_SIZE,
2697                             qdev->rx_ring_shadow_reg_area,
2698                             qdev->rx_ring_shadow_reg_dma);
2699         return -ENOMEM;
2700 }
2701
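/* Link each tx descriptor to its IOCB slot in the work queue and reset
 * the ring's free-slot count.
 */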
2702 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2703 {
2704         struct tx_ring_desc *tx_ring_desc;
2705         int i;
2706         struct ob_mac_iocb_req *mac_iocb_ptr;
2707
2708         mac_iocb_ptr = tx_ring->wq_base;
2709         tx_ring_desc = tx_ring->q;
2710         for (i = 0; i < tx_ring->wq_len; i++) {
2711                 tx_ring_desc->index = i;
2712                 tx_ring_desc->skb = NULL;
2713                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2714                 mac_iocb_ptr++;
2715                 tx_ring_desc++;
2716         }
2717         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2718         atomic_set(&tx_ring->queue_stopped, 0);
2719 }
2720
2721 static void ql_free_tx_resources(struct ql_adapter *qdev,
2722                                  struct tx_ring *tx_ring)
2723 {
2724         if (tx_ring->wq_base) {
2725                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2726                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2727                 tx_ring->wq_base = NULL;
2728         }
2729         kfree(tx_ring->q);
2730         tx_ring->q = NULL;
2731 }
2732
2733 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2734                                  struct tx_ring *tx_ring)
2735 {
2736         tx_ring->wq_base =
2737             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2738                                  &tx_ring->wq_base_dma);
2739
2740         if ((tx_ring->wq_base == NULL) ||
2741             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2742                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2743                 return -ENOMEM;
2744         }
2745         tx_ring->q =
2746             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747         if (tx_ring->q == NULL)
2748                 goto err;
2749
2750         return 0;
2751 err:
2752         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753                             tx_ring->wq_base, tx_ring->wq_base_dma);
2754         return -ENOMEM;
2755 }
2756
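/* Release the large-buffer (page chunk) queue.  A DMA mapping covers a
 * whole block of pages, so it is only unmapped when the descriptor
 * holding the last chunk of that block is freed.
 */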
2757 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2758 {
2759         struct bq_desc *lbq_desc;
2760
2761         uint32_t  curr_idx, clean_idx;
2762
2763         curr_idx = rx_ring->lbq_curr_idx;
2764         clean_idx = rx_ring->lbq_clean_idx;
2765         while (curr_idx != clean_idx) {
2766                 lbq_desc = &rx_ring->lbq[curr_idx];
2767
2768                 if (lbq_desc->p.pg_chunk.last_flag) {
2769                         pci_unmap_page(qdev->pdev,
2770                                 lbq_desc->p.pg_chunk.map,
2771                                 ql_lbq_block_size(qdev),
2772                                        PCI_DMA_FROMDEVICE);
2773                         lbq_desc->p.pg_chunk.last_flag = 0;
2774                 }
2775
2776                 put_page(lbq_desc->p.pg_chunk.page);
2777                 lbq_desc->p.pg_chunk.page = NULL;
2778
2779                 if (++curr_idx == rx_ring->lbq_len)
2780                         curr_idx = 0;
2781
2782         }
2783 }
2784
2785 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2786 {
2787         int i;
2788         struct bq_desc *sbq_desc;
2789
2790         for (i = 0; i < rx_ring->sbq_len; i++) {
2791                 sbq_desc = &rx_ring->sbq[i];
2792                 if (sbq_desc == NULL) {
2793                         netif_err(qdev, ifup, qdev->ndev,
2794                                   "sbq_desc %d is NULL.\n", i);
2795                         return;
2796                 }
2797                 if (sbq_desc->p.skb) {
2798                         pci_unmap_single(qdev->pdev,
2799                                          dma_unmap_addr(sbq_desc, mapaddr),
2800                                          dma_unmap_len(sbq_desc, maplen),
2801                                          PCI_DMA_FROMDEVICE);
2802                         dev_kfree_skb(sbq_desc->p.skb);
2803                         sbq_desc->p.skb = NULL;
2804                 }
2805         }
2806 }
2807
2808 /* Free all large and small rx buffers associated
2809  * with the completion queues for this device.
2810  */
2811 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2812 {
2813         int i;
2814         struct rx_ring *rx_ring;
2815
2816         for (i = 0; i < qdev->rx_ring_count; i++) {
2817                 rx_ring = &qdev->rx_ring[i];
2818                 if (rx_ring->lbq)
2819                         ql_free_lbq_buffers(qdev, rx_ring);
2820                 if (rx_ring->sbq)
2821                         ql_free_sbq_buffers(qdev, rx_ring);
2822         }
2823 }
2824
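/* Refill the small and large buffer queues on every completion queue
 * that carries inbound traffic (TX completion rings have no buffer
 * queues to fill).
 */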
2825 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2826 {
2827         struct rx_ring *rx_ring;
2828         int i;
2829
2830         for (i = 0; i < qdev->rx_ring_count; i++) {
2831                 rx_ring = &qdev->rx_ring[i];
2832                 if (rx_ring->type != TX_Q)
2833                         ql_update_buffer_queues(qdev, rx_ring);
2834         }
2835 }
2836
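/* Point each large-buffer descriptor at its slot in the DMA-able array
 * of little-endian 64-bit buffer addresses that the chip consumes.
 */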
2837 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2838                                 struct rx_ring *rx_ring)
2839 {
2840         int i;
2841         struct bq_desc *lbq_desc;
2842         __le64 *bq = rx_ring->lbq_base;
2843
2844         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2845         for (i = 0; i < rx_ring->lbq_len; i++) {
2846                 lbq_desc = &rx_ring->lbq[i];
2847                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2848                 lbq_desc->index = i;
2849                 lbq_desc->addr = bq;
2850                 bq++;
2851         }
2852 }
2853
2854 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2855                                 struct rx_ring *rx_ring)
2856 {
2857         int i;
2858         struct bq_desc *sbq_desc;
2859         __le64 *bq = rx_ring->sbq_base;
2860
2861         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2862         for (i = 0; i < rx_ring->sbq_len; i++) {
2863                 sbq_desc = &rx_ring->sbq[i];
2864                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2865                 sbq_desc->index = i;
2866                 sbq_desc->addr = bq;
2867                 bq++;
2868         }
2869 }
2870
2871 static void ql_free_rx_resources(struct ql_adapter *qdev,
2872                                  struct rx_ring *rx_ring)
2873 {
2874         /* Free the small buffer queue. */
2875         if (rx_ring->sbq_base) {
2876                 pci_free_consistent(qdev->pdev,
2877                                     rx_ring->sbq_size,
2878                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2879                 rx_ring->sbq_base = NULL;
2880         }
2881
2882         /* Free the small buffer queue control blocks. */
2883         kfree(rx_ring->sbq);
2884         rx_ring->sbq = NULL;
2885
2886         /* Free the large buffer queue. */
2887         if (rx_ring->lbq_base) {
2888                 pci_free_consistent(qdev->pdev,
2889                                     rx_ring->lbq_size,
2890                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2891                 rx_ring->lbq_base = NULL;
2892         }
2893
2894         /* Free the large buffer queue control blocks. */
2895         kfree(rx_ring->lbq);
2896         rx_ring->lbq = NULL;
2897
2898         /* Free the rx queue. */
2899         if (rx_ring->cq_base) {
2900                 pci_free_consistent(qdev->pdev,
2901                                     rx_ring->cq_size,
2902                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2903                 rx_ring->cq_base = NULL;
2904         }
2905 }
2906
2907 /* Allocate queues and buffers for this completion queue based
2908  * on the values in the parameter structure. */
2909 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2910                                  struct rx_ring *rx_ring)
2911 {
2912
2913         /*
2914          * Allocate the completion queue for this rx_ring.
2915          */
2916         rx_ring->cq_base =
2917             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2918                                  &rx_ring->cq_base_dma);
2919
2920         if (rx_ring->cq_base == NULL) {
2921                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2922                 return -ENOMEM;
2923         }
2924
2925         if (rx_ring->sbq_len) {
2926                 /*
2927                  * Allocate small buffer queue.
2928                  */
2929                 rx_ring->sbq_base =
2930                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2931                                          &rx_ring->sbq_base_dma);
2932
2933                 if (rx_ring->sbq_base == NULL) {
2934                         netif_err(qdev, ifup, qdev->ndev,
2935                                   "Small buffer queue allocation failed.\n");
2936                         goto err_mem;
2937                 }
2938
2939                 /*
2940                  * Allocate small buffer queue control blocks.
2941                  */
2942                 rx_ring->sbq =
2943                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2944                             GFP_KERNEL);
2945                 if (rx_ring->sbq == NULL) {
2946                         netif_err(qdev, ifup, qdev->ndev,
2947                                   "Small buffer queue control block allocation failed.\n");
2948                         goto err_mem;
2949                 }
2950
2951                 ql_init_sbq_ring(qdev, rx_ring);
2952         }
2953
2954         if (rx_ring->lbq_len) {
2955                 /*
2956                  * Allocate large buffer queue.
2957                  */
2958                 rx_ring->lbq_base =
2959                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2960                                          &rx_ring->lbq_base_dma);
2961
2962                 if (rx_ring->lbq_base == NULL) {
2963                         netif_err(qdev, ifup, qdev->ndev,
2964                                   "Large buffer queue allocation failed.\n");
2965                         goto err_mem;
2966                 }
2967                 /*
2968                  * Allocate large buffer queue control blocks.
2969                  */
2970                 rx_ring->lbq =
2971                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2972                             GFP_KERNEL);
2973                 if (rx_ring->lbq == NULL) {
2974                         netif_err(qdev, ifup, qdev->ndev,
2975                                   "Large buffer queue control block allocation failed.\n");
2976                         goto err_mem;
2977                 }
2978
2979                 ql_init_lbq_ring(qdev, rx_ring);
2980         }
2981
2982         return 0;
2983
2984 err_mem:
2985         ql_free_rx_resources(qdev, rx_ring);
2986         return -ENOMEM;
2987 }
2988
2989 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2990 {
2991         struct tx_ring *tx_ring;
2992         struct tx_ring_desc *tx_ring_desc;
2993         int i, j;
2994
2995         /*
2996          * Loop through all queues and free
2997          * any resources.
2998          */
2999         for (j = 0; j < qdev->tx_ring_count; j++) {
3000                 tx_ring = &qdev->tx_ring[j];
3001                 for (i = 0; i < tx_ring->wq_len; i++) {
3002                         tx_ring_desc = &tx_ring->q[i];
3003                         if (tx_ring_desc && tx_ring_desc->skb) {
3004                                 netif_err(qdev, ifdown, qdev->ndev,
3005                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
3006                                           tx_ring_desc->skb, j,
3007                                           tx_ring_desc->index);
3008                                 ql_unmap_send(qdev, tx_ring_desc,
3009                                               tx_ring_desc->map_cnt);
3010                                 dev_kfree_skb(tx_ring_desc->skb);
3011                                 tx_ring_desc->skb = NULL;
3012                         }
3013                 }
3014         }
3015 }
3016
3017 static void ql_free_mem_resources(struct ql_adapter *qdev)
3018 {
3019         int i;
3020
3021         for (i = 0; i < qdev->tx_ring_count; i++)
3022                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3023         for (i = 0; i < qdev->rx_ring_count; i++)
3024                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3025         ql_free_shadow_space(qdev);
3026 }
3027
3028 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3029 {
3030         int i;
3031
3032         /* Allocate space for our shadow registers and such. */
3033         if (ql_alloc_shadow_space(qdev))
3034                 return -ENOMEM;
3035
3036         for (i = 0; i < qdev->rx_ring_count; i++) {
3037                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3038                         netif_err(qdev, ifup, qdev->ndev,
3039                                   "RX resource allocation failed.\n");
3040                         goto err_mem;
3041                 }
3042         }
3043         /* Allocate tx queue resources */
3044         for (i = 0; i < qdev->tx_ring_count; i++) {
3045                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3046                         netif_err(qdev, ifup, qdev->ndev,
3047                                   "TX resource allocation failed.\n");
3048                         goto err_mem;
3049                 }
3050         }
3051         return 0;
3052
3053 err_mem:
3054         ql_free_mem_resources(qdev);
3055         return -ENOMEM;
3056 }
3057
3058 /* Set up the rx ring control block and pass it to the chip.
3059  * The control block is defined as
3060  * "Completion Queue Initialization Control Block", or cqicb.
3061  */
3062 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3063 {
3064         struct cqicb *cqicb = &rx_ring->cqicb;
3065         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3066                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3067         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3068                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3069         void __iomem *doorbell_area =
3070             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3071         int err = 0;
3072         u16 bq_len;
3073         u64 tmp;
3074         __le64 *base_indirect_ptr;
3075         int page_entries;
3076
3077         /* Set up the shadow registers for this ring. */
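        /* The shadow area for this CQ holds, in order: the producer-index
         * shadow (one u64), the lbq indirect page list and then the sbq
         * indirect page list.
         */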
3078         rx_ring->prod_idx_sh_reg = shadow_reg;
3079         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3080         *rx_ring->prod_idx_sh_reg = 0;
3081         shadow_reg += sizeof(u64);
3082         shadow_reg_dma += sizeof(u64);
3083         rx_ring->lbq_base_indirect = shadow_reg;
3084         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3085         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3086         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3087         rx_ring->sbq_base_indirect = shadow_reg;
3088         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3089
3090         /* PCI doorbell mem area + 0x00 for consumer index register */
3091         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3092         rx_ring->cnsmr_idx = 0;
3093         rx_ring->curr_entry = rx_ring->cq_base;
3094
3095         /* PCI doorbell mem area + 0x04 for valid register */
3096         rx_ring->valid_db_reg = doorbell_area + 0x04;
3097
3098         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3099         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3100
3101         /* PCI doorbell mem area + 0x1c */
3102         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3103
3104         memset((void *)cqicb, 0, sizeof(struct cqicb));
3105         cqicb->msix_vect = rx_ring->irq;
3106
3107         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3108         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3109
3110         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3111
3112         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3113
3114         /*
3115          * Set up the control block load flags.
3116          */
3117         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3118             FLAGS_LV |          /* Load MSI-X vector */
3119             FLAGS_LI;           /* Load irq delay values */
3120         if (rx_ring->lbq_len) {
3121                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3122                 tmp = (u64)rx_ring->lbq_base_dma;
3123                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3124                 page_entries = 0;
3125                 do {
3126                         *base_indirect_ptr = cpu_to_le64(tmp);
3127                         tmp += DB_PAGE_SIZE;
3128                         base_indirect_ptr++;
3129                         page_entries++;
3130                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3131                 cqicb->lbq_addr =
3132                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3133                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3134                         (u16) rx_ring->lbq_buf_size;
3135                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3136                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3137                         (u16) rx_ring->lbq_len;
3138                 cqicb->lbq_len = cpu_to_le16(bq_len);
3139                 rx_ring->lbq_prod_idx = 0;
3140                 rx_ring->lbq_curr_idx = 0;
3141                 rx_ring->lbq_clean_idx = 0;
3142                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3143         }
3144         if (rx_ring->sbq_len) {
3145                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3146                 tmp = (u64)rx_ring->sbq_base_dma;
3147                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3148                 page_entries = 0;
3149                 do {
3150                         *base_indirect_ptr = cpu_to_le64(tmp);
3151                         tmp += DB_PAGE_SIZE;
3152                         base_indirect_ptr++;
3153                         page_entries++;
3154                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3155                 cqicb->sbq_addr =
3156                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3157                 cqicb->sbq_buf_size =
3158                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3159                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3160                         (u16) rx_ring->sbq_len;
3161                 cqicb->sbq_len = cpu_to_le16(bq_len);
3162                 rx_ring->sbq_prod_idx = 0;
3163                 rx_ring->sbq_curr_idx = 0;
3164                 rx_ring->sbq_clean_idx = 0;
3165                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3166         }
3167         switch (rx_ring->type) {
3168         case TX_Q:
3169                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3170                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3171                 break;
3172         case RX_Q:
3173                 /* Inbound completion handling rx_rings run in
3174                  * separate NAPI contexts.
3175                  */
3176                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3177                                64);
3178                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3179                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3180                 break;
3181         default:
3182                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3183                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3184         }
3185         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3186                      "Initializing rx completion queue.\n");
3187         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3188                            CFG_LCQ, rx_ring->cq_id);
3189         if (err) {
3190                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3191                 return err;
3192         }
3193         return err;
3194 }
3195
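/* Build the work queue initialization control block (wqicb) for this tx
 * ring, assign its doorbell and shadow registers, and download the
 * control block to the chip.
 */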
3196 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3197 {
3198         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3199         void __iomem *doorbell_area =
3200             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3201         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3202             (tx_ring->wq_id * sizeof(u64));
3203         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3204             (tx_ring->wq_id * sizeof(u64));
3205         int err = 0;
3206
3207         /*
3208          * Assign doorbell registers for this tx_ring.
3209          */
3210         /* TX PCI doorbell mem area for tx producer index */
3211         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3212         tx_ring->prod_idx = 0;
3213         /* TX PCI doorbell mem area + 0x04 */
3214         tx_ring->valid_db_reg = doorbell_area + 0x04;
3215
3216         /*
3217          * Assign shadow registers for this tx_ring.
3218          */
3219         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3220         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3221
3222         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3223         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3224                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3225         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3226         wqicb->rid = 0;
3227         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3228
3229         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3230
3231         ql_init_tx_ring(qdev, tx_ring);
3232
3233         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3234                            (u16) tx_ring->wq_id);
3235         if (err) {
3236                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3237                 return err;
3238         }
3239         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3240                      "Successfully loaded WQICB.\n");
3241         return err;
3242 }
3243
3244 static void ql_disable_msix(struct ql_adapter *qdev)
3245 {
3246         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3247                 pci_disable_msix(qdev->pdev);
3248                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3249                 kfree(qdev->msi_x_entry);
3250                 qdev->msi_x_entry = NULL;
3251         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3252                 pci_disable_msi(qdev->pdev);
3253                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3254         }
3255 }
3256
3257 /* We start by trying to get the number of vectors
3258  * stored in qdev->intr_count. If we don't get that
3259  * many then we reduce the count and try again.
3260  */
3261 static void ql_enable_msix(struct ql_adapter *qdev)
3262 {
3263         int i, err;
3264
3265         /* Get the MSIX vectors. */
3266         if (qlge_irq_type == MSIX_IRQ) {
3267                 /* Try to alloc space for the msix struct,
3268                  * if it fails then go to MSI/legacy.
3269                  */
3270                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3271                                             sizeof(struct msix_entry),
3272                                             GFP_KERNEL);
3273                 if (!qdev->msi_x_entry) {
3274                         qlge_irq_type = MSI_IRQ;
3275                         goto msi;
3276                 }
3277
3278                 for (i = 0; i < qdev->intr_count; i++)
3279                         qdev->msi_x_entry[i].entry = i;
3280
3281                 /* Loop to get our vectors.  We start with
3282                  * what we want and settle for what we get.
3283                  */
3284                 do {
3285                         err = pci_enable_msix(qdev->pdev,
3286                                 qdev->msi_x_entry, qdev->intr_count);
3287                         if (err > 0)
3288                                 qdev->intr_count = err;
3289                 } while (err > 0);
3290
3291                 if (err < 0) {
3292                         kfree(qdev->msi_x_entry);
3293                         qdev->msi_x_entry = NULL;
3294                         netif_warn(qdev, ifup, qdev->ndev,
3295                                    "MSI-X Enable failed, trying MSI.\n");
3296                         qdev->intr_count = 1;
3297                         qlge_irq_type = MSI_IRQ;
3298                 } else if (err == 0) {
3299                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3300                         netif_info(qdev, ifup, qdev->ndev,
3301                                    "MSI-X Enabled, got %d vectors.\n",
3302                                    qdev->intr_count);
3303                         return;
3304                 }
3305         }
3306 msi:
3307         qdev->intr_count = 1;
3308         if (qlge_irq_type == MSI_IRQ) {
3309                 if (!pci_enable_msi(qdev->pdev)) {
3310                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3311                         netif_info(qdev, ifup, qdev->ndev,
3312                                    "Running with MSI interrupts.\n");
3313                         return;
3314                 }
3315         }
3316         qlge_irq_type = LEG_IRQ;
3317         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3318                      "Running with legacy interrupts.\n");
3319 }
3320
3321 /* Each vector services 1 RSS ring and 1 or more
3322  * TX completion rings.  This function loops through
3323  * the TX completion rings and assigns the vector that
3324  * will service it.  An example would be if there are
3325  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3326  * This would mean that vector 0 would service RSS ring 0
3327  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3328  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3329  */
3330 static void ql_set_tx_vect(struct ql_adapter *qdev)
3331 {
3332         int i, j, vect;
3333         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3334
3335         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3336                 /* Assign irq vectors to TX rx_rings.*/
3337                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3338                                          i < qdev->rx_ring_count; i++) {
3339                         if (j == tx_rings_per_vector) {
3340                                 vect++;
3341                                 j = 0;
3342                         }
3343                         qdev->rx_ring[i].irq = vect;
3344                         j++;
3345                 }
3346         } else {
3347                 /* For a single vector, all rings have an irq
3348                  * of zero.
3349                  */
3350                 for (i = 0; i < qdev->rx_ring_count; i++)
3351                         qdev->rx_ring[i].irq = 0;
3352         }
3353 }
3354
3355 /* Set the interrupt mask for this vector.  Each vector
3356  * will service 1 RSS ring and 1 or more TX completion
3357  * rings.  This function sets up a bit mask per vector
3358  * that indicates which rings it services.
3359  */
3360 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3361 {
3362         int j, vect = ctx->intr;
3363         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3364
3365         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3366                 /* Add the RSS ring serviced by this vector
3367                  * to the mask.
3368                  */
3369                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3370                 /* Add the TX ring(s) serviced by this vector
3371                  * to the mask. */
3372                 for (j = 0; j < tx_rings_per_vector; j++) {
3373                         ctx->irq_mask |=
3374                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3375                         (vect * tx_rings_per_vector) + j].cq_id);
3376                 }
3377         } else {
3378                 /* For a single vector we just shift each queue's
3379                  * ID into the mask.
3380                  */
3381                 for (j = 0; j < qdev->rx_ring_count; j++)
3382                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3383         }
3384 }
3385
3386 /*
3387  * Here we build the intr_context structures based on
3388  * our rx_ring count and intr vector count.
3389  * The intr_context structure is used to hook each vector
3390  * to possibly different handlers.
3391  */
3392 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3393 {
3394         int i = 0;
3395         struct intr_context *intr_context = &qdev->intr_context[0];
3396
3397         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3398                 /* Each rx_ring has its
3399                  * own intr_context since we have separate
3400                  * vectors for each queue.
3401                  */
3402                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3403                         qdev->rx_ring[i].irq = i;
3404                         intr_context->intr = i;
3405                         intr_context->qdev = qdev;
3406                         /* Set up this vector's bit-mask that indicates
3407                          * which queues it services.
3408                          */
3409                         ql_set_irq_mask(qdev, intr_context);
3410                         /*
3411                          * We set up each vector's enable/disable/read bits so
3412                          * there are no bit/mask calculations in the critical path.
3413                          */
3414                         intr_context->intr_en_mask =
3415                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3416                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3417                             | i;
3418                         intr_context->intr_dis_mask =
3419                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3421                             INTR_EN_IHD | i;
3422                         intr_context->intr_read_mask =
3423                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3424                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3425                             i;
3426                         if (i == 0) {
3427                                 /* The first vector/queue handles
3428                                  * broadcast/multicast, fatal errors,
3429                                  * and firmware events.  This is in addition
3430                                  * to the normal inbound NAPI processing.
3431                                  */
3432                                 intr_context->handler = qlge_isr;
3433                                 sprintf(intr_context->name, "%s-rx-%d",
3434                                         qdev->ndev->name, i);
3435                         } else {
3436                                 /*
3437                                  * Inbound queues handle unicast frames only.
3438                                  */
3439                                 intr_context->handler = qlge_msix_rx_isr;
3440                                 sprintf(intr_context->name, "%s-rx-%d",
3441                                         qdev->ndev->name, i);
3442                         }
3443                 }
3444         } else {
3445                 /*
3446                  * All rx_rings use the same intr_context since
3447                  * there is only one vector.
3448                  */
3449                 intr_context->intr = 0;
3450                 intr_context->qdev = qdev;
3451                 /*
3452                  * We set up each vector's enable/disable/read bits so
3453                  * there are no bit/mask calculations in the critical path.
3454                  */
3455                 intr_context->intr_en_mask =
3456                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3457                 intr_context->intr_dis_mask =
3458                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3459                     INTR_EN_TYPE_DISABLE;
3460                 intr_context->intr_read_mask =
3461                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3462                 /*
3463                  * Single interrupt means one handler for all rings.
3464                  */
3465                 intr_context->handler = qlge_isr;
3466                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3467                 /* Set up this vector's bit-mask that indicates
3468                  * which queues it services. In this case there is
3469                  * a single vector so it will service all RSS and
3470                  * TX completion rings.
3471                  */
3472                 ql_set_irq_mask(qdev, intr_context);
3473         }
3474         /* Tell the TX completion rings which MSIx vector
3475          * they will be using.
3476          */
3477         ql_set_tx_vect(qdev);
3478 }
3479
3480 static void ql_free_irq(struct ql_adapter *qdev)
3481 {
3482         int i;
3483         struct intr_context *intr_context = &qdev->intr_context[0];
3484
3485         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3486                 if (intr_context->hooked) {
3487                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3488                                 free_irq(qdev->msi_x_entry[i].vector,
3489                                          &qdev->rx_ring[i]);
3490                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3491                                              "freeing msix interrupt %d.\n", i);
3492                         } else {
3493                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3494                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3495                                              "freeing msi interrupt %d.\n", i);
3496                         }
3497                 }
3498         }
3499         ql_disable_msix(qdev);
3500 }
3501
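/* Hook an interrupt handler for each vector.  With MSI-X every rx_ring
 * gets its own vector; with MSI or legacy interrupts a single handler is
 * registered against rx_ring[0].
 */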
3502 static int ql_request_irq(struct ql_adapter *qdev)
3503 {
3504         int i;
3505         int status = 0;
3506         struct pci_dev *pdev = qdev->pdev;
3507         struct intr_context *intr_context = &qdev->intr_context[0];
3508
3509         ql_resolve_queues_to_irqs(qdev);
3510
3511         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3512                 atomic_set(&intr_context->irq_cnt, 0);
3513                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3514                         status = request_irq(qdev->msi_x_entry[i].vector,
3515                                              intr_context->handler,
3516                                              0,
3517                                              intr_context->name,
3518                                              &qdev->rx_ring[i]);
3519                         if (status) {
3520                                 netif_err(qdev, ifup, qdev->ndev,
3521                                           "Failed request for MSIX interrupt %d.\n",
3522                                           i);
3523                                 goto err_irq;
3524                         } else {
3525                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3526                                              "Hooked intr %d, queue type %s, with name %s.\n",
3527                                              i,
3528                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3529                                              "DEFAULT_Q" :
3530                                              qdev->rx_ring[i].type == TX_Q ?
3531                                              "TX_Q" :
3532                                              qdev->rx_ring[i].type == RX_Q ?
3533                                              "RX_Q" : "",
3534                                              intr_context->name);
3535                         }
3536                 } else {
3537                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3538                                      "trying msi or legacy interrupts.\n");
3539                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540                                      "%s: irq = %d.\n", __func__, pdev->irq);
3541                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3542                                      "%s: context->name = %s.\n", __func__,
3543                                      intr_context->name);
3544                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3545                                      "%s: dev_id = 0x%p.\n", __func__,
3546                                      &qdev->rx_ring[0]);
3547                         status =
3548                             request_irq(pdev->irq, qlge_isr,
3549                                         test_bit(QL_MSI_ENABLED,
3550                                                  &qdev->
3551                                                  flags) ? 0 : IRQF_SHARED,
3552                                         intr_context->name, &qdev->rx_ring[0]);
3553                         if (status)
3554                                 goto err_irq;
3555
3556                         netif_err(qdev, ifup, qdev->ndev,
3557                                   "Hooked intr %d, queue type %s, with name %s.\n",
3558                                   i,
3559                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3560                                   "DEFAULT_Q" :
3561                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3562                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3563                                   intr_context->name);
3564                 }
3565                 intr_context->hooked = 1;
3566         }
3567         return status;
3568 err_irq:
3569         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3570         ql_free_irq(qdev);
3571         return status;
3572 }
3573
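/* Configure receive-side scaling.  The RICB carries the hash keys and a
 * 1024-entry indirection table that spreads inbound flows across the
 * RSS rings.
 */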
3574 static int ql_start_rss(struct ql_adapter *qdev)
3575 {
3576         static const u8 init_hash_seed[] = {
3577                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3578                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3579                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3580                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3581                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3582         };
3583         struct ricb *ricb = &qdev->ricb;
3584         int status = 0;
3585         int i;
3586         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3587
3588         memset((void *)ricb, 0, sizeof(*ricb));
3589
3590         ricb->base_cq = RSS_L4K;
3591         ricb->flags =
3592                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3593         ricb->mask = cpu_to_le16((u16)(0x3ff));
3594
3595         /*
3596          * Fill out the Indirection Table.
3597          */
3598         for (i = 0; i < 1024; i++)
3599                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3600
3601         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3602         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3603
3604         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3605
3606         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3607         if (status) {
3608                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3609                 return status;
3610         }
3611         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3612                      "Successfully loaded RICB.\n");
3613         return status;
3614 }
3615
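/* Zero all 16 routing-index registers, under the routing semaphore, so
 * no stale frame-routing rules survive a reset or reload.
 */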
3616 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3617 {
3618         int i, status = 0;
3619
3620         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3621         if (status)
3622                 return status;
3623         /* Clear all the entries in the routing table. */
3624         for (i = 0; i < 16; i++) {
3625                 status = ql_set_routing_reg(qdev, i, 0, 0);
3626                 if (status) {
3627                         netif_err(qdev, ifup, qdev->ndev,
3628                                   "Failed to init routing register for CAM packets.\n");
3629                         break;
3630                 }
3631         }
3632         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3633         return status;
3634 }
3635
3636 /* Initialize the frame-to-queue routing. */
3637 static int ql_route_initialize(struct ql_adapter *qdev)
3638 {
3639         int status = 0;
3640
3641         /* Clear all the entries in the routing table. */
3642         status = ql_clear_routing_entries(qdev);
3643         if (status)
3644                 return status;
3645
3646         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3647         if (status)
3648                 return status;
3649
3650         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3651                                                 RT_IDX_IP_CSUM_ERR, 1);
3652         if (status) {
3653                 netif_err(qdev, ifup, qdev->ndev,
3654                         "Failed to init routing register "
3655                         "for IP CSUM error packets.\n");
3656                 goto exit;
3657         }
3658         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3659                                                 RT_IDX_TU_CSUM_ERR, 1);
3660         if (status) {
3661                 netif_err(qdev, ifup, qdev->ndev,
3662                         "Failed to init routing register "
3663                         "for TCP/UDP CSUM error packets.\n");
3664                 goto exit;
3665         }
3666         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3667         if (status) {
3668                 netif_err(qdev, ifup, qdev->ndev,
3669                           "Failed to init routing register for broadcast packets.\n");
3670                 goto exit;
3671         }
3672         /* If we have more than one inbound queue, then turn on RSS in the
3673          * routing block.
3674          */
3675         if (qdev->rss_ring_count > 1) {
3676                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3677                                         RT_IDX_RSS_MATCH, 1);
3678                 if (status) {
3679                         netif_err(qdev, ifup, qdev->ndev,
3680                                   "Failed to init routing register for MATCH RSS packets.\n");
3681                         goto exit;
3682                 }
3683         }
3684
3685         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3686                                     RT_IDX_CAM_HIT, 1);
3687         if (status)
3688                 netif_err(qdev, ifup, qdev->ndev,
3689                           "Failed to init routing register for CAM packets.\n");
3690 exit:
3691         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3692         return status;
3693 }
3694
3695 int ql_cam_route_initialize(struct ql_adapter *qdev)
3696 {
3697         int status, set;
3698
3699         /* Check if the link is up and use that to
3700          * determine whether we are setting or clearing
3701          * the MAC address in the CAM.
3702          */
3703         set = ql_read32(qdev, STS);
3704         set &= qdev->port_link_up;
3705         status = ql_set_mac_addr(qdev, set);
3706         if (status) {
3707                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3708                 return status;
3709         }
3710
3711         status = ql_route_initialize(qdev);
3712         if (status)
3713                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3714
3715         return status;
3716 }
3717
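/* Bring the adapter to an operational state: program the global control
 * registers, start every rx and tx ring, load RSS and the CAM/routing
 * tables, then enable NAPI on the RSS rings.
 */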
3718 static int ql_adapter_initialize(struct ql_adapter *qdev)
3719 {
3720         u32 value, mask;
3721         int i;
3722         int status = 0;
3723
3724         /*
3725          * Set up the System register to halt on errors.
3726          */
3727         value = SYS_EFE | SYS_FAE;
3728         mask = value << 16;
3729         ql_write32(qdev, SYS, mask | value);
3730
3731         /* Set the default queue, and VLAN behavior. */
3732         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3733         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3734         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3735
3736         /* Set the MPI interrupt to enabled. */
3737         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3738
3739         /* Enable the function, set pagesize, enable error checking. */
3740         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3741             FSC_EC | FSC_VM_PAGE_4K;
3742         value |= SPLT_SETTING;
3743
3744         /* Set/clear header splitting. */
3745         mask = FSC_VM_PAGESIZE_MASK |
3746             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3747         ql_write32(qdev, FSC, mask | value);
3748
3749         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3750
3751         /* Set RX packet routing to use the port/PCI function on which
3752          * the packet arrived, in addition to the usual frame routing.
3753          * This is helpful on bonding where both interfaces can have
3754          * the same MAC address.
3755          */
3756         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3757         /* Reroute all packets to our Interface.
3758          * They may have been routed to MPI firmware
3759          * due to WOL.
3760          */
3761         value = ql_read32(qdev, MGMT_RCV_CFG);
3762         value &= ~MGMT_RCV_CFG_RM;
3763         mask = 0xffff0000;
3764
3765         /* Sticky reg needs clearing due to WOL. */
3766         ql_write32(qdev, MGMT_RCV_CFG, mask);
3767         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3768
3769         /* Default WOL is enabled on Mezz cards */
3770         if (qdev->pdev->subsystem_device == 0x0068 ||
3771                         qdev->pdev->subsystem_device == 0x0180)
3772                 qdev->wol = WAKE_MAGIC;
3773
3774         /* Start up the rx queues. */
3775         for (i = 0; i < qdev->rx_ring_count; i++) {
3776                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3777                 if (status) {
3778                         netif_err(qdev, ifup, qdev->ndev,
3779                                   "Failed to start rx ring[%d].\n", i);
3780                         return status;
3781                 }
3782         }
3783
3784         /* If there is more than one inbound completion queue
3785          * then download a RICB to configure RSS.
3786          */
3787         if (qdev->rss_ring_count > 1) {
3788                 status = ql_start_rss(qdev);
3789                 if (status) {
3790                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3791                         return status;
3792                 }
3793         }
3794
3795         /* Start up the tx queues. */
3796         for (i = 0; i < qdev->tx_ring_count; i++) {
3797                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3798                 if (status) {
3799                         netif_err(qdev, ifup, qdev->ndev,
3800                                   "Failed to start tx ring[%d].\n", i);
3801                         return status;
3802                 }
3803         }
3804
3805         /* Initialize the port and set the max framesize. */
3806         status = qdev->nic_ops->port_initialize(qdev);
3807         if (status)
3808                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3809
3810         /* Set up the MAC address and frame routing filter. */
3811         status = ql_cam_route_initialize(qdev);
3812         if (status) {
3813                 netif_err(qdev, ifup, qdev->ndev,
3814                           "Failed to init CAM/Routing tables.\n");
3815                 return status;
3816         }
3817
3818         /* Start NAPI for the RSS queues. */
3819         for (i = 0; i < qdev->rss_ring_count; i++) {
3820                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3821                              "Enabling NAPI for rx_ring[%d].\n", i);
3822                 napi_enable(&qdev->rx_ring[i].napi);
3823         }
3824
3825         return status;
3826 }
3827
3828 /* Issue soft reset to chip. */
3829 static int ql_adapter_reset(struct ql_adapter *qdev)
3830 {
3831         u32 value;
3832         int status = 0;
3833         unsigned long end_jiffies;
3834
3835         /* Clear all the entries in the routing table. */
3836         status = ql_clear_routing_entries(qdev);
3837         if (status) {
3838                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3839                 return status;
3840         }
3841
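             /* Give the reset at least one full jiffy to complete: the max()
              * below guarantees a non-zero polling window for the RST_FO loop
              * even if usecs_to_jiffies(30) were to evaluate to 0.
              */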
3842         end_jiffies = jiffies +
3843                 max((unsigned long)1, usecs_to_jiffies(30));
3844
3845         /* If the ASIC recovery bit is set, skip the mailbox command and
3846          * just clear the bit; otherwise this is a normal reset.
3847          */
3848         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3849                 /* Stop management traffic. */
3850                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3851
3852                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3853                 ql_wait_fifo_empty(qdev);
3854         } else
3855                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3856
3857         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3858
3859         do {
3860                 value = ql_read32(qdev, RST_FO);
3861                 if ((value & RST_FO_FR) == 0)
3862                         break;
3863                 cpu_relax();
3864         } while (time_before(jiffies, end_jiffies));
3865
3866         if (value & RST_FO_FR) {
3867                 netif_err(qdev, ifdown, qdev->ndev,
3868                           "Timed out waiting for the chip reset to complete!\n");
3869                 status = -ETIMEDOUT;
3870         }
3871
3872         /* Resume management traffic. */
3873         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3874         return status;
3875 }
3876
3877 static void ql_display_dev_info(struct net_device *ndev)
3878 {
3879         struct ql_adapter *qdev = netdev_priv(ndev);
3880
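             /* chip_rev_id is decoded below as four 4-bit fields:
              * bits 3:0 NIC roll, 7:4 NIC rev, 11:8 XG roll, 15:12 XG rev.
              */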
3881         netif_info(qdev, probe, qdev->ndev,
3882                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3883                    "XG Roll = %d, XG Rev = %d.\n",
3884                    qdev->func,
3885                    qdev->port,
3886                    qdev->chip_rev_id & 0x0000000f,
3887                    qdev->chip_rev_id >> 4 & 0x0000000f,
3888                    qdev->chip_rev_id >> 8 & 0x0000000f,
3889                    qdev->chip_rev_id >> 12 & 0x0000000f);
3890         netif_info(qdev, probe, qdev->ndev,
3891                    "MAC address %pM\n", ndev->dev_addr);
3892 }
3893
3894 static int ql_wol(struct ql_adapter *qdev)
3895 {
3896         int status = 0;
3897         u32 wol = MB_WOL_DISABLE;
3898
3899         /* The CAM is still intact after a reset, but if we
3900          * are doing WOL, then we may need to program the
3901          * routing regs. We would also need to issue the mailbox
3902          * commands to instruct the MPI what to do per the ethtool
3903          * settings.
3904          */
3905
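             /* Only magic-packet wake-up (WAKE_MAGIC, e.g. set from userspace
              * with "ethtool -s ethX wol g") is supported; any other wake
              * flag left set in qdev->wol is rejected here with -EINVAL.
              */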
3906         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3907                         WAKE_MCAST | WAKE_BCAST)) {
3908                 netif_err(qdev, ifdown, qdev->ndev,
3909                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3910                           qdev->wol);
3911                 return -EINVAL;
3912         }
3913
3914         if (qdev->wol & WAKE_MAGIC) {
3915                 status = ql_mb_wol_set_magic(qdev, 1);
3916                 if (status) {
3917                         netif_err(qdev, ifdown, qdev->ndev,
3918                                   "Failed to set magic packet on %s.\n",
3919                                   qdev->ndev->name);
3920                         return status;
3921                 } else
3922                         netif_info(qdev, drv, qdev->ndev,
3923                                    "Enabled magic packet successfully on %s.\n",
3924                                    qdev->ndev->name);
3925
3926                 wol |= MB_WOL_MAGIC_PKT;
3927         }
3928
3929         if (qdev->wol) {
3930                 wol |= MB_WOL_MODE_ON;
3931                 status = ql_mb_wol_mode(qdev, wol);
3932                 netif_err(qdev, drv, qdev->ndev,
3933                           "WOL %s (wol code 0x%x) on %s\n",
3934                           (status == 0) ? "Successfully set" : "Failed",
3935                           wol, qdev->ndev->name);
3936         }
3937
3938         return status;
3939 }
3940
3941 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3942 {
3943
3944         /* Don't kill the reset worker thread if we
3945          * are in the process of recovery.
3946          */
3947         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3948                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3949         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3950         cancel_delayed_work_sync(&qdev->mpi_work);
3951         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3952         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3953         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3954 }
3955
3956 static int ql_adapter_down(struct ql_adapter *qdev)
3957 {
3958         int i, status = 0;
3959
3960         ql_link_off(qdev);
3961
3962         ql_cancel_all_work_sync(qdev);
3963
3964         for (i = 0; i < qdev->rss_ring_count; i++)
3965                 napi_disable(&qdev->rx_ring[i].napi);
3966
3967         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3968
3969         ql_disable_interrupts(qdev);
3970
3971         ql_tx_ring_clean(qdev);
3972
3973         /* Call netif_napi_del() from a common point. */
3975         for (i = 0; i < qdev->rss_ring_count; i++)
3976                 netif_napi_del(&qdev->rx_ring[i].napi);
3977
3978         status = ql_adapter_reset(qdev);
3979         if (status)
3980                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3981                           qdev->func);
3982         ql_free_rx_buffers(qdev);
3983
3984         return status;
3985 }
3986
3987 static int ql_adapter_up(struct ql_adapter *qdev)
3988 {
3989         int err = 0;
3990
3991         err = ql_adapter_initialize(qdev);
3992         if (err) {
3993                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3994                 goto err_init;
3995         }
3996         set_bit(QL_ADAPTER_UP, &qdev->flags);
3997         ql_alloc_rx_buffers(qdev);
3998         /* If the port is initialized and the
3999          * link is up, then turn on the carrier.
4000          */
4001         if ((ql_read32(qdev, STS) & qdev->port_init) &&
4002                         (ql_read32(qdev, STS) & qdev->port_link_up))
4003                 ql_link_on(qdev);
4004         /* Restore rx mode. */
4005         clear_bit(QL_ALLMULTI, &qdev->flags);
4006         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4007         qlge_set_multicast_list(qdev->ndev);
4008
4009         /* Restore vlan setting. */
4010         qlge_restore_vlan(qdev);
4011
4012         ql_enable_interrupts(qdev);
4013         ql_enable_all_completion_interrupts(qdev);
4014         netif_tx_start_all_queues(qdev->ndev);
4015
4016         return 0;
4017 err_init:
4018         ql_adapter_reset(qdev);
4019         return err;
4020 }
4021
4022 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4023 {
4024         ql_free_mem_resources(qdev);
4025         ql_free_irq(qdev);
4026 }
4027
4028 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4029 {
4030         int status = 0;
4031
4032         if (ql_alloc_mem_resources(qdev)) {
4033                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4034                 return -ENOMEM;
4035         }
4036         status = ql_request_irq(qdev);
4037         return status;
4038 }
4039
4040 static int qlge_close(struct net_device *ndev)
4041 {
4042         struct ql_adapter *qdev = netdev_priv(ndev);
4043
4044         /* If we hit the pci_channel_io_perm_failure
4045          * condition, then we have already
4046          * brought the adapter down.
4047          */
4048         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4049                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4050                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4051                 return 0;
4052         }
4053
4054         /*
4055          * Wait for device to recover from a reset.
4056          * (Rarely happens, but possible.)
4057          */
4058         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4059                 msleep(1);
4060         ql_adapter_down(qdev);
4061         ql_release_adapter_resources(qdev);
4062         return 0;
4063 }
4064
4065 static int ql_configure_rings(struct ql_adapter *qdev)
4066 {
4067         int i;
4068         struct rx_ring *rx_ring;
4069         struct tx_ring *tx_ring;
4070         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4071         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4072                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4073
4074         qdev->lbq_buf_order = get_order(lbq_buf_len);
4075
4076         /* In a perfect world we have one RSS ring for each CPU
4077          * and each has its own vector.  To do that we ask for
4078          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4079          * vector count to what we actually get.  We then
4080          * allocate an RSS ring for each.
4081          * Essentially, we are doing min(cpu_count, msix_vector_count).
4082          */
4083         qdev->intr_count = cpu_cnt;
4084         ql_enable_msix(qdev);
4085         /* Adjust the RSS ring count to the actual vector count. */
4086         qdev->rss_ring_count = qdev->intr_count;
4087         qdev->tx_ring_count = cpu_cnt;
4088         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4089
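             /* For example, on a 4-CPU system where ql_enable_msix() grants
              * all four vectors: rss_ring_count = 4, tx_ring_count = 4 and
              * rx_ring_count = 8; rx_ring[0..3] become inbound (RSS) queues
              * and rx_ring[4..7] become the tx-completion queues set up below.
              */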
4090         for (i = 0; i < qdev->tx_ring_count; i++) {
4091                 tx_ring = &qdev->tx_ring[i];
4092                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4093                 tx_ring->qdev = qdev;
4094                 tx_ring->wq_id = i;
4095                 tx_ring->wq_len = qdev->tx_ring_size;
4096                 tx_ring->wq_size =
4097                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4098
4099                 /*
4100                  * The completion queue IDs for the tx rings start
4101                  * immediately after the rss rings.
4102                  */
4103                 tx_ring->cq_id = qdev->rss_ring_count + i;
4104         }
4105
4106         for (i = 0; i < qdev->rx_ring_count; i++) {
4107                 rx_ring = &qdev->rx_ring[i];
4108                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4109                 rx_ring->qdev = qdev;
4110                 rx_ring->cq_id = i;
4111                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4112                 if (i < qdev->rss_ring_count) {
4113                         /*
4114                          * Inbound (RSS) queues.
4115                          */
4116                         rx_ring->cq_len = qdev->rx_ring_size;
4117                         rx_ring->cq_size =
4118                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4119                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4120                         rx_ring->lbq_size =
4121                             rx_ring->lbq_len * sizeof(__le64);
4122                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4123                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4124                                      "lbq_buf_size %d, order = %d\n",
4125                                      rx_ring->lbq_buf_size,
4126                                      qdev->lbq_buf_order);
4127                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4128                         rx_ring->sbq_size =
4129                             rx_ring->sbq_len * sizeof(__le64);
4130                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4131                         rx_ring->type = RX_Q;
4132                 } else {
4133                         /*
4134                          * Outbound queue: handles outbound completions only.
4135                          * The outbound cq is the same size as the tx_ring it services.
4136                          */
4137                         rx_ring->cq_len = qdev->tx_ring_size;
4138                         rx_ring->cq_size =
4139                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4140                         rx_ring->lbq_len = 0;
4141                         rx_ring->lbq_size = 0;
4142                         rx_ring->lbq_buf_size = 0;
4143                         rx_ring->sbq_len = 0;
4144                         rx_ring->sbq_size = 0;
4145                         rx_ring->sbq_buf_size = 0;
4146                         rx_ring->type = TX_Q;
4147                 }
4148         }
4149         return 0;
4150 }
4151
4152 static int qlge_open(struct net_device *ndev)
4153 {
4154         int err = 0;
4155         struct ql_adapter *qdev = netdev_priv(ndev);
4156
4157         err = ql_adapter_reset(qdev);
4158         if (err)
4159                 return err;
4160
4161         err = ql_configure_rings(qdev);
4162         if (err)
4163                 return err;
4164
4165         err = ql_get_adapter_resources(qdev);
4166         if (err)
4167                 goto error_up;
4168
4169         err = ql_adapter_up(qdev);
4170         if (err)
4171                 goto error_up;
4172
4173         return err;
4174
4175 error_up:
4176         ql_release_adapter_resources(qdev);
4177         return err;
4178 }
4179
4180 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4181 {
4182         struct rx_ring *rx_ring;
4183         int i, status;
4184         u32 lbq_buf_len;
4185
4186         /* Wait for an outstanding reset to complete. */
4187         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4188                 int i = 3;
4189                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4190                         netif_err(qdev, ifup, qdev->ndev,
4191                                   "Waiting for adapter UP...\n");
4192                         ssleep(1);
4193                 }
4194
4195                 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4196                         netif_err(qdev, ifup, qdev->ndev,
4197                                   "Timed out waiting for adapter UP\n");
4198                         return -ETIMEDOUT;
4199                 }
4200         }
4201
4202         status = ql_adapter_down(qdev);
4203         if (status)
4204                 goto error;
4205
4206         /* Get the new rx buffer size. */
4207         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4208                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4209         qdev->lbq_buf_order = get_order(lbq_buf_len);
4210
4211         for (i = 0; i < qdev->rss_ring_count; i++) {
4212                 rx_ring = &qdev->rx_ring[i];
4213                 /* Set the new size. */
4214                 rx_ring->lbq_buf_size = lbq_buf_len;
4215         }
4216
4217         status = ql_adapter_up(qdev);
4218         if (status)
4219                 goto error;
4220
4221         return status;
4222 error:
4223         netif_alert(qdev, ifup, qdev->ndev,
4224                     "Driver up/down cycle failed, closing device.\n");
4225         set_bit(QL_ADAPTER_UP, &qdev->flags);
4226         dev_close(qdev->ndev);
4227         return status;
4228 }
4229
4230 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4231 {
4232         struct ql_adapter *qdev = netdev_priv(ndev);
4233         int status;
4234
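             /* Only the 1500 <-> 9000 transitions are supported; requesting
              * any other MTU (e.g. "ip link set ethX mtu 4000") fails with
              * -EINVAL.  A jumbo MTU also switches the large rx buffers to
              * LARGE_BUFFER_MAX_SIZE via ql_change_rx_buffers() when the
              * interface is running.
              */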
4235         if (ndev->mtu == 1500 && new_mtu == 9000) {
4236                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4237         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4238                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4239         } else
4240                 return -EINVAL;
4241
4242         queue_delayed_work(qdev->workqueue,
4243                         &qdev->mpi_port_cfg_work, 3*HZ);
4244
4245         ndev->mtu = new_mtu;
4246
4247         if (!netif_running(qdev->ndev))
4248                 return 0;
4250
4251         status = ql_change_rx_buffers(qdev);
4252         if (status) {
4253                 netif_err(qdev, ifup, qdev->ndev,
4254                           "Changing MTU failed.\n");
4255         }
4256
4257         return status;
4258 }
4259
4260 static struct net_device_stats *qlge_get_stats(struct net_device
4261                                                *ndev)
4262 {
4263         struct ql_adapter *qdev = netdev_priv(ndev);
4264         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4265         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4266         unsigned long pkts, mcast, dropped, errors, bytes;
4267         int i;
4268
4269         /* Get RX stats. */
4270         pkts = mcast = dropped = errors = bytes = 0;
4271         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4272                 pkts += rx_ring->rx_packets;
4273                 bytes += rx_ring->rx_bytes;
4274                 dropped += rx_ring->rx_dropped;
4275                 errors += rx_ring->rx_errors;
4276                 mcast += rx_ring->rx_multicast;
4277         }
4278         ndev->stats.rx_packets = pkts;
4279         ndev->stats.rx_bytes = bytes;
4280         ndev->stats.rx_dropped = dropped;
4281         ndev->stats.rx_errors = errors;
4282         ndev->stats.multicast = mcast;
4283
4284         /* Get TX stats. */
4285         pkts = errors = bytes = 0;
4286         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4287                 pkts += tx_ring->tx_packets;
4288                 bytes += tx_ring->tx_bytes;
4289                 errors += tx_ring->tx_errors;
4290         }
4291         ndev->stats.tx_packets = pkts;
4292         ndev->stats.tx_bytes = bytes;
4293         ndev->stats.tx_errors = errors;
4294         return &ndev->stats;
4295 }
4296
4297 static void qlge_set_multicast_list(struct net_device *ndev)
4298 {
4299         struct ql_adapter *qdev = netdev_priv(ndev);
4300         struct netdev_hw_addr *ha;
4301         int i, status;
4302
4303         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4304         if (status)
4305                 return;
4306         /*
4307          * Set or clear promiscuous mode if a
4308          * transition is taking place.
4309          */
4310         if (ndev->flags & IFF_PROMISC) {
4311                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4312                         if (ql_set_routing_reg
4313                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4314                                 netif_err(qdev, hw, qdev->ndev,
4315                                           "Failed to set promiscuous mode.\n");
4316                         } else {
4317                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4318                         }
4319                 }
4320         } else {
4321                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4322                         if (ql_set_routing_reg
4323                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4324                                 netif_err(qdev, hw, qdev->ndev,
4325                                           "Failed to clear promiscuous mode.\n");
4326                         } else {
4327                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4328                         }
4329                 }
4330         }
4331
4332         /*
4333          * Set or clear all multicast mode if a
4334          * transition is taking place.
4335          */
4336         if ((ndev->flags & IFF_ALLMULTI) ||
4337             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4338                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4339                         if (ql_set_routing_reg
4340                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4341                                 netif_err(qdev, hw, qdev->ndev,
4342                                           "Failed to set all-multi mode.\n");
4343                         } else {
4344                                 set_bit(QL_ALLMULTI, &qdev->flags);
4345                         }
4346                 }
4347         } else {
4348                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4349                         if (ql_set_routing_reg
4350                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4351                                 netif_err(qdev, hw, qdev->ndev,
4352                                           "Failed to clear all-multi mode.\n");
4353                         } else {
4354                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4355                         }
4356                 }
4357         }
4358
4359         if (!netdev_mc_empty(ndev)) {
4360                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4361                 if (status)
4362                         goto exit;
4363                 i = 0;
4364                 netdev_for_each_mc_addr(ha, ndev) {
4365                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4366                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4367                                 netif_err(qdev, hw, qdev->ndev,
4368                                           "Failed to load multicast address.\n");
4369                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4370                                 goto exit;
4371                         }
4372                         i++;
4373                 }
4374                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4375                 if (ql_set_routing_reg
4376                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4377                         netif_err(qdev, hw, qdev->ndev,
4378                                   "Failed to set multicast match mode.\n");
4379                 } else {
4380                         set_bit(QL_ALLMULTI, &qdev->flags);
4381                 }
4382         }
4383 exit:
4384         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4385 }
4386
4387 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4388 {
4389         struct ql_adapter *qdev = netdev_priv(ndev);
4390         struct sockaddr *addr = p;
4391         int status;
4392
4393         if (!is_valid_ether_addr(addr->sa_data))
4394                 return -EADDRNOTAVAIL;
4395         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4396         /* Update local copy of current mac address. */
4397         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4398
4399         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4400         if (status)
4401                 return status;
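             /* The CAM slot for this function's unicast address is selected
              * as func * MAX_CQ, so each PCI function uses a distinct block
              * of CAM entries.
              */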
4402         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4403                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4404         if (status)
4405                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4406         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4407         return status;
4408 }
4409
4410 static void qlge_tx_timeout(struct net_device *ndev)
4411 {
4412         struct ql_adapter *qdev = netdev_priv(ndev);
4413         ql_queue_asic_error(qdev);
4414 }
4415
4416 static void ql_asic_reset_work(struct work_struct *work)
4417 {
4418         struct ql_adapter *qdev =
4419             container_of(work, struct ql_adapter, asic_reset_work.work);
4420         int status;
4421         rtnl_lock();
4422         status = ql_adapter_down(qdev);
4423         if (status)
4424                 goto error;
4425
4426         status = ql_adapter_up(qdev);
4427         if (status)
4428                 goto error;
4429
4430         /* Restore rx mode. */
4431         clear_bit(QL_ALLMULTI, &qdev->flags);
4432         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4433         qlge_set_multicast_list(qdev->ndev);
4434
4435         rtnl_unlock();
4436         return;
4437 error:
4438         netif_alert(qdev, ifup, qdev->ndev,
4439                     "Driver up/down cycle failed, closing device\n");
4440
4441         set_bit(QL_ADAPTER_UP, &qdev->flags);
4442         dev_close(qdev->ndev);
4443         rtnl_unlock();
4444 }
4445
4446 static const struct nic_operations qla8012_nic_ops = {
4447         .get_flash              = ql_get_8012_flash_params,
4448         .port_initialize        = ql_8012_port_initialize,
4449 };
4450
4451 static const struct nic_operations qla8000_nic_ops = {
4452         .get_flash              = ql_get_8000_flash_params,
4453         .port_initialize        = ql_8000_port_initialize,
4454 };
4455
4456 /* Find the pcie function number for the other NIC
4457  * on this chip.  Since both NIC functions share a
4458  * common firmware we have the lowest enabled function
4459  * do any common work.  Examples would be resetting
4460  * after a fatal firmware error, or doing a firmware
4461  * coredump.
4462  */
4463 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4464 {
4465         int status = 0;
4466         u32 temp;
4467         u32 nic_func1, nic_func2;
4468
4469         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4470                         &temp);
4471         if (status)
4472                 return status;
4473
4474         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4475                         MPI_TEST_NIC_FUNC_MASK);
4476         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4477                         MPI_TEST_NIC_FUNC_MASK);
4478
4479         if (qdev->func == nic_func1)
4480                 qdev->alt_func = nic_func2;
4481         else if (qdev->func == nic_func2)
4482                 qdev->alt_func = nic_func1;
4483         else
4484                 status = -EIO;
4485
4486         return status;
4487 }
4488
4489 static int ql_get_board_info(struct ql_adapter *qdev)
4490 {
4491         int status;
4492         qdev->func =
4493             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4494         if (qdev->func > 3)
4495                 return -EIO;
4496
4497         status = ql_get_alt_pcie_func(qdev);
4498         if (status)
4499                 return status;
4500
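             /* Of the two NIC functions on the chip, the lower-numbered one
              * is port 0 and the other is port 1; the port selects the XGMAC
              * semaphore, link/init status bits and mailbox addresses below.
              */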
4501         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4502         if (qdev->port) {
4503                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4504                 qdev->port_link_up = STS_PL1;
4505                 qdev->port_init = STS_PI1;
4506                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4507                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4508         } else {
4509                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4510                 qdev->port_link_up = STS_PL0;
4511                 qdev->port_init = STS_PI0;
4512                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4513                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4514         }
4515         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4516         qdev->device_id = qdev->pdev->device;
4517         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4518                 qdev->nic_ops = &qla8012_nic_ops;
4519         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4520                 qdev->nic_ops = &qla8000_nic_ops;
4521         return status;
4522 }
4523
4524 static void ql_release_all(struct pci_dev *pdev)
4525 {
4526         struct net_device *ndev = pci_get_drvdata(pdev);
4527         struct ql_adapter *qdev = netdev_priv(ndev);
4528
4529         if (qdev->workqueue) {
4530                 destroy_workqueue(qdev->workqueue);
4531                 qdev->workqueue = NULL;
4532         }
4533
4534         if (qdev->reg_base)
4535                 iounmap(qdev->reg_base);
4536         if (qdev->doorbell_area)
4537                 iounmap(qdev->doorbell_area);
4538         vfree(qdev->mpi_coredump);
4539         pci_release_regions(pdev);
4540         pci_set_drvdata(pdev, NULL);
4541 }
4542
4543 static int __devinit ql_init_device(struct pci_dev *pdev,
4544                                     struct net_device *ndev, int cards_found)
4545 {
4546         struct ql_adapter *qdev = netdev_priv(ndev);
4547         int err = 0;
4548
4549         memset((void *)qdev, 0, sizeof(*qdev));
4550         err = pci_enable_device(pdev);
4551         if (err) {
4552                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4553                 return err;
4554         }
4555
4556         qdev->ndev = ndev;
4557         qdev->pdev = pdev;
4558         pci_set_drvdata(pdev, ndev);
4559
4560         /* Set PCIe read request size */
4561         err = pcie_set_readrq(pdev, 4096);
4562         if (err) {
4563                 dev_err(&pdev->dev, "Set readrq failed.\n");
4564                 goto err_out1;
4565         }
4566
4567         err = pci_request_regions(pdev, DRV_NAME);
4568         if (err) {
4569                 dev_err(&pdev->dev, "PCI region request failed.\n");
4570                 goto err_out1;
4571         }
4572
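             /* Prefer a 64-bit DMA mask and fall back to 32-bit if that is
              * not available.  QL_DMA64 is recorded so qlge_probe() can
              * advertise NETIF_F_HIGHDMA only when 64-bit DMA is in use.
              */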
4573         pci_set_master(pdev);
4574         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4575                 set_bit(QL_DMA64, &qdev->flags);
4576                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4577         } else {
4578                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4579                 if (!err)
4580                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4581         }
4582
4583         if (err) {
4584                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4585                 goto err_out2;
4586         }
4587
4588         /* Set PCIe reset type for EEH to fundamental. */
4589         pdev->needs_freset = 1;
4590         pci_save_state(pdev);
4591         qdev->reg_base =
4592             ioremap_nocache(pci_resource_start(pdev, 1),
4593                             pci_resource_len(pdev, 1));
4594         if (!qdev->reg_base) {
4595                 dev_err(&pdev->dev, "Register mapping failed.\n");
4596                 err = -ENOMEM;
4597                 goto err_out2;
4598         }
4599
4600         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4601         qdev->doorbell_area =
4602             ioremap_nocache(pci_resource_start(pdev, 3),
4603                             pci_resource_len(pdev, 3));
4604         if (!qdev->doorbell_area) {
4605                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4606                 err = -ENOMEM;
4607                 goto err_out2;
4608         }
4609
4610         err = ql_get_board_info(qdev);
4611         if (err) {
4612                 dev_err(&pdev->dev, "Register access failed.\n");
4613                 err = -EIO;
4614                 goto err_out2;
4615         }
4616         qdev->msg_enable = netif_msg_init(debug, default_msg);
4617         spin_lock_init(&qdev->hw_lock);
4618         spin_lock_init(&qdev->stats_lock);
4619
4620         if (qlge_mpi_coredump) {
4621                 qdev->mpi_coredump =
4622                         vmalloc(sizeof(struct ql_mpi_coredump));
4623                 if (qdev->mpi_coredump == NULL) {
4624                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4625                         err = -ENOMEM;
4626                         goto err_out2;
4627                 }
4628                 if (qlge_force_coredump)
4629                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4630         }
4631         /* make sure the EEPROM is good */
4632         err = qdev->nic_ops->get_flash(qdev);
4633         if (err) {
4634                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4635                 goto err_out2;
4636         }
4637
4638         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4639         /* Keep local copy of current mac address. */
4640         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4641
4642         /* Set up the default ring sizes. */
4643         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4644         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4645
4646         /* Set up the coalescing parameters. */
4647         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4648         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4649         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4650         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4651
4652         /*
4653          * Set up the operating parameters.
4654          */
4655         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4656         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4657         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4658         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4659         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4660         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4661         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4662         init_completion(&qdev->ide_completion);
4663         mutex_init(&qdev->mpi_mutex);
4664
4665         if (!cards_found) {
4666                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4667                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4668                          DRV_NAME, DRV_VERSION);
4669         }
4670         return 0;
4671 err_out2:
4672         ql_release_all(pdev);
4673 err_out1:
4674         pci_disable_device(pdev);
4675         return err;
4676 }
4677
4678 static const struct net_device_ops qlge_netdev_ops = {
4679         .ndo_open               = qlge_open,
4680         .ndo_stop               = qlge_close,
4681         .ndo_start_xmit         = qlge_send,
4682         .ndo_change_mtu         = qlge_change_mtu,
4683         .ndo_get_stats          = qlge_get_stats,
4684         .ndo_set_rx_mode        = qlge_set_multicast_list,
4685         .ndo_set_mac_address    = qlge_set_mac_address,
4686         .ndo_validate_addr      = eth_validate_addr,
4687         .ndo_tx_timeout         = qlge_tx_timeout,
4688         .ndo_fix_features       = qlge_fix_features,
4689         .ndo_set_features       = qlge_set_features,
4690         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4691         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4692 };
4693
4694 static void ql_timer(unsigned long data)
4695 {
4696         struct ql_adapter *qdev = (struct ql_adapter *)data;
4697         u32 var = 0;
4698
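             /* Periodically read a hardware register so that a dead PCI bus
              * is noticed and reported via EEH; if the channel is already
              * offline, do not re-arm the timer.
              */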
4699         var = ql_read32(qdev, STS);
4700         if (pci_channel_offline(qdev->pdev)) {
4701                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4702                 return;
4703         }
4704
4705         mod_timer(&qdev->timer, jiffies + (5*HZ));
4706 }
4707
4708 static int __devinit qlge_probe(struct pci_dev *pdev,
4709                                 const struct pci_device_id *pci_entry)
4710 {
4711         struct net_device *ndev = NULL;
4712         struct ql_adapter *qdev = NULL;
4713         static int cards_found = 0;
4714         int err = 0;
4715
4716         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4717                         min(MAX_CPUS, (int)num_online_cpus()));
4718         if (!ndev)
4719                 return -ENOMEM;
4720
4721         err = ql_init_device(pdev, ndev, cards_found);
4722         if (err < 0) {
4723                 free_netdev(ndev);
4724                 return err;
4725         }
4726
4727         qdev = netdev_priv(ndev);
4728         SET_NETDEV_DEV(ndev, &pdev->dev);
4729         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4730                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4731                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
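             /* hw_features lists what the user may toggle with "ethtool -K";
              * VLAN rx acceleration and filtering are enabled below but are
              * not part of hw_features, so they stay permanently on.
              */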
4732         ndev->features = ndev->hw_features |
4733                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4734
4735         if (test_bit(QL_DMA64, &qdev->flags))
4736                 ndev->features |= NETIF_F_HIGHDMA;
4737
4738         /*
4739          * Set up net_device structure.
4740          */
4741         ndev->tx_queue_len = qdev->tx_ring_size;
4742         ndev->irq = pdev->irq;
4743
4744         ndev->netdev_ops = &qlge_netdev_ops;
4745         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4746         ndev->watchdog_timeo = 10 * HZ;
4747
4748         err = register_netdev(ndev);
4749         if (err) {
4750                 dev_err(&pdev->dev, "net device registration failed.\n");
4751                 ql_release_all(pdev);
4752                 pci_disable_device(pdev);
4753                 return err;
4754         }
4755         /* Start up the timer to trigger EEH if
4756          * the bus goes dead
4757          */
4758         init_timer_deferrable(&qdev->timer);
4759         qdev->timer.data = (unsigned long)qdev;
4760         qdev->timer.function = ql_timer;
4761         qdev->timer.expires = jiffies + (5*HZ);
4762         add_timer(&qdev->timer);
4763         ql_link_off(qdev);
4764         ql_display_dev_info(ndev);
4765         atomic_set(&qdev->lb_count, 0);
4766         cards_found++;
4767         return 0;
4768 }
4769
4770 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4771 {
4772         return qlge_send(skb, ndev);
4773 }
4774
4775 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4776 {
4777         return ql_clean_inbound_rx_ring(rx_ring, budget);
4778 }
4779
4780 static void __devexit qlge_remove(struct pci_dev *pdev)
4781 {
4782         struct net_device *ndev = pci_get_drvdata(pdev);
4783         struct ql_adapter *qdev = netdev_priv(ndev);
4784         del_timer_sync(&qdev->timer);
4785         ql_cancel_all_work_sync(qdev);
4786         unregister_netdev(ndev);
4787         ql_release_all(pdev);
4788         pci_disable_device(pdev);
4789         free_netdev(ndev);
4790 }
4791
4792 /* Clean up resources without touching hardware. */
4793 static void ql_eeh_close(struct net_device *ndev)
4794 {
4795         int i;
4796         struct ql_adapter *qdev = netdev_priv(ndev);
4797
4798         if (netif_carrier_ok(ndev)) {
4799                 netif_carrier_off(ndev);
4800                 netif_stop_queue(ndev);
4801         }
4802
4803         /* Disabling the timer */
4804         del_timer_sync(&qdev->timer);
4805         ql_cancel_all_work_sync(qdev);
4806
4807         for (i = 0; i < qdev->rss_ring_count; i++)
4808                 netif_napi_del(&qdev->rx_ring[i].napi);
4809
4810         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4811         ql_tx_ring_clean(qdev);
4812         ql_free_rx_buffers(qdev);
4813         ql_release_adapter_resources(qdev);
4814 }
4815
4816 /*
4817  * This callback is called by the PCI subsystem whenever
4818  * a PCI bus error is detected.
4819  */
4820 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4821                                                enum pci_channel_state state)
4822 {
4823         struct net_device *ndev = pci_get_drvdata(pdev);
4824         struct ql_adapter *qdev = netdev_priv(ndev);
4825
4826         switch (state) {
4827         case pci_channel_io_normal:
4828                 return PCI_ERS_RESULT_CAN_RECOVER;
4829         case pci_channel_io_frozen:
4830                 netif_device_detach(ndev);
4831                 if (netif_running(ndev))
4832                         ql_eeh_close(ndev);
4833                 pci_disable_device(pdev);
4834                 return PCI_ERS_RESULT_NEED_RESET;
4835         case pci_channel_io_perm_failure:
4836                 dev_err(&pdev->dev,
4837                         "%s: pci_channel_io_perm_failure.\n", __func__);
4838                 ql_eeh_close(ndev);
4839                 set_bit(QL_EEH_FATAL, &qdev->flags);
4840                 return PCI_ERS_RESULT_DISCONNECT;
4841         }
4842
4843         /* Request a slot reset. */
4844         return PCI_ERS_RESULT_NEED_RESET;
4845 }
4846
4847 /*
4848  * This callback is called after the PCI bus has been reset.
4849  * Basically, this tries to restart the card from scratch.
4850  * This is a shortened version of the device probe/discovery code;
4851  * it resembles the first half of the qlge_probe() routine.
4852  */
4853 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4854 {
4855         struct net_device *ndev = pci_get_drvdata(pdev);
4856         struct ql_adapter *qdev = netdev_priv(ndev);
4857
4858         pdev->error_state = pci_channel_io_normal;
4859
4860         pci_restore_state(pdev);
4861         if (pci_enable_device(pdev)) {
4862                 netif_err(qdev, ifup, qdev->ndev,
4863                           "Cannot re-enable PCI device after reset.\n");
4864                 return PCI_ERS_RESULT_DISCONNECT;
4865         }
4866         pci_set_master(pdev);
4867
4868         if (ql_adapter_reset(qdev)) {
4869                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4870                 set_bit(QL_EEH_FATAL, &qdev->flags);
4871                 return PCI_ERS_RESULT_DISCONNECT;
4872         }
4873
4874         return PCI_ERS_RESULT_RECOVERED;
4875 }
4876
4877 static void qlge_io_resume(struct pci_dev *pdev)
4878 {
4879         struct net_device *ndev = pci_get_drvdata(pdev);
4880         struct ql_adapter *qdev = netdev_priv(ndev);
4881         int err = 0;
4882
4883         if (netif_running(ndev)) {
4884                 err = qlge_open(ndev);
4885                 if (err) {
4886                         netif_err(qdev, ifup, qdev->ndev,
4887                                   "Device initialization failed after reset.\n");
4888                         return;
4889                 }
4890         } else {
4891                 netif_err(qdev, ifup, qdev->ndev,
4892                           "Device was not running prior to EEH.\n");
4893         }
4894         mod_timer(&qdev->timer, jiffies + (5*HZ));
4895         netif_device_attach(ndev);
4896 }
4897
4898 static struct pci_error_handlers qlge_err_handler = {
4899         .error_detected = qlge_io_error_detected,
4900         .slot_reset = qlge_io_slot_reset,
4901         .resume = qlge_io_resume,
4902 };
4903
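     /* qlge_suspend() is also called from qlge_shutdown() below, so it is not
      * wrapped in the CONFIG_PM block that guards qlge_resume().
      */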
4904 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4905 {
4906         struct net_device *ndev = pci_get_drvdata(pdev);
4907         struct ql_adapter *qdev = netdev_priv(ndev);
4908         int err;
4909
4910         netif_device_detach(ndev);
4911         del_timer_sync(&qdev->timer);
4912
4913         if (netif_running(ndev)) {
4914                 err = ql_adapter_down(qdev);
4915                 if (err)
4916                         return err;
4917         }
4918
4919         ql_wol(qdev);
4920         err = pci_save_state(pdev);
4921         if (err)
4922                 return err;
4923
4924         pci_disable_device(pdev);
4925
4926         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4927
4928         return 0;
4929 }
4930
4931 #ifdef CONFIG_PM
4932 static int qlge_resume(struct pci_dev *pdev)
4933 {
4934         struct net_device *ndev = pci_get_drvdata(pdev);
4935         struct ql_adapter *qdev = netdev_priv(ndev);
4936         int err;
4937
4938         pci_set_power_state(pdev, PCI_D0);
4939         pci_restore_state(pdev);
4940         err = pci_enable_device(pdev);
4941         if (err) {
4942                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4943                 return err;
4944         }
4945         pci_set_master(pdev);
4946
4947         pci_enable_wake(pdev, PCI_D3hot, 0);
4948         pci_enable_wake(pdev, PCI_D3cold, 0);
4949
4950         if (netif_running(ndev)) {
4951                 err = ql_adapter_up(qdev);
4952                 if (err)
4953                         return err;
4954         }
4955
4956         mod_timer(&qdev->timer, jiffies + (5*HZ));
4957         netif_device_attach(ndev);
4958
4959         return 0;
4960 }
4961 #endif /* CONFIG_PM */
4962
4963 static void qlge_shutdown(struct pci_dev *pdev)
4964 {
4965         qlge_suspend(pdev, PMSG_SUSPEND);
4966 }
4967
4968 static struct pci_driver qlge_driver = {
4969         .name = DRV_NAME,
4970         .id_table = qlge_pci_tbl,
4971         .probe = qlge_probe,
4972         .remove = __devexit_p(qlge_remove),
4973 #ifdef CONFIG_PM
4974         .suspend = qlge_suspend,
4975         .resume = qlge_resume,
4976 #endif
4977         .shutdown = qlge_shutdown,
4978         .err_handler = &qlge_err_handler
4979 };
4980
4981 static int __init qlge_init_module(void)
4982 {
4983         return pci_register_driver(&qlge_driver);
4984 }
4985
4986 static void __exit qlge_exit(void)
4987 {
4988         pci_unregister_driver(&qlge_driver);
4989 }
4990
4991 module_init(qlge_init_module);
4992 module_exit(qlge_exit);