qlge: make local functions static
[pandora-kernel.git] drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = 0x00007fff;  /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, 0);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79                 "Option to enable MPI firmware dump. "
80                 "Default is OFF - Do not allocate memory.");
81
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85                 "Option to allow force of firmware core dump. "
86                 "Default is OFF - Do not allow.");
87
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91         /* required last entry */
92         {0,}
93 };
94
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97 static int ql_wol(struct ql_adapter *qdev);
98 static void qlge_set_multicast_list(struct net_device *ndev);
99
100 /* This hardware semaphore causes exclusive access to
101  * resources shared between the NIC driver, MPI firmware,
102  * FCOE firmware and the FC driver.
103  */
104 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
105 {
106         u32 sem_bits = 0;
107
108         switch (sem_mask) {
109         case SEM_XGMAC0_MASK:
110                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
111                 break;
112         case SEM_XGMAC1_MASK:
113                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
114                 break;
115         case SEM_ICB_MASK:
116                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
117                 break;
118         case SEM_MAC_ADDR_MASK:
119                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
120                 break;
121         case SEM_FLASH_MASK:
122                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
123                 break;
124         case SEM_PROBE_MASK:
125                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
126                 break;
127         case SEM_RT_IDX_MASK:
128                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
129                 break;
130         case SEM_PROC_REG_MASK:
131                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
132                 break;
133         default:
134                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask.\n");
135                 return -EINVAL;
136         }
137
138         ql_write32(qdev, SEM, sem_bits | sem_mask);
139         return !(ql_read32(qdev, SEM) & sem_bits);
140 }
141
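/* Acquire a hardware semaphore, retrying for up to ~3 ms before timing out. */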
142 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
143 {
144         unsigned int wait_count = 30;
145         do {
146                 if (!ql_sem_trylock(qdev, sem_mask))
147                         return 0;
148                 udelay(100);
149         } while (--wait_count);
150         return -ETIMEDOUT;
151 }
152
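/* Release a hardware semaphore.  The read back flushes the write. */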
153 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
154 {
155         ql_write32(qdev, SEM, sem_mask);
156         ql_read32(qdev, SEM);   /* flush */
157 }
158
159 /* This function waits for a specific bit to come ready
160  * in a given register.  It is used mostly during the initialization
161  * process, but is also called from kernel thread context in paths such as
162  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
163  */
164 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
165 {
166         u32 temp;
167         int count = UDELAY_COUNT;
168
169         while (count) {
170                 temp = ql_read32(qdev, reg);
171
172                 /* check for errors */
173                 if (temp & err_bit) {
174                         netif_alert(qdev, probe, qdev->ndev,
175                                     "register 0x%.08x access error, value = 0x%.08x.\n",
176                                     reg, temp);
177                         return -EIO;
178                 } else if (temp & bit)
179                         return 0;
180                 udelay(UDELAY_DELAY);
181                 count--;
182         }
183         netif_alert(qdev, probe, qdev->ndev,
184                     "Timed out waiting for reg %x to come ready.\n", reg);
185         return -ETIMEDOUT;
186 }
187
188 /* The CFG register is used to download TX and RX control blocks
189  * to the chip. This function waits for an operation to complete.
190  */
191 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
192 {
193         int count = UDELAY_COUNT;
194         u32 temp;
195
196         while (count) {
197                 temp = ql_read32(qdev, CFG);
198                 if (temp & CFG_LE)
199                         return -EIO;
200                 if (!(temp & bit))
201                         return 0;
202                 udelay(UDELAY_DELAY);
203                 count--;
204         }
205         return -ETIMEDOUT;
206 }
207
208
209 /* Used to issue init control blocks to hw. Maps control block,
210  * sets address, triggers download, waits for completion.
211  */
212 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
213                  u16 q_id)
214 {
215         u64 map;
216         int status = 0;
217         int direction;
218         u32 mask;
219         u32 value;
220
221         direction =
222             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
223             PCI_DMA_FROMDEVICE;
224
225         map = pci_map_single(qdev->pdev, ptr, size, direction);
226         if (pci_dma_mapping_error(qdev->pdev, map)) {
227                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228                 return -ENOMEM;
229         }
230
231         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232         if (status)
233                 return status;
234
235         status = ql_wait_cfg(qdev, bit);
236         if (status) {
237                 netif_err(qdev, ifup, qdev->ndev,
238                           "Timed out waiting for CFG to come ready.\n");
239                 goto exit;
240         }
241
242         ql_write32(qdev, ICB_L, (u32) map);
243         ql_write32(qdev, ICB_H, (u32) (map >> 32));
244
245         mask = CFG_Q_MASK | (bit << 16);
246         value = bit | (q_id << CFG_Q_SHIFT);
247         ql_write32(qdev, CFG, (mask | value));
248
249         /*
250          * Wait for the bit to clear after signaling hw.
251          */
252         status = ql_wait_cfg(qdev, bit);
253 exit:
254         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
255         pci_unmap_single(qdev->pdev, map, size, direction);
256         return status;
257 }
258
259 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
260 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
261                         u32 *value)
262 {
263         u32 offset = 0;
264         int status;
265
266         switch (type) {
267         case MAC_ADDR_TYPE_MULTI_MAC:
268         case MAC_ADDR_TYPE_CAM_MAC:
269                 {
270                         status =
271                             ql_wait_reg_rdy(qdev,
272                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
273                         if (status)
274                                 goto exit;
275                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
277                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
278                         status =
279                             ql_wait_reg_rdy(qdev,
280                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
281                         if (status)
282                                 goto exit;
283                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
284                         status =
285                             ql_wait_reg_rdy(qdev,
286                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287                         if (status)
288                                 goto exit;
289                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
291                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292                         status =
293                             ql_wait_reg_rdy(qdev,
294                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
295                         if (status)
296                                 goto exit;
297                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
299                                 status =
300                                     ql_wait_reg_rdy(qdev,
301                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
302                                 if (status)
303                                         goto exit;
304                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
306                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
307                                 status =
308                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309                                                     MAC_ADDR_MR, 0);
310                                 if (status)
311                                         goto exit;
312                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
313                         }
314                         break;
315                 }
316         case MAC_ADDR_TYPE_VLAN:
317         case MAC_ADDR_TYPE_MULTI_FLTR:
318         default:
319                 netif_crit(qdev, ifup, qdev->ndev,
320                            "Address type %d not yet supported.\n", type);
321                 status = -EPERM;
322         }
323 exit:
324         return status;
325 }
326
327 /* Set up a MAC, multicast or VLAN address for the
328  * inbound frame matching.
329  */
330 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
331                                u16 index)
332 {
333         u32 offset = 0;
334         int status = 0;
335
336         switch (type) {
337         case MAC_ADDR_TYPE_MULTI_MAC:
338                 {
339                         u32 upper = (addr[0] << 8) | addr[1];
340                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
341                                         (addr[4] << 8) | (addr[5]);
342
343                         status =
344                                 ql_wait_reg_rdy(qdev,
345                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
346                         if (status)
347                                 goto exit;
348                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
349                                 (index << MAC_ADDR_IDX_SHIFT) |
350                                 type | MAC_ADDR_E);
351                         ql_write32(qdev, MAC_ADDR_DATA, lower);
352                         status =
353                                 ql_wait_reg_rdy(qdev,
354                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
355                         if (status)
356                                 goto exit;
357                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358                                 (index << MAC_ADDR_IDX_SHIFT) |
359                                 type | MAC_ADDR_E);
360
361                         ql_write32(qdev, MAC_ADDR_DATA, upper);
362                         status =
363                                 ql_wait_reg_rdy(qdev,
364                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
365                         if (status)
366                                 goto exit;
367                         break;
368                 }
369         case MAC_ADDR_TYPE_CAM_MAC:
370                 {
371                         u32 cam_output;
372                         u32 upper = (addr[0] << 8) | addr[1];
373                         u32 lower =
374                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
375                             (addr[5]);
376
377                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
378                                      "Adding %s address %pM at index %d in the CAM.\n",
379                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
380                                      "MULTICAST" : "UNICAST",
381                                      addr, index);
382
383                         status =
384                             ql_wait_reg_rdy(qdev,
385                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
386                         if (status)
387                                 goto exit;
388                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
389                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
390                                    type);       /* type */
391                         ql_write32(qdev, MAC_ADDR_DATA, lower);
392                         status =
393                             ql_wait_reg_rdy(qdev,
394                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
395                         if (status)
396                                 goto exit;
397                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
398                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
399                                    type);       /* type */
400                         ql_write32(qdev, MAC_ADDR_DATA, upper);
401                         status =
402                             ql_wait_reg_rdy(qdev,
403                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
404                         if (status)
405                                 goto exit;
406                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
407                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
408                                    type);       /* type */
409                         /* This field should also include the queue id
410                            and possibly the function id.  Right now we hardcode
411                            the route field to NIC core.
412                          */
413                         cam_output = (CAM_OUT_ROUTE_NIC |
414                                       (qdev->
415                                        func << CAM_OUT_FUNC_SHIFT) |
416                                         (0 << CAM_OUT_CQ_ID_SHIFT));
417                         if (qdev->vlgrp)
418                                 cam_output |= CAM_OUT_RV;
419                         /* route to NIC core */
420                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
421                         break;
422                 }
423         case MAC_ADDR_TYPE_VLAN:
424                 {
425                         u32 enable_bit = *((u32 *) &addr[0]);
426                         /* For VLAN, the addr actually holds a bit that
427                          * either enables or disables the vlan id we are
428                          * addressing. It's either MAC_ADDR_E on or off.
429                          * That's bit-27 we're talking about.
430                          */
431                         netif_info(qdev, ifup, qdev->ndev,
432                                    "%s VLAN ID %d %s the CAM.\n",
433                                    enable_bit ? "Adding" : "Removing",
434                                    index,
435                                    enable_bit ? "to" : "from");
436
437                         status =
438                             ql_wait_reg_rdy(qdev,
439                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
440                         if (status)
441                                 goto exit;
442                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
443                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
444                                    type |       /* type */
445                                    enable_bit); /* enable/disable */
446                         break;
447                 }
448         case MAC_ADDR_TYPE_MULTI_FLTR:
449         default:
450                 netif_crit(qdev, ifup, qdev->ndev,
451                            "Address type %d not yet supported.\n", type);
452                 status = -EPERM;
453         }
454 exit:
455         return status;
456 }
457
458 /* Set or clear MAC address in hardware. We sometimes
459  * have to clear it to prevent wrong frame routing
460  * especially in a bonding environment.
461  */
462 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
463 {
464         int status;
465         char zero_mac_addr[ETH_ALEN];
466         char *addr;
467
468         if (set) {
469                 addr = &qdev->current_mac_addr[0];
470                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
471                              "Set Mac addr %pM\n", addr);
472         } else {
473                 memset(zero_mac_addr, 0, ETH_ALEN);
474                 addr = &zero_mac_addr[0];
475                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
476                              "Clearing MAC address\n");
477         }
478         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
479         if (status)
480                 return status;
481         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
482                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
483         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
484         if (status)
485                 netif_err(qdev, ifup, qdev->ndev,
486                           "Failed to init mac address.\n");
487         return status;
488 }
489
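/* Report carrier up and program our MAC address into the CAM. */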
490 void ql_link_on(struct ql_adapter *qdev)
491 {
492         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
493         netif_carrier_on(qdev->ndev);
494         ql_set_mac_addr(qdev, 1);
495 }
496
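/* Report carrier down and clear our MAC address from the CAM. */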
497 void ql_link_off(struct ql_adapter *qdev)
498 {
499         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
500         netif_carrier_off(qdev->ndev);
501         ql_set_mac_addr(qdev, 0);
502 }
503
504 /* Get a specific frame routing value from the CAM.
505  * Used for debug and reg dump.
506  */
507 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508 {
509         int status = 0;
510
511         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
512         if (status)
513                 goto exit;
514
515         ql_write32(qdev, RT_IDX,
516                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
517         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
518         if (status)
519                 goto exit;
520         *value = ql_read32(qdev, RT_DATA);
521 exit:
522         return status;
523 }
524
525 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
526  * to route different frame types to various inbound queues.  We send broadcast/
527  * multicast/error frames to the default queue for slow handling,
528  * and CAM hit/RSS frames to the fast handling queues.
529  */
530 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
531                               int enable)
532 {
533         int status = -EINVAL; /* Return error if no mask match. */
534         u32 value = 0;
535
536         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
537                      "%s %s mask %s the routing reg.\n",
538                      enable ? "Adding" : "Removing",
539                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
540                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
541                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
542                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
543                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
544                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
545                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
546                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
547                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
548                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
549                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
550                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
551                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
552                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
553                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
554                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
555                      "(Bad index != RT_IDX)",
556                      enable ? "to" : "from");
557
558         switch (mask) {
559         case RT_IDX_CAM_HIT:
560                 {
561                         value = RT_IDX_DST_CAM_Q |      /* dest */
562                             RT_IDX_TYPE_NICQ |  /* type */
563                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
564                         break;
565                 }
566         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
567                 {
568                         value = RT_IDX_DST_DFLT_Q |     /* dest */
569                             RT_IDX_TYPE_NICQ |  /* type */
570                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
571                         break;
572                 }
573         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
574                 {
575                         value = RT_IDX_DST_DFLT_Q |     /* dest */
576                             RT_IDX_TYPE_NICQ |  /* type */
577                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
578                         break;
579                 }
580         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
581                 {
582                         value = RT_IDX_DST_DFLT_Q | /* dest */
583                                 RT_IDX_TYPE_NICQ | /* type */
584                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
585                                 RT_IDX_IDX_SHIFT); /* index */
586                         break;
587                 }
588         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
589                 {
590                         value = RT_IDX_DST_DFLT_Q | /* dest */
591                                 RT_IDX_TYPE_NICQ | /* type */
592                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
593                                 RT_IDX_IDX_SHIFT); /* index */
594                         break;
595                 }
596         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
597                 {
598                         value = RT_IDX_DST_DFLT_Q |     /* dest */
599                             RT_IDX_TYPE_NICQ |  /* type */
600                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
601                         break;
602                 }
603         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
604                 {
605                         value = RT_IDX_DST_DFLT_Q |     /* dest */
606                             RT_IDX_TYPE_NICQ |  /* type */
607                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
608                         break;
609                 }
610         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
611                 {
612                         value = RT_IDX_DST_DFLT_Q |     /* dest */
613                             RT_IDX_TYPE_NICQ |  /* type */
614                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
615                         break;
616                 }
617         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
618                 {
619                         value = RT_IDX_DST_RSS |        /* dest */
620                             RT_IDX_TYPE_NICQ |  /* type */
621                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
622                         break;
623                 }
624         case 0:         /* Clear the E-bit on an entry. */
625                 {
626                         value = RT_IDX_DST_DFLT_Q |     /* dest */
627                             RT_IDX_TYPE_NICQ |  /* type */
628                             (index << RT_IDX_IDX_SHIFT);/* index */
629                         break;
630                 }
631         default:
632                 netif_err(qdev, ifup, qdev->ndev,
633                           "Mask type %d not yet supported.\n", mask);
634                 status = -EPERM;
635                 goto exit;
636         }
637
638         if (value) {
639                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
640                 if (status)
641                         goto exit;
642                 value |= (enable ? RT_IDX_E : 0);
643                 ql_write32(qdev, RT_IDX, value);
644                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
645         }
646 exit:
647         return status;
648 }
649
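/* Set the chip's global interrupt enable bit. */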
650 static void ql_enable_interrupts(struct ql_adapter *qdev)
651 {
652         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
653 }
654
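/* Clear the chip's global interrupt enable bit. */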
655 static void ql_disable_interrupts(struct ql_adapter *qdev)
656 {
657         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
658 }
659
660 /* If we're running with multiple MSI-X vectors then we enable on the fly.
661  * Otherwise, we may have multiple outstanding workers and don't want to
662  * enable until the last one finishes. In this case, the irq_cnt gets
663  * incremented every time we queue a worker and decremented every time
664  * a worker finishes.  Once it hits zero we enable the interrupt.
665  */
666 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
667 {
668         u32 var = 0;
669         unsigned long hw_flags = 0;
670         struct intr_context *ctx = qdev->intr_context + intr;
671
672         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673                 /* Always enable if we're running multiple MSI-X interrupts
674                  * and it's not the default (zeroth) interrupt.
675                  */
676                 ql_write32(qdev, INTR_EN,
677                            ctx->intr_en_mask);
678                 var = ql_read32(qdev, STS);
679                 return var;
680         }
681
682         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
683         if (atomic_dec_and_test(&ctx->irq_cnt)) {
684                 ql_write32(qdev, INTR_EN,
685                            ctx->intr_en_mask);
686                 var = ql_read32(qdev, STS);
687         }
688         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
689         return var;
690 }
691
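/* Disable a completion interrupt and bump irq_cnt so the matching
 * enable call will re-arm it.  MSI-X non-default vectors are
 * disabled by the hardware automatically.
 */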
692 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
693 {
694         u32 var = 0;
695         struct intr_context *ctx;
696
697         /* HW disables for us if we're running multiple MSI-X interrupts
698          * and it's not the default (zeroth) interrupt.
699          */
700         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
701                 return 0;
702
703         ctx = qdev->intr_context + intr;
704         spin_lock(&qdev->hw_lock);
705         if (!atomic_read(&ctx->irq_cnt)) {
706                 ql_write32(qdev, INTR_EN,
707                 ctx->intr_dis_mask);
708                 var = ql_read32(qdev, STS);
709         }
710         atomic_inc(&ctx->irq_cnt);
711         spin_unlock(&qdev->hw_lock);
712         return var;
713 }
714
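/* Enable the completion interrupt for every vector. */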
715 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
716 {
717         int i;
718         for (i = 0; i < qdev->intr_count; i++) {
719                 /* The enable call does an atomic_dec_and_test
720                  * and enables only if the result is zero.
721                  * So we precharge it here.
722                  */
723                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
724                         i == 0))
725                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
726                 ql_enable_completion_interrupt(qdev, i);
727         }
728
729 }
730
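/* Validate the flash image by checking its signature string and
 * verifying that its 16-bit words sum to zero.
 */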
731 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
732 {
733         int status, i;
734         u16 csum = 0;
735         __le16 *flash = (__le16 *)&qdev->flash;
736
737         status = strncmp((char *)&qdev->flash, str, 4);
738         if (status) {
739                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
740                 return  status;
741         }
742
743         for (i = 0; i < size; i++)
744                 csum += le16_to_cpu(*flash++);
745
746         if (csum)
747                 netif_err(qdev, ifup, qdev->ndev,
748                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
749
750         return csum;
751 }
752
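/* Read one 32-bit word of flash data at the given dword offset. */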
753 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
754 {
755         int status = 0;
756         /* wait for reg to come ready */
757         status = ql_wait_reg_rdy(qdev,
758                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
759         if (status)
760                 goto exit;
761         /* set up for reg read */
762         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
763         /* wait for reg to come ready */
764         status = ql_wait_reg_rdy(qdev,
765                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
766         if (status)
767                 goto exit;
768         /* This data is stored on flash as an array of
769          * __le32.  Since ql_read32() returns cpu endian
770          * we need to swap it back.
771          */
772         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
773 exit:
774         return status;
775 }
776
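/* Read and validate the 8000-series flash parameter block, then
 * extract the MAC address for this function.
 */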
777 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
778 {
779         u32 i, size;
780         int status;
781         __le32 *p = (__le32 *)&qdev->flash;
782         u32 offset;
783         u8 mac_addr[6];
784
785         /* Get flash offset for function and adjust
786          * for dword access.
787          */
788         if (!qdev->port)
789                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
790         else
791                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
792
793         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
794                 return -ETIMEDOUT;
795
796         size = sizeof(struct flash_params_8000) / sizeof(u32);
797         for (i = 0; i < size; i++, p++) {
798                 status = ql_read_flash_word(qdev, i+offset, p);
799                 if (status) {
800                         netif_err(qdev, ifup, qdev->ndev,
801                                   "Error reading flash.\n");
802                         goto exit;
803                 }
804         }
805
806         status = ql_validate_flash(qdev,
807                         sizeof(struct flash_params_8000) / sizeof(u16),
808                         "8000");
809         if (status) {
810                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
811                 status = -EINVAL;
812                 goto exit;
813         }
814
815         /* Extract either manufacturer or BOFM modified
816          * MAC address.
817          */
818         if (qdev->flash.flash_params_8000.data_type1 == 2)
819                 memcpy(mac_addr,
820                         qdev->flash.flash_params_8000.mac_addr1,
821                         qdev->ndev->addr_len);
822         else
823                 memcpy(mac_addr,
824                         qdev->flash.flash_params_8000.mac_addr,
825                         qdev->ndev->addr_len);
826
827         if (!is_valid_ether_addr(mac_addr)) {
828                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
829                 status = -EINVAL;
830                 goto exit;
831         }
832
833         memcpy(qdev->ndev->dev_addr,
834                 mac_addr,
835                 qdev->ndev->addr_len);
836
837 exit:
838         ql_sem_unlock(qdev, SEM_FLASH_MASK);
839         return status;
840 }
841
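/* Read and validate the 8012-series flash parameter block, then
 * extract the MAC address for this function.
 */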
842 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
843 {
844         int i;
845         int status;
846         __le32 *p = (__le32 *)&qdev->flash;
847         u32 offset = 0;
848         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
849
850         /* Second function's parameters follow the first
851          * function's.
852          */
853         if (qdev->port)
854                 offset = size;
855
856         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
857                 return -ETIMEDOUT;
858
859         for (i = 0; i < size; i++, p++) {
860                 status = ql_read_flash_word(qdev, i+offset, p);
861                 if (status) {
862                         netif_err(qdev, ifup, qdev->ndev,
863                                   "Error reading flash.\n");
864                         goto exit;
865                 }
866
867         }
868
869         status = ql_validate_flash(qdev,
870                         sizeof(struct flash_params_8012) / sizeof(u16),
871                         "8012");
872         if (status) {
873                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
874                 status = -EINVAL;
875                 goto exit;
876         }
877
878         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
879                 status = -EINVAL;
880                 goto exit;
881         }
882
883         memcpy(qdev->ndev->dev_addr,
884                 qdev->flash.flash_params_8012.mac_addr,
885                 qdev->ndev->addr_len);
886
887 exit:
888         ql_sem_unlock(qdev, SEM_FLASH_MASK);
889         return status;
890 }
891
892 /* The xgmac registers are located behind the xgmac_addr and xgmac_data
893  * register pair.  Each read/write requires us to wait for the ready
894  * bit before reading/writing the data.
895  */
896 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
897 {
898         int status;
899         /* wait for reg to come ready */
900         status = ql_wait_reg_rdy(qdev,
901                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902         if (status)
903                 return status;
904         /* write the data to the data reg */
905         ql_write32(qdev, XGMAC_DATA, data);
906         /* trigger the write */
907         ql_write32(qdev, XGMAC_ADDR, reg);
908         return status;
909 }
910
911 /* The xgmac registers are located behind the xgmac_addr and xgmac_data
912  * register pair.  Each read/write requires us to wait for the ready
913  * bit before reading/writing the data.
914  */
915 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
916 {
917         int status = 0;
918         /* wait for reg to come ready */
919         status = ql_wait_reg_rdy(qdev,
920                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
921         if (status)
922                 goto exit;
923         /* set up for reg read */
924         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
925         /* wait for reg to come ready */
926         status = ql_wait_reg_rdy(qdev,
927                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
928         if (status)
929                 goto exit;
930         /* get the data */
931         *data = ql_read32(qdev, XGMAC_DATA);
932 exit:
933         return status;
934 }
935
936 /* This is used for reading the 64-bit statistics regs. */
937 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
938 {
939         int status = 0;
940         u32 hi = 0;
941         u32 lo = 0;
942
943         status = ql_read_xgmac_reg(qdev, reg, &lo);
944         if (status)
945                 goto exit;
946
947         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
948         if (status)
949                 goto exit;
950
951         *data = (u64) lo | ((u64) hi << 32);
952
953 exit:
954         return status;
955 }
956
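/* 8000-series port bring-up: query MPI firmware info/state and
 * schedule the port config worker to set the TX/RX frame sizes.
 */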
957 static int ql_8000_port_initialize(struct ql_adapter *qdev)
958 {
959         int status;
960         /*
961          * Get MPI firmware version for driver banner
962          * and ethtool info.
963          */
964         status = ql_mb_about_fw(qdev);
965         if (status)
966                 goto exit;
967         status = ql_mb_get_fw_state(qdev);
968         if (status)
969                 goto exit;
970         /* Wake up a worker to get/set the TX/RX frame sizes. */
971         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
972 exit:
973         return status;
974 }
975
976 /* Take the MAC Core out of reset.
977  * Enable statistics counting.
978  * Take the transmitter/receiver out of reset.
979  * This functionality may be done in the MPI firmware at a
980  * later date.
981  */
982 static int ql_8012_port_initialize(struct ql_adapter *qdev)
983 {
984         int status = 0;
985         u32 data;
986
987         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988                 /* Another function has the semaphore, so
989                  * wait for the port init bit to come ready.
990                  */
991                 netif_info(qdev, link, qdev->ndev,
992                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
993                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
994                 if (status) {
995                         netif_crit(qdev, link, qdev->ndev,
996                                    "Port initialize timed out.\n");
997                 }
998                 return status;
999         }
1000
1001         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore.\n");
1002         /* Set the core reset. */
1003         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1004         if (status)
1005                 goto end;
1006         data |= GLOBAL_CFG_RESET;
1007         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1008         if (status)
1009                 goto end;
1010
1011         /* Clear the core reset and turn on jumbo for receiver. */
1012         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1013         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1014         data |= GLOBAL_CFG_TX_STAT_EN;
1015         data |= GLOBAL_CFG_RX_STAT_EN;
1016         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1017         if (status)
1018                 goto end;
1019
1020         /* Enable the transmitter and clear its reset. */
1021         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1022         if (status)
1023                 goto end;
1024         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1025         data |= TX_CFG_EN;      /* Enable the transmitter. */
1026         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1027         if (status)
1028                 goto end;
1029
1030         /* Enable the receiver and clear its reset. */
1031         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1032         if (status)
1033                 goto end;
1034         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1035         data |= RX_CFG_EN;      /* Enable the receiver. */
1036         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1037         if (status)
1038                 goto end;
1039
1040         /* Turn on jumbo. */
1041         status =
1042             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1043         if (status)
1044                 goto end;
1045         status =
1046             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1047         if (status)
1048                 goto end;
1049
1050         /* Signal to the world that the port is enabled. */
1051         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1052 end:
1053         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1054         return status;
1055 }
1056
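/* Size of the master page that large rx buffers are carved from. */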
1057 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1058 {
1059         return PAGE_SIZE << qdev->lbq_buf_order;
1060 }
1061
1062 /* Get the next large buffer. */
1063 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1064 {
1065         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1066         rx_ring->lbq_curr_idx++;
1067         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1068                 rx_ring->lbq_curr_idx = 0;
1069         rx_ring->lbq_free_cnt++;
1070         return lbq_desc;
1071 }
1072
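/* Get the next large buffer chunk and sync it for CPU access.  The
 * master page is unmapped once its last chunk is handed out.
 */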
1073 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1074                 struct rx_ring *rx_ring)
1075 {
1076         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1077
1078         pci_dma_sync_single_for_cpu(qdev->pdev,
1079                                         dma_unmap_addr(lbq_desc, mapaddr),
1080                                     rx_ring->lbq_buf_size,
1081                                         PCI_DMA_FROMDEVICE);
1082
1083         /* If it's the last chunk of our master page then
1084          * we unmap it.
1085          */
1086         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1087                                         == ql_lbq_block_size(qdev))
1088                 pci_unmap_page(qdev->pdev,
1089                                 lbq_desc->p.pg_chunk.map,
1090                                 ql_lbq_block_size(qdev),
1091                                 PCI_DMA_FROMDEVICE);
1092         return lbq_desc;
1093 }
1094
1095 /* Get the next small buffer. */
1096 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1097 {
1098         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1099         rx_ring->sbq_curr_idx++;
1100         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1101                 rx_ring->sbq_curr_idx = 0;
1102         rx_ring->sbq_free_cnt++;
1103         return sbq_desc;
1104 }
1105
1106 /* Update an rx ring index. */
1107 static void ql_update_cq(struct rx_ring *rx_ring)
1108 {
1109         rx_ring->cnsmr_idx++;
1110         rx_ring->curr_entry++;
1111         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1112                 rx_ring->cnsmr_idx = 0;
1113                 rx_ring->curr_entry = rx_ring->cq_base;
1114         }
1115 }
1116
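/* Tell the chip how far we have consumed on this completion queue. */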
1117 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1118 {
1119         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1120 }
1121
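/* Hand out the next chunk of the master rx page, allocating and
 * DMA-mapping a fresh page when the current one is used up.
 */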
1122 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1123                                                 struct bq_desc *lbq_desc)
1124 {
1125         if (!rx_ring->pg_chunk.page) {
1126                 u64 map;
1127                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1128                                                 GFP_ATOMIC,
1129                                                 qdev->lbq_buf_order);
1130                 if (unlikely(!rx_ring->pg_chunk.page)) {
1131                         netif_err(qdev, drv, qdev->ndev,
1132                                   "page allocation failed.\n");
1133                         return -ENOMEM;
1134                 }
1135                 rx_ring->pg_chunk.offset = 0;
1136                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1137                                         0, ql_lbq_block_size(qdev),
1138                                         PCI_DMA_FROMDEVICE);
1139                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1140                         __free_pages(rx_ring->pg_chunk.page,
1141                                         qdev->lbq_buf_order);
1142                         netif_err(qdev, drv, qdev->ndev,
1143                                   "PCI mapping failed.\n");
1144                         return -ENOMEM;
1145                 }
1146                 rx_ring->pg_chunk.map = map;
1147                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1148         }
1149
1150         /* Copy the current master pg_chunk info
1151          * to the current descriptor.
1152          */
1153         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1154
1155         /* Adjust the master page chunk for next
1156          * buffer get.
1157          */
1158         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1159         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1160                 rx_ring->pg_chunk.page = NULL;
1161                 lbq_desc->p.pg_chunk.last_flag = 1;
1162         } else {
1163                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1164                 get_page(rx_ring->pg_chunk.page);
1165                 lbq_desc->p.pg_chunk.last_flag = 0;
1166         }
1167         return 0;
1168 }
1169 /* Process (refill) a large buffer queue. */
1170 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1171 {
1172         u32 clean_idx = rx_ring->lbq_clean_idx;
1173         u32 start_idx = clean_idx;
1174         struct bq_desc *lbq_desc;
1175         u64 map;
1176         int i;
1177
1178         while (rx_ring->lbq_free_cnt > 32) {
1179                 for (i = 0; i < 16; i++) {
1180                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181                                      "lbq: try cleaning clean_idx = %d.\n",
1182                                      clean_idx);
1183                         lbq_desc = &rx_ring->lbq[clean_idx];
1184                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1185                                 netif_err(qdev, ifup, qdev->ndev,
1186                                           "Could not get a page chunk.\n");
1187                                 return;
1188                         }
1189
1190                         map = lbq_desc->p.pg_chunk.map +
1191                                 lbq_desc->p.pg_chunk.offset;
1192                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1193                         dma_unmap_len_set(lbq_desc, maplen,
1194                                         rx_ring->lbq_buf_size);
1195                         *lbq_desc->addr = cpu_to_le64(map);
1196
1197                         pci_dma_sync_single_for_device(qdev->pdev, map,
1198                                                 rx_ring->lbq_buf_size,
1199                                                 PCI_DMA_FROMDEVICE);
1200                         clean_idx++;
1201                         if (clean_idx == rx_ring->lbq_len)
1202                                 clean_idx = 0;
1203                 }
1204
1205                 rx_ring->lbq_clean_idx = clean_idx;
1206                 rx_ring->lbq_prod_idx += 16;
1207                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1208                         rx_ring->lbq_prod_idx = 0;
1209                 rx_ring->lbq_free_cnt -= 16;
1210         }
1211
1212         if (start_idx != clean_idx) {
1213                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1214                              "lbq: updating prod idx = %d.\n",
1215                              rx_ring->lbq_prod_idx);
1216                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1217                                 rx_ring->lbq_prod_idx_db_reg);
1218         }
1219 }
1220
1221 /* Process (refill) a small buffer queue. */
1222 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1223 {
1224         u32 clean_idx = rx_ring->sbq_clean_idx;
1225         u32 start_idx = clean_idx;
1226         struct bq_desc *sbq_desc;
1227         u64 map;
1228         int i;
1229
1230         while (rx_ring->sbq_free_cnt > 16) {
1231                 for (i = 0; i < 16; i++) {
1232                         sbq_desc = &rx_ring->sbq[clean_idx];
1233                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234                                      "sbq: try cleaning clean_idx = %d.\n",
1235                                      clean_idx);
1236                         if (sbq_desc->p.skb == NULL) {
1237                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1238                                              qdev->ndev,
1239                                              "sbq: getting new skb for index %d.\n",
1240                                              sbq_desc->index);
1241                                 sbq_desc->p.skb =
1242                                     netdev_alloc_skb(qdev->ndev,
1243                                                      SMALL_BUFFER_SIZE);
1244                                 if (sbq_desc->p.skb == NULL) {
1245                                         netif_err(qdev, probe, qdev->ndev,
1246                                                   "Couldn't get an skb.\n");
1247                                         rx_ring->sbq_clean_idx = clean_idx;
1248                                         return;
1249                                 }
1250                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1251                                 map = pci_map_single(qdev->pdev,
1252                                                      sbq_desc->p.skb->data,
1253                                                      rx_ring->sbq_buf_size,
1254                                                      PCI_DMA_FROMDEVICE);
1255                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1256                                         netif_err(qdev, ifup, qdev->ndev,
1257                                                   "PCI mapping failed.\n");
1258                                         rx_ring->sbq_clean_idx = clean_idx;
1259                                         dev_kfree_skb_any(sbq_desc->p.skb);
1260                                         sbq_desc->p.skb = NULL;
1261                                         return;
1262                                 }
1263                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1264                                 dma_unmap_len_set(sbq_desc, maplen,
1265                                                   rx_ring->sbq_buf_size);
1266                                 *sbq_desc->addr = cpu_to_le64(map);
1267                         }
1268
1269                         clean_idx++;
1270                         if (clean_idx == rx_ring->sbq_len)
1271                                 clean_idx = 0;
1272                 }
1273                 rx_ring->sbq_clean_idx = clean_idx;
1274                 rx_ring->sbq_prod_idx += 16;
1275                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1276                         rx_ring->sbq_prod_idx = 0;
1277                 rx_ring->sbq_free_cnt -= 16;
1278         }
1279
1280         if (start_idx != clean_idx) {
1281                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1282                              "sbq: updating prod idx = %d.\n",
1283                              rx_ring->sbq_prod_idx);
1284                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1285                                 rx_ring->sbq_prod_idx_db_reg);
1286         }
1287 }
1288
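/* Refill both the small and large buffer queues for this rx ring. */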
1289 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1290                                     struct rx_ring *rx_ring)
1291 {
1292         ql_update_sbq(qdev, rx_ring);
1293         ql_update_lbq(qdev, rx_ring);
1294 }
1295
1296 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1297  * fails at some stage, or from the interrupt when a tx completes.
1298  */
1299 static void ql_unmap_send(struct ql_adapter *qdev,
1300                           struct tx_ring_desc *tx_ring_desc, int mapped)
1301 {
1302         int i;
1303         for (i = 0; i < mapped; i++) {
1304                 if (i == 0 || (i == 7 && mapped > 7)) {
1305                         /*
1306                          * Unmap the skb->data area, or the
1307                          * external sglist (AKA the Outbound
1308                          * Address List (OAL)).
1309                          * If it's the zeroth element, then it's
1310                          * the skb->data area.  If it's the 7th
1311                          * element and there are more than 6 frags,
1312                          * then it's an OAL.
1313                          */
1314                         if (i == 7) {
1315                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1316                                              qdev->ndev,
1317                                              "unmapping OAL area.\n");
1318                         }
1319                         pci_unmap_single(qdev->pdev,
1320                                          dma_unmap_addr(&tx_ring_desc->map[i],
1321                                                         mapaddr),
1322                                          dma_unmap_len(&tx_ring_desc->map[i],
1323                                                        maplen),
1324                                          PCI_DMA_TODEVICE);
1325                 } else {
1326                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1327                                      "unmapping frag %d.\n", i);
1328                         pci_unmap_page(qdev->pdev,
1329                                        dma_unmap_addr(&tx_ring_desc->map[i],
1330                                                       mapaddr),
1331                                        dma_unmap_len(&tx_ring_desc->map[i],
1332                                                      maplen), PCI_DMA_TODEVICE);
1333                 }
1334         }
1335
1336 }
1337
1338 /* Map the buffers for this transmit.  This will return
1339  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1340  */
1341 static int ql_map_send(struct ql_adapter *qdev,
1342                        struct ob_mac_iocb_req *mac_iocb_ptr,
1343                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1344 {
1345         int len = skb_headlen(skb);
1346         dma_addr_t map;
1347         int frag_idx, err, map_idx = 0;
1348         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1349         int frag_cnt = skb_shinfo(skb)->nr_frags;
1350
1351         if (frag_cnt) {
1352                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1353                              "frag_cnt = %d.\n", frag_cnt);
1354         }
1355         /*
1356          * Map the skb buffer first.
1357          */
1358         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1359
1360         err = pci_dma_mapping_error(qdev->pdev, map);
1361         if (err) {
1362                 netif_err(qdev, tx_queued, qdev->ndev,
1363                           "PCI mapping failed with error: %d\n", err);
1364
1365                 return NETDEV_TX_BUSY;
1366         }
1367
1368         tbd->len = cpu_to_le32(len);
1369         tbd->addr = cpu_to_le64(map);
1370         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1371         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1372         map_idx++;
1373
1374         /*
1375          * This loop fills the remainder of the 8 address descriptors
1376          * in the IOCB.  If there are more than 7 fragments, then the
1377          * eighth address desc will point to an external list (OAL).
1378          * When this happens, the remainder of the frags will be stored
1379          * in this list.
1380          */
1381         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1382                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1383                 tbd++;
1384                 if (frag_idx == 6 && frag_cnt > 7) {
1385                         /* Let's tack on an sglist.
1386                          * Our control block will now
1387                          * look like this:
1388                          * iocb->seg[0] = skb->data
1389                          * iocb->seg[1] = frag[0]
1390                          * iocb->seg[2] = frag[1]
1391                          * iocb->seg[3] = frag[2]
1392                          * iocb->seg[4] = frag[3]
1393                          * iocb->seg[5] = frag[4]
1394                          * iocb->seg[6] = frag[5]
1395                          * iocb->seg[7] = ptr to OAL (external sglist)
1396                          * oal->seg[0] = frag[6]
1397                          * oal->seg[1] = frag[7]
1398                          * oal->seg[2] = frag[8]
1399                          * oal->seg[3] = frag[9]
1400                          * oal->seg[4] = frag[10]
1401                          *      etc...
1402                          */
1403                         /* Tack on the OAL in the eighth segment of IOCB. */
1404                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1405                                              sizeof(struct oal),
1406                                              PCI_DMA_TODEVICE);
1407                         err = pci_dma_mapping_error(qdev->pdev, map);
1408                         if (err) {
1409                                 netif_err(qdev, tx_queued, qdev->ndev,
1410                                           "PCI mapping outbound address list failed with error: %d\n",
1411                                           err);
1412                                 goto map_error;
1413                         }
1414
1415                         tbd->addr = cpu_to_le64(map);
1416                         /*
1417                          * The length is the number of fragments
1418                          * that remain to be mapped times the length
1419                          * of our sglist (OAL).
1420                          */
1421                         tbd->len =
1422                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1423                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1424                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1425                                            map);
1426                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1427                                           sizeof(struct oal));
1428                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1429                         map_idx++;
1430                 }
1431
1432                 map =
1433                     pci_map_page(qdev->pdev, frag->page,
1434                                  frag->page_offset, frag->size,
1435                                  PCI_DMA_TODEVICE);
1436
1437                 err = pci_dma_mapping_error(qdev->pdev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(frag->size);
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   frag->size);
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then map_idx will be one.
1461          * This causes the unmap of the skb->data area.  Otherwise
1462          * we pass in the number of segments that mapped successfully
1463          * so they can be unmapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
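
/* NETDEV_TX_BUSY from ql_map_send() means every segment that did map has
 * already been unwound, so the caller only has to keep the skb and retry.
 * A sketch of the calling pattern (qlge_send() below does essentially this):
 *
 *	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
 *	    NETDEV_TX_OK) {
 *		tx_ring->tx_errors++;
 *		return NETDEV_TX_BUSY;
 *	}
 */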
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct skb_frag_struct *rx_frag;
1479         int nr_frags;
1480         struct napi_struct *napi = &rx_ring->napi;
1481
1482         napi->dev = qdev->ndev;
1483
1484         skb = napi_get_frags(napi);
1485         if (!skb) {
1486                 netif_err(qdev, drv, qdev->ndev,
1487                           "Couldn't get an skb, exiting.\n");
1488                 rx_ring->rx_dropped++;
1489                 put_page(lbq_desc->p.pg_chunk.page);
1490                 return;
1491         }
1492         prefetch(lbq_desc->p.pg_chunk.va);
1493         rx_frag = skb_shinfo(skb)->frags;
1494         nr_frags = skb_shinfo(skb)->nr_frags;
1495         rx_frag += nr_frags;
1496         rx_frag->page = lbq_desc->p.pg_chunk.page;
1497         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498         rx_frag->size = length;
1499
1500         skb->len += length;
1501         skb->data_len += length;
1502         skb->truesize += length;
1503         skb_shinfo(skb)->nr_frags++;
1504
1505         rx_ring->rx_packets++;
1506         rx_ring->rx_bytes += length;
1507         skb->ip_summed = CHECKSUM_UNNECESSARY;
1508         skb_record_rx_queue(skb, rx_ring->cq_id);
1509         if (qdev->vlgrp && (vlan_id != 0xffff))
1510                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1511         else
1512                 napi_gro_frags(napi);
1513 }
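
/* Note on the GRO path above: no per-packet skb is allocated.  The page
 * chunk is appended as one more fragment to the skb that napi_get_frags()
 * keeps for this NAPI instance, and napi_gro_frags()/vlan_gro_frags() then
 * try to merge it into an existing flow before handing it up the stack.
 * This is only safe because ql_process_mac_rx_intr() routes exclusively
 * checksummed TCP frames held in a single large-buffer chunk to this
 * function.
 */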
1514
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517                                         struct rx_ring *rx_ring,
1518                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1519                                         u32 length,
1520                                         u16 vlan_id)
1521 {
1522         struct net_device *ndev = qdev->ndev;
1523         struct sk_buff *skb = NULL;
1524         void *addr;
1525         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526         struct napi_struct *napi = &rx_ring->napi;
1527
1528         skb = netdev_alloc_skb(ndev, length);
1529         if (!skb) {
1530                 netif_err(qdev, drv, qdev->ndev,
1531                           "Couldn't get an skb, need to unwind!\n");
1532                 rx_ring->rx_dropped++;
1533                 put_page(lbq_desc->p.pg_chunk.page);
1534                 return;
1535         }
1536
1537         addr = lbq_desc->p.pg_chunk.va;
1538         prefetch(addr);
1539
1540
1541         /* Frame error, so drop the packet. */
1542         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543                 netif_info(qdev, drv, qdev->ndev,
1544                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1545                 rx_ring->rx_errors++;
1546                 goto err_out;
1547         }
1548
1549         /* The max framesize filter on this chip is set higher than
1550          * MTU since FCoE uses 2k frames.
1551          */
1552         if (length > ndev->mtu + ETH_HLEN) {
1553                 netif_err(qdev, drv, qdev->ndev,
1554                           "Frame too long, dropping.\n");
1555                 rx_ring->rx_dropped++;
1556                 goto err_out;
1557         }
1558         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1559         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1561                      length);
1562         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1563                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1564                                 length-ETH_HLEN);
1565         skb->len += length-ETH_HLEN;
1566         skb->data_len += length-ETH_HLEN;
1567         skb->truesize += length-ETH_HLEN;
1568
1569         rx_ring->rx_packets++;
1570         rx_ring->rx_bytes += skb->len;
1571         skb->protocol = eth_type_trans(skb, ndev);
1572         skb_checksum_none_assert(skb);
1573
1574         if (qdev->rx_csum &&
1575                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1576                 /* TCP frame. */
1577                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1578                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1579                                      "TCP checksum done!\n");
1580                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1581                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1582                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1583                         /* Unfragmented ipv4 UDP frame. */
1584                         struct iphdr *iph = (struct iphdr *) skb->data;
1585                         if (!(iph->frag_off &
1586                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1587                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1589                                              qdev->ndev,
1590                                              "UDP checksum done!\n");
1591                         }
1592                 }
1593         }
1594
1595         skb_record_rx_queue(skb, rx_ring->cq_id);
1596         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1597                 if (qdev->vlgrp && (vlan_id != 0xffff))
1598                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1599                 else
1600                         napi_gro_receive(napi, skb);
1601         } else {
1602                 if (qdev->vlgrp && (vlan_id != 0xffff))
1603                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1604                 else
1605                         netif_receive_skb(skb);
1606         }
1607         return;
1608 err_out:
1609         dev_kfree_skb_any(skb);
1610         put_page(lbq_desc->p.pg_chunk.page);
1611 }
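
/* In the page path above only the Ethernet header is copied into the skb's
 * linear area (so eth_type_trans() can parse it); the remaining
 * length - ETH_HLEN bytes stay in the page chunk, which is attached as
 * frag 0 instead of being copied.
 */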
1612
1613 /* Process an inbound completion from an rx ring. */
1614 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1615                                         struct rx_ring *rx_ring,
1616                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1617                                         u32 length,
1618                                         u16 vlan_id)
1619 {
1620         struct net_device *ndev = qdev->ndev;
1621         struct sk_buff *skb = NULL;
1622         struct sk_buff *new_skb = NULL;
1623         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1624
1625         skb = sbq_desc->p.skb;
1626         /* Allocate new_skb and copy */
1627         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1628         if (new_skb == NULL) {
1629                 netif_err(qdev, probe, qdev->ndev,
1630                           "No skb available, drop the packet.\n");
1631                 rx_ring->rx_dropped++;
1632                 return;
1633         }
1634         skb_reserve(new_skb, NET_IP_ALIGN);
1635         memcpy(skb_put(new_skb, length), skb->data, length);
1636         skb = new_skb;
1637
1638         /* Frame error, so drop the packet. */
1639         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1640                 netif_info(qdev, drv, qdev->ndev,
1641                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1642                 dev_kfree_skb_any(skb);
1643                 rx_ring->rx_errors++;
1644                 return;
1645         }
1646
1647         /* loopback self test for ethtool */
1648         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1649                 ql_check_lb_frame(qdev, skb);
1650                 dev_kfree_skb_any(skb);
1651                 return;
1652         }
1653
1654         /* The max framesize filter on this chip is set higher than
1655          * MTU since FCoE uses 2k frames.
1656          */
1657         if (skb->len > ndev->mtu + ETH_HLEN) {
1658                 dev_kfree_skb_any(skb);
1659                 rx_ring->rx_dropped++;
1660                 return;
1661         }
1662
1663         prefetch(skb->data);
1664         skb->dev = ndev;
1665         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "%s Multicast.\n",
1668                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1670                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1671                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1672                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1673                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1674         }
1675         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1676                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677                              "Promiscuous Packet.\n");
1678
1679         rx_ring->rx_packets++;
1680         rx_ring->rx_bytes += skb->len;
1681         skb->protocol = eth_type_trans(skb, ndev);
1682         skb_checksum_none_assert(skb);
1683
1684         /* If rx checksum is on, and there are no
1685          * csum or frame errors.
1686          */
1687         if (qdev->rx_csum &&
1688                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1689                 /* TCP frame. */
1690                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1691                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692                                      "TCP checksum done!\n");
1693                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1694                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1695                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1696                         /* Unfragmented ipv4 UDP frame. */
1697                         struct iphdr *iph = (struct iphdr *) skb->data;
1698                         if (!(iph->frag_off &
1699                                 ntohs(IP_MF|IP_OFFSET))) {
1700                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1702                                              qdev->ndev,
1703                                              "UDP checksum done!\n");
1704                         }
1705                 }
1706         }
1707
1708         skb_record_rx_queue(skb, rx_ring->cq_id);
1709         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1710                 if (qdev->vlgrp && (vlan_id != 0xffff))
1711                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1712                                                 vlan_id, skb);
1713                 else
1714                         napi_gro_receive(&rx_ring->napi, skb);
1715         } else {
1716                 if (qdev->vlgrp && (vlan_id != 0xffff))
1717                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1718                 else
1719                         netif_receive_skb(skb);
1720         }
1721 }
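
/* The small-buffer path above is effectively a copy-break: the frame already
 * fits in one small buffer, so it is copied into a freshly allocated skb of
 * length + NET_IP_ALIGN bytes while the original small-buffer skb is left in
 * place, which lets the refill path re-post the same hardware buffer rather
 * than allocate a new one.
 */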
1722
1723 static void ql_realign_skb(struct sk_buff *skb, int len)
1724 {
1725         void *temp_addr = skb->data;
1726
1727         /* Undo the skb_reserve(skb,32) we did before
1728          * giving to hardware, and realign data on
1729          * a 2-byte boundary.
1730          */
1731         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1732         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1733         skb_copy_to_linear_data(skb, temp_addr,
1734                 (unsigned int)len);
1735 }
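
/* A quick arithmetic sketch, assuming QLGE_SB_PAD is the 32 bytes mentioned
 * above and NET_IP_ALIGN is the usual 2: data and tail move back by
 * 32 - 2 = 30 bytes, leaving a 2-byte pad ahead of the 14-byte Ethernet
 * header so that the IP header ends up 4-byte aligned once eth_type_trans()
 * pulls the link-layer header.
 */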
1736
1737 /*
1738  * This function builds an skb for the given inbound
1739  * completion.  It will be rewritten for readability in the near
1740  * future, but for now it works well.
1741  */
1742 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1743                                        struct rx_ring *rx_ring,
1744                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1745 {
1746         struct bq_desc *lbq_desc;
1747         struct bq_desc *sbq_desc;
1748         struct sk_buff *skb = NULL;
1749         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1750         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1751
1752         /*
1753          * Handle the header buffer if present.
1754          */
1755         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1756             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1757                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1758                              "Header of %d bytes in small buffer.\n", hdr_len);
1759                 /*
1760                  * Headers fit nicely into a small buffer.
1761                  */
1762                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1763                 pci_unmap_single(qdev->pdev,
1764                                 dma_unmap_addr(sbq_desc, mapaddr),
1765                                 dma_unmap_len(sbq_desc, maplen),
1766                                 PCI_DMA_FROMDEVICE);
1767                 skb = sbq_desc->p.skb;
1768                 ql_realign_skb(skb, hdr_len);
1769                 skb_put(skb, hdr_len);
1770                 sbq_desc->p.skb = NULL;
1771         }
1772
1773         /*
1774          * Handle the data buffer(s).
1775          */
1776         if (unlikely(!length)) {        /* Is there data too? */
1777                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1778                              "No Data buffer in this packet.\n");
1779                 return skb;
1780         }
1781
1782         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1783                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1784                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1785                                      "Headers in small, data of %d bytes in small, combine them.\n",
1786                                      length);
1787                         /*
1788                          * Data is less than small buffer size so it's
1789                          * stuffed in a small buffer.
1790                          * For this case we append the data
1791                          * from the "data" small buffer to the "header" small
1792                          * buffer.
1793                          */
1794                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1795                         pci_dma_sync_single_for_cpu(qdev->pdev,
1796                                                     dma_unmap_addr
1797                                                     (sbq_desc, mapaddr),
1798                                                     dma_unmap_len
1799                                                     (sbq_desc, maplen),
1800                                                     PCI_DMA_FROMDEVICE);
1801                         memcpy(skb_put(skb, length),
1802                                sbq_desc->p.skb->data, length);
1803                         pci_dma_sync_single_for_device(qdev->pdev,
1804                                                        dma_unmap_addr
1805                                                        (sbq_desc,
1806                                                         mapaddr),
1807                                                        dma_unmap_len
1808                                                        (sbq_desc,
1809                                                         maplen),
1810                                                        PCI_DMA_FROMDEVICE);
1811                 } else {
1812                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1813                                      "%d bytes in a single small buffer.\n",
1814                                      length);
1815                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1816                         skb = sbq_desc->p.skb;
1817                         ql_realign_skb(skb, length);
1818                         skb_put(skb, length);
1819                         pci_unmap_single(qdev->pdev,
1820                                          dma_unmap_addr(sbq_desc,
1821                                                         mapaddr),
1822                                          dma_unmap_len(sbq_desc,
1823                                                        maplen),
1824                                          PCI_DMA_FROMDEVICE);
1825                         sbq_desc->p.skb = NULL;
1826                 }
1827         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1828                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1829                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830                                      "Header in small, %d bytes in large. Chain large to small!\n",
1831                                      length);
1832                         /*
1833                          * The data is in a single large buffer.  We
1834                          * chain it to the header buffer's skb and let
1835                          * it rip.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1839                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1840                                      lbq_desc->p.pg_chunk.offset, length);
1841                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1842                                                 lbq_desc->p.pg_chunk.offset,
1843                                                 length);
1844                         skb->len += length;
1845                         skb->data_len += length;
1846                         skb->truesize += length;
1847                 } else {
1848                         /*
1849                          * The headers and data are in a single large buffer. We
1850                          * copy it to a new skb and let it go. This can happen with
1851                          * jumbo mtu on a non-TCP/UDP frame.
1852                          */
1853                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854                         skb = netdev_alloc_skb(qdev->ndev, length);
1855                         if (skb == NULL) {
1856                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857                                              "No skb available, drop the packet.\n");
1858                                 return NULL;
1859                         }
1860                         pci_unmap_page(qdev->pdev,
1861                                        dma_unmap_addr(lbq_desc,
1862                                                       mapaddr),
1863                                        dma_unmap_len(lbq_desc, maplen),
1864                                        PCI_DMA_FROMDEVICE);
1865                         skb_reserve(skb, NET_IP_ALIGN);
1866                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1868                                      length);
1869                         skb_fill_page_desc(skb, 0,
1870                                                 lbq_desc->p.pg_chunk.page,
1871                                                 lbq_desc->p.pg_chunk.offset,
1872                                                 length);
1873                         skb->len += length;
1874                         skb->data_len += length;
1875                         skb->truesize += length;
1876                         length -= length;
1877                         __pskb_pull_tail(skb,
1878                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1879                                 VLAN_ETH_HLEN : ETH_HLEN);
1880                 }
1881         } else {
1882                 /*
1883                  * The data is in a chain of large buffers
1884                  * pointed to by a small buffer.  We loop
1885          * through and chain them to our small header
1886                  * buffer's skb.
1887                  * frags:  There are 18 max frags and our small
1888                  *         buffer will hold 32 of them. The thing is,
1889                  *         we'll use 3 max for our 9000 byte jumbo
1890                  *         frames.  If the MTU goes up we could
1891          *         eventually be in trouble.
1892                  */
1893                 int size, i = 0;
1894                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1895                 pci_unmap_single(qdev->pdev,
1896                                  dma_unmap_addr(sbq_desc, mapaddr),
1897                                  dma_unmap_len(sbq_desc, maplen),
1898                                  PCI_DMA_FROMDEVICE);
1899                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1900                         /*
1901                          * This is a non-TCP/UDP IP frame, so
1902                          * the headers aren't split into a small
1903                          * buffer.  We have to use the small buffer
1904                          * that contains our sg list as our skb to
1905                          * send upstairs. Copy the sg list here to
1906                          * a local buffer and use it to find the
1907                          * pages to chain.
1908                          */
1909                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1910                                      "%d bytes of headers & data in chain of large.\n",
1911                                      length);
1912                         skb = sbq_desc->p.skb;
1913                         sbq_desc->p.skb = NULL;
1914                         skb_reserve(skb, NET_IP_ALIGN);
1915                 }
1916                 while (length > 0) {
1917                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918                         size = (length < rx_ring->lbq_buf_size) ? length :
1919                                 rx_ring->lbq_buf_size;
1920
1921                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1922                                      "Adding page %d to skb for %d bytes.\n",
1923                                      i, size);
1924                         skb_fill_page_desc(skb, i,
1925                                                 lbq_desc->p.pg_chunk.page,
1926                                                 lbq_desc->p.pg_chunk.offset,
1927                                                 size);
1928                         skb->len += size;
1929                         skb->data_len += size;
1930                         skb->truesize += size;
1931                         length -= size;
1932                         i++;
1933                 }
1934                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1935                                 VLAN_ETH_HLEN : ETH_HLEN);
1936         }
1937         return skb;
1938 }
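
/* Summary of the cases ql_build_rx_skb() handles.  When HV and HS are set,
 * the header has been split into a small buffer and becomes the skb's linear
 * area; the data flags then select how the payload is attached:
 *
 *	DS, header split:	data memcpy'd from a second small buffer
 *	DS, no split:		the data small buffer's skb is used directly
 *	DL, header split:	the page chunk is chained onto the header skb
 *	DL, no split:		a new skb is allocated and the chunk chained on
 *	neither:		the frame spans several large buffers, which
 *				are chained until 'length' bytes are consumed
 */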
1939
1940 /* Process an inbound completion from an rx ring. */
1941 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1942                                    struct rx_ring *rx_ring,
1943                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1944                                    u16 vlan_id)
1945 {
1946         struct net_device *ndev = qdev->ndev;
1947         struct sk_buff *skb = NULL;
1948
1949         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1950
1951         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1952         if (unlikely(!skb)) {
1953                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954                              "No skb available, drop packet.\n");
1955                 rx_ring->rx_dropped++;
1956                 return;
1957         }
1958
1959         /* Frame error, so drop the packet. */
1960         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1961                 netif_info(qdev, drv, qdev->ndev,
1962                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1963                 dev_kfree_skb_any(skb);
1964                 rx_ring->rx_errors++;
1965                 return;
1966         }
1967
1968         /* The max framesize filter on this chip is set higher than
1969          * MTU since FCoE uses 2k frames.
1970          */
1971         if (skb->len > ndev->mtu + ETH_HLEN) {
1972                 dev_kfree_skb_any(skb);
1973                 rx_ring->rx_dropped++;
1974                 return;
1975         }
1976
1977         /* loopback self test for ethtool */
1978         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1979                 ql_check_lb_frame(qdev, skb);
1980                 dev_kfree_skb_any(skb);
1981                 return;
1982         }
1983
1984         prefetch(skb->data);
1985         skb->dev = ndev;
1986         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1987                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1988                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1989                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1990                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1991                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1992                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1993                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1994                 rx_ring->rx_multicast++;
1995         }
1996         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1997                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998                              "Promiscuous Packet.\n");
1999         }
2000
2001         skb->protocol = eth_type_trans(skb, ndev);
2002         skb_checksum_none_assert(skb);
2003
2004         /* If rx checksum is on, and there are no
2005          * csum or frame errors.
2006          */
2007         if (qdev->rx_csum &&
2008                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2009                 /* TCP frame. */
2010                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2011                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012                                      "TCP checksum done!\n");
2013                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2014                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2015                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2016                         /* Unfragmented ipv4 UDP frame. */
2017                         struct iphdr *iph = (struct iphdr *) skb->data;
2018                         if (!(iph->frag_off &
2019                                 ntohs(IP_MF|IP_OFFSET))) {
2020                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2022                                              "UDP checksum done!\n");
2023                         }
2024                 }
2025         }
2026
2027         rx_ring->rx_packets++;
2028         rx_ring->rx_bytes += skb->len;
2029         skb_record_rx_queue(skb, rx_ring->cq_id);
2030         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2031                 if (qdev->vlgrp &&
2032                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2033                         (vlan_id != 0))
2034                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2035                                 vlan_id, skb);
2036                 else
2037                         napi_gro_receive(&rx_ring->napi, skb);
2038         } else {
2039                 if (qdev->vlgrp &&
2040                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2041                         (vlan_id != 0))
2042                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2043                 else
2044                         netif_receive_skb(skb);
2045         }
2046 }
2047
2048 /* Process an inbound completion from an rx ring. */
2049 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2050                                         struct rx_ring *rx_ring,
2051                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2052 {
2053         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2054         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2055                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2056                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2057
2058         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2059
2060         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2061                 /* The data and headers are split into
2062                  * separate buffers.
2063                  */
2064                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2065                                                 vlan_id);
2066         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2067                 /* The data fit in a single small buffer.
2068                  * Allocate a new skb, copy the data and
2069                  * return the buffer to the free pool.
2070                  */
2071                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2072                                                 length, vlan_id);
2073         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2074                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2075                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2076                 /* TCP packet in a page chunk that's been checksummed.
2077                  * Tack it on to our GRO skb and let it go.
2078                  */
2079                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2080                                                 length, vlan_id);
2081         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2082                 /* Non-TCP packet in a page chunk. Allocate an
2083                  * skb, tack it on frags, and send it up.
2084                  */
2085                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2086                                                 length, vlan_id);
2087         } else {
2088                 /* Non-TCP/UDP large frames that span multiple buffers
2089                  * can be processed corrrectly by the split frame logic.
2090                  * can be processed correctly by the split frame logic.
2091                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2092                                                 vlan_id);
2093         }
2094
2095         return (unsigned long)length;
2096 }
2097
2098 /* Process an outbound completion from an rx ring. */
2099 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2100                                    struct ob_mac_iocb_rsp *mac_rsp)
2101 {
2102         struct tx_ring *tx_ring;
2103         struct tx_ring_desc *tx_ring_desc;
2104
2105         QL_DUMP_OB_MAC_RSP(mac_rsp);
2106         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2107         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2108         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2109         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2110         tx_ring->tx_packets++;
2111         dev_kfree_skb(tx_ring_desc->skb);
2112         tx_ring_desc->skb = NULL;
2113
2114         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2115                                         OB_MAC_IOCB_RSP_S |
2116                                         OB_MAC_IOCB_RSP_L |
2117                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2118                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2119                         netif_warn(qdev, tx_done, qdev->ndev,
2120                                    "Total descriptor length did not match transfer length.\n");
2121                 }
2122                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2123                         netif_warn(qdev, tx_done, qdev->ndev,
2124                                    "Frame too short to be valid, not sent.\n");
2125                 }
2126                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2127                         netif_warn(qdev, tx_done, qdev->ndev,
2128                                    "Frame too long, but sent anyway.\n");
2129                 }
2130                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2131                         netif_warn(qdev, tx_done, qdev->ndev,
2132                                    "PCI backplane error. Frame not sent.\n");
2133                 }
2134         }
2135         atomic_inc(&tx_ring->tx_count);
2136 }
2137
2138 /* Fire up a handler to reset the MPI processor. */
2139 void ql_queue_fw_error(struct ql_adapter *qdev)
2140 {
2141         ql_link_off(qdev);
2142         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2143 }
2144
2145 void ql_queue_asic_error(struct ql_adapter *qdev)
2146 {
2147         ql_link_off(qdev);
2148         ql_disable_interrupts(qdev);
2149         /* Clear adapter up bit to signal the recovery
2150          * process that it shouldn't kill the reset worker
2151          * thread
2152          */
2153         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2154         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2155 }
2156
2157 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2158                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2159 {
2160         switch (ib_ae_rsp->event) {
2161         case MGMT_ERR_EVENT:
2162                 netif_err(qdev, rx_err, qdev->ndev,
2163                           "Management Processor Fatal Error.\n");
2164                 ql_queue_fw_error(qdev);
2165                 return;
2166
2167         case CAM_LOOKUP_ERR_EVENT:
2168                 netif_err(qdev, link, qdev->ndev,
2169                           "Multiple CAM hits lookup occurred.\n");
2170                 netif_err(qdev, drv, qdev->ndev,
2171                           "This event shouldn't occur.\n");
2172                 ql_queue_asic_error(qdev);
2173                 return;
2174
2175         case SOFT_ECC_ERROR_EVENT:
2176                 netif_err(qdev, rx_err, qdev->ndev,
2177                           "Soft ECC error detected.\n");
2178                 ql_queue_asic_error(qdev);
2179                 break;
2180
2181         case PCI_ERR_ANON_BUF_RD:
2182                 netif_err(qdev, rx_err, qdev->ndev,
2183                           "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2184                           ib_ae_rsp->q_id);
2185                 ql_queue_asic_error(qdev);
2186                 break;
2187
2188         default:
2189                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2190                           ib_ae_rsp->event);
2191                 ql_queue_asic_error(qdev);
2192                 break;
2193         }
2194 }
2195
2196 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2197 {
2198         struct ql_adapter *qdev = rx_ring->qdev;
2199         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2200         struct ob_mac_iocb_rsp *net_rsp = NULL;
2201         int count = 0;
2202
2203         struct tx_ring *tx_ring;
2204         /* While there are entries in the completion queue. */
2205         while (prod != rx_ring->cnsmr_idx) {
2206
2207                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2208                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2209                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2210
2211                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2212                 rmb();
2213                 switch (net_rsp->opcode) {
2214
2215                 case OPCODE_OB_MAC_TSO_IOCB:
2216                 case OPCODE_OB_MAC_IOCB:
2217                         ql_process_mac_tx_intr(qdev, net_rsp);
2218                         break;
2219                 default:
2220                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2222                                      net_rsp->opcode);
2223                 }
2224                 count++;
2225                 ql_update_cq(rx_ring);
2226                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2227         }
2228         if (!net_rsp)
2229                 return 0;
2230         ql_write_cq_idx(rx_ring);
2231         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2232         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2233                 if (atomic_read(&tx_ring->queue_stopped) &&
2234                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2235                         /*
2236                          * The queue got stopped because the tx_ring was full.
2237                          * Wake it up, because it's now at least 25% empty.
2238                          */
2239                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2240         }
2241
2242         return count;
2243 }
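
/* A concrete example of the wake threshold above, with a hypothetical
 * wq_len of 256 descriptors: a stopped queue is only restarted once more
 * than wq_len / 4 = 64 descriptors are free again, i.e. the ring is at
 * least 25% empty, which keeps the queue from bouncing on and off for
 * every completed frame.
 */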
2244
2245 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2246 {
2247         struct ql_adapter *qdev = rx_ring->qdev;
2248         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2249         struct ql_net_rsp_iocb *net_rsp;
2250         int count = 0;
2251
2252         /* While there are entries in the completion queue. */
2253         while (prod != rx_ring->cnsmr_idx) {
2254
2255                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2257                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2258
2259                 net_rsp = rx_ring->curr_entry;
2260                 rmb();
2261                 switch (net_rsp->opcode) {
2262                 case OPCODE_IB_MAC_IOCB:
2263                         ql_process_mac_rx_intr(qdev, rx_ring,
2264                                                (struct ib_mac_iocb_rsp *)
2265                                                net_rsp);
2266                         break;
2267
2268                 case OPCODE_IB_AE_IOCB:
2269                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2270                                                 net_rsp);
2271                         break;
2272                 default:
2273                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2275                                      net_rsp->opcode);
2276                         break;
2277                 }
2278                 count++;
2279                 ql_update_cq(rx_ring);
2280                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2281                 if (count == budget)
2282                         break;
2283         }
2284         ql_update_buffer_queues(qdev, rx_ring);
2285         ql_write_cq_idx(rx_ring);
2286         return count;
2287 }
2288
2289 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2290 {
2291         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2292         struct ql_adapter *qdev = rx_ring->qdev;
2293         struct rx_ring *trx_ring;
2294         int i, work_done = 0;
2295         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2296
2297         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2299
2300         /* Service the TX rings first.  They start
2301          * right after the RSS rings. */
2302         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2303                 trx_ring = &qdev->rx_ring[i];
2304                 /* If this TX completion ring belongs to this vector and
2305                  * it's not empty then service it.
2306                  */
2307                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2308                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2309                                         trx_ring->cnsmr_idx)) {
2310                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2311                                      "%s: Servicing TX completion ring %d.\n",
2312                                      __func__, trx_ring->cq_id);
2313                         ql_clean_outbound_rx_ring(trx_ring);
2314                 }
2315         }
2316
2317         /*
2318          * Now service the RSS ring if it's active.
2319          */
2320         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2321                                         rx_ring->cnsmr_idx) {
2322                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2323                              "%s: Servicing RX completion ring %d.\n",
2324                              __func__, rx_ring->cq_id);
2325                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2326         }
2327
2328         if (work_done < budget) {
2329                 napi_complete(napi);
2330                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2331         }
2332         return work_done;
2333 }
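
/* Standard NAPI budget contract: returning less than 'budget' means this
 * vector is idle, so the poll routine completes NAPI and re-arms its
 * completion interrupt itself.  If work_done == budget the interrupt stays
 * masked and the core simply schedules another poll pass.
 */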
2334
2335 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2336 {
2337         struct ql_adapter *qdev = netdev_priv(ndev);
2338
2339         qdev->vlgrp = grp;
2340         if (grp) {
2341                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2342                              "Turning on VLAN in NIC_RCV_CFG.\n");
2343                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2344                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2345         } else {
2346                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2347                              "Turning off VLAN in NIC_RCV_CFG.\n");
2348                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349         }
2350 }
2351
2352 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2353 {
2354         struct ql_adapter *qdev = netdev_priv(ndev);
2355         u32 enable_bit = MAC_ADDR_E;
2356         int status;
2357
2358         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359         if (status)
2360                 return;
2361         if (ql_set_mac_addr_reg
2362             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2363                 netif_err(qdev, ifup, qdev->ndev,
2364                           "Failed to init vlan address.\n");
2365         }
2366         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2367 }
2368
2369 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2370 {
2371         struct ql_adapter *qdev = netdev_priv(ndev);
2372         u32 enable_bit = 0;
2373         int status;
2374
2375         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376         if (status)
2377                 return;
2378
2379         if (ql_set_mac_addr_reg
2380             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2381                 netif_err(qdev, ifup, qdev->ndev,
2382                           "Failed to clear vlan address.\n");
2383         }
2384         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2385
2386 }
2387
2388 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2389 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2390 {
2391         struct rx_ring *rx_ring = dev_id;
2392         napi_schedule(&rx_ring->napi);
2393         return IRQ_HANDLED;
2394 }
2395
2396 /* This handles a fatal error, MPI activity, and the default
2397  * rx_ring in an MSI-X multiple vector environment.
2398  * In an MSI/Legacy environment it also processes the rest of
2399  * the rx_rings.
2400  */
2401 static irqreturn_t qlge_isr(int irq, void *dev_id)
2402 {
2403         struct rx_ring *rx_ring = dev_id;
2404         struct ql_adapter *qdev = rx_ring->qdev;
2405         struct intr_context *intr_context = &qdev->intr_context[0];
2406         u32 var;
2407         int work_done = 0;
2408
2409         spin_lock(&qdev->hw_lock);
2410         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2411                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2412                              "Shared Interrupt, Not ours!\n");
2413                 spin_unlock(&qdev->hw_lock);
2414                 return IRQ_NONE;
2415         }
2416         spin_unlock(&qdev->hw_lock);
2417
2418         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2419
2420         /*
2421          * Check for fatal error.
2422          */
2423         if (var & STS_FE) {
2424                 ql_queue_asic_error(qdev);
2425                 netif_err(qdev, intr, qdev->ndev,
2426                           "Got fatal error, STS = %x.\n", var);
2427                 var = ql_read32(qdev, ERR_STS);
2428                 netif_err(qdev, intr, qdev->ndev,
2429                           "Resetting chip. Error Status Register = 0x%x\n", var);
2430                 return IRQ_HANDLED;
2431         }
2432
2433         /*
2434          * Check MPI processor activity.
2435          */
2436         if ((var & STS_PI) &&
2437                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2438                 /*
2439                  * We've got an async event or mailbox completion.
2440                  * Handle it and clear the source of the interrupt.
2441                  */
2442                 netif_err(qdev, intr, qdev->ndev,
2443                           "Got MPI processor interrupt.\n");
2444                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2445                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2446                 queue_delayed_work_on(smp_processor_id(),
2447                                 qdev->workqueue, &qdev->mpi_work, 0);
2448                 work_done++;
2449         }
2450
2451         /*
2452          * Get the bit-mask that shows the active queues for this
2453          * pass.  Compare it to the queues that this irq services
2454          * and call napi if there's a match.
2455          */
2456         var = ql_read32(qdev, ISR1);
2457         if (var & intr_context->irq_mask) {
2458                 netif_info(qdev, intr, qdev->ndev,
2459                            "Waking handler for rx_ring[0].\n");
2460                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2461                 napi_schedule(&rx_ring->napi);
2462                 work_done++;
2463         }
2464         ql_enable_completion_interrupt(qdev, intr_context->intr);
2465         return work_done ? IRQ_HANDLED : IRQ_NONE;
2466 }
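
/* qlge_isr() may sit on a shared legacy interrupt line, so it returns
 * IRQ_NONE both when irq_cnt shows another context already owns the vector
 * and when no fatal error, MPI event or active queue bit in ISR1 is found;
 * that keeps the kernel's spurious-interrupt accounting honest for the other
 * devices sharing the line.
 */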
2467
2468 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2469 {
2470
2471         if (skb_is_gso(skb)) {
2472                 int err;
2473                 if (skb_header_cloned(skb)) {
2474                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2475                         if (err)
2476                                 return err;
2477                 }
2478
2479                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2480                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2481                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2482                 mac_iocb_ptr->total_hdrs_len =
2483                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2484                 mac_iocb_ptr->net_trans_offset =
2485                     cpu_to_le16(skb_network_offset(skb) |
2486                                 skb_transport_offset(skb)
2487                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2488                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2489                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2490                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2491                         struct iphdr *iph = ip_hdr(skb);
2492                         iph->check = 0;
2493                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2494                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2495                                                                  iph->daddr, 0,
2496                                                                  IPPROTO_TCP,
2497                                                                  0);
2498                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2499                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2500                         tcp_hdr(skb)->check =
2501                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2502                                              &ipv6_hdr(skb)->daddr,
2503                                              0, IPPROTO_TCP, 0);
2504                 }
2505                 return 1;
2506         }
2507         return 0;
2508 }
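
/* ql_tso() return values as consumed by qlge_send() below: negative means
 * pskb_expand_head() failed and the skb is dropped, 1 means the IOCB has
 * been turned into a TSO request (the TCP checksum field is pre-seeded with
 * the complemented pseudo-header sum), and 0 means the frame is not GSO, in
 * which case qlge_send() falls back to ql_hw_csum_setup() for
 * CHECKSUM_PARTIAL skbs.
 */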
2509
2510 static void ql_hw_csum_setup(struct sk_buff *skb,
2511                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2512 {
2513         int len;
2514         struct iphdr *iph = ip_hdr(skb);
2515         __sum16 *check;
2516         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2517         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2518         mac_iocb_ptr->net_trans_offset =
2519                 cpu_to_le16(skb_network_offset(skb) |
2520                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2521
2522         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2523         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2524         if (likely(iph->protocol == IPPROTO_TCP)) {
2525                 check = &(tcp_hdr(skb)->check);
2526                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2527                 mac_iocb_ptr->total_hdrs_len =
2528                     cpu_to_le16(skb_transport_offset(skb) +
2529                                 (tcp_hdr(skb)->doff << 2));
2530         } else {
2531                 check = &(udp_hdr(skb)->check);
2532                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2533                 mac_iocb_ptr->total_hdrs_len =
2534                     cpu_to_le16(skb_transport_offset(skb) +
2535                                 sizeof(struct udphdr));
2536         }
2537         *check = ~csum_tcpudp_magic(iph->saddr,
2538                                     iph->daddr, len, iph->protocol, 0);
2539 }
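
/*
 * Sketch (for illustration only, not part of the driver): both ql_tso()
 * and ql_hw_csum_setup() seed the L4 checksum field with the inverted
 * pseudo-header sum, so the chip only has to fold in the payload bytes.
 * For an IPv4/TCP frame the seeding boils down to:
 */
#if 0   /* example only */
static void example_seed_tcp_csum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        int len = ntohs(iph->tot_len) - (iph->ihl << 2);        /* L4 bytes */

        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 len, IPPROTO_TCP, 0);
}
#endif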
2540
2541 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2542 {
2543         struct tx_ring_desc *tx_ring_desc;
2544         struct ob_mac_iocb_req *mac_iocb_ptr;
2545         struct ql_adapter *qdev = netdev_priv(ndev);
2546         int tso;
2547         struct tx_ring *tx_ring;
2548         u32 tx_ring_idx = (u32) skb->queue_mapping;
2549
2550         tx_ring = &qdev->tx_ring[tx_ring_idx];
2551
2552         if (skb_padto(skb, ETH_ZLEN))
2553                 return NETDEV_TX_OK;
2554
2555         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2556                 netif_info(qdev, tx_queued, qdev->ndev,
2557                            "%s: shutting down tx queue %d due to lack of resources.\n",
2558                            __func__, tx_ring_idx);
2559                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2560                 atomic_inc(&tx_ring->queue_stopped);
2561                 tx_ring->tx_errors++;
2562                 return NETDEV_TX_BUSY;
2563         }
2564         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2565         mac_iocb_ptr = tx_ring_desc->queue_entry;
2566         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2567
2568         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2569         mac_iocb_ptr->tid = tx_ring_desc->index;
2570         /* We use the upper 32-bits to store the tx queue for this IO.
2571          * When we get the completion we can use it to establish the context.
2572          */
2573         mac_iocb_ptr->txq_idx = tx_ring_idx;
2574         tx_ring_desc->skb = skb;
2575
2576         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2577
2578         if (vlan_tx_tag_present(skb)) {
2579                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2580                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2581                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2582                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2583         }
2584         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2585         if (tso < 0) {
2586                 dev_kfree_skb_any(skb);
2587                 return NETDEV_TX_OK;
2588         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2589                 ql_hw_csum_setup(skb,
2590                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2591         }
2592         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2593                         NETDEV_TX_OK) {
2594                 netif_err(qdev, tx_queued, qdev->ndev,
2595                           "Could not map the segments.\n");
2596                 tx_ring->tx_errors++;
2597                 return NETDEV_TX_BUSY;
2598         }
2599         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2600         tx_ring->prod_idx++;
2601         if (tx_ring->prod_idx == tx_ring->wq_len)
2602                 tx_ring->prod_idx = 0;
2603         wmb();
2604
2605         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2606         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2607                      "tx queued, slot %d, len %d\n",
2608                      tx_ring->prod_idx, skb->len);
2609
2610         atomic_dec(&tx_ring->tx_count);
2611         return NETDEV_TX_OK;
2612 }
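
/*
 * Worked example (hypothetical ring size): if tx_ring->wq_len were 128,
 * the producer index in qlge_send() would advance 0, 1, ..., 127 and then
 * wrap back to 0.  The wmb() before the doorbell write orders the IOCB
 * stores against ql_write_db_reg(), so the chip never sees a producer
 * index that points at a half-written descriptor.
 */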
2613
2614
2615 static void ql_free_shadow_space(struct ql_adapter *qdev)
2616 {
2617         if (qdev->rx_ring_shadow_reg_area) {
2618                 pci_free_consistent(qdev->pdev,
2619                                     PAGE_SIZE,
2620                                     qdev->rx_ring_shadow_reg_area,
2621                                     qdev->rx_ring_shadow_reg_dma);
2622                 qdev->rx_ring_shadow_reg_area = NULL;
2623         }
2624         if (qdev->tx_ring_shadow_reg_area) {
2625                 pci_free_consistent(qdev->pdev,
2626                                     PAGE_SIZE,
2627                                     qdev->tx_ring_shadow_reg_area,
2628                                     qdev->tx_ring_shadow_reg_dma);
2629                 qdev->tx_ring_shadow_reg_area = NULL;
2630         }
2631 }
2632
2633 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2634 {
2635         qdev->rx_ring_shadow_reg_area =
2636             pci_alloc_consistent(qdev->pdev,
2637                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2638         if (qdev->rx_ring_shadow_reg_area == NULL) {
2639                 netif_err(qdev, ifup, qdev->ndev,
2640                           "Allocation of RX shadow space failed.\n");
2641                 return -ENOMEM;
2642         }
2643         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2644         qdev->tx_ring_shadow_reg_area =
2645             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2646                                  &qdev->tx_ring_shadow_reg_dma);
2647         if (qdev->tx_ring_shadow_reg_area == NULL) {
2648                 netif_err(qdev, ifup, qdev->ndev,
2649                           "Allocation of TX shadow space failed.\n");
2650                 goto err_wqp_sh_area;
2651         }
2652         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2653         return 0;
2654
2655 err_wqp_sh_area:
2656         pci_free_consistent(qdev->pdev,
2657                             PAGE_SIZE,
2658                             qdev->rx_ring_shadow_reg_area,
2659                             qdev->rx_ring_shadow_reg_dma);
2660         return -ENOMEM;
2661 }
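
/*
 * Layout note (derived from ql_start_rx_ring()/ql_start_tx_ring() below):
 * the RX shadow page is carved into one RX_RING_SHADOW_SPACE-sized block
 * per completion queue, holding that queue's producer-index word followed
 * by its lbq and sbq indirect page-address lists.  The TX shadow page
 * holds one 8-byte consumer index per TX work queue, indexed by wq_id.
 */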
2662
2663 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2664 {
2665         struct tx_ring_desc *tx_ring_desc;
2666         int i;
2667         struct ob_mac_iocb_req *mac_iocb_ptr;
2668
2669         mac_iocb_ptr = tx_ring->wq_base;
2670         tx_ring_desc = tx_ring->q;
2671         for (i = 0; i < tx_ring->wq_len; i++) {
2672                 tx_ring_desc->index = i;
2673                 tx_ring_desc->skb = NULL;
2674                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2675                 mac_iocb_ptr++;
2676                 tx_ring_desc++;
2677         }
2678         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2679         atomic_set(&tx_ring->queue_stopped, 0);
2680 }
2681
2682 static void ql_free_tx_resources(struct ql_adapter *qdev,
2683                                  struct tx_ring *tx_ring)
2684 {
2685         if (tx_ring->wq_base) {
2686                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2687                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2688                 tx_ring->wq_base = NULL;
2689         }
2690         kfree(tx_ring->q);
2691         tx_ring->q = NULL;
2692 }
2693
2694 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2695                                  struct tx_ring *tx_ring)
2696 {
2697         tx_ring->wq_base =
2698             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2699                                  &tx_ring->wq_base_dma);
2700
2701         if ((tx_ring->wq_base == NULL) ||
2702             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2703                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2704                 return -ENOMEM;
2705         }
2706         tx_ring->q =
2707             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2708         if (tx_ring->q == NULL)
2709                 goto err;
2710
2711         return 0;
2712 err:
2713         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2714                             tx_ring->wq_base, tx_ring->wq_base_dma);
2715         return -ENOMEM;
2716 }
2717
2718 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2719 {
2720         struct bq_desc *lbq_desc;
2721
2722         uint32_t  curr_idx, clean_idx;
2723
2724         curr_idx = rx_ring->lbq_curr_idx;
2725         clean_idx = rx_ring->lbq_clean_idx;
2726         while (curr_idx != clean_idx) {
2727                 lbq_desc = &rx_ring->lbq[curr_idx];
2728
2729                 if (lbq_desc->p.pg_chunk.last_flag) {
2730                         pci_unmap_page(qdev->pdev,
2731                                 lbq_desc->p.pg_chunk.map,
2732                                 ql_lbq_block_size(qdev),
2733                                        PCI_DMA_FROMDEVICE);
2734                         lbq_desc->p.pg_chunk.last_flag = 0;
2735                 }
2736
2737                 put_page(lbq_desc->p.pg_chunk.page);
2738                 lbq_desc->p.pg_chunk.page = NULL;
2739
2740                 if (++curr_idx == rx_ring->lbq_len)
2741                         curr_idx = 0;
2742
2743         }
2744 }
2745
2746 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2747 {
2748         int i;
2749         struct bq_desc *sbq_desc;
2750
2751         for (i = 0; i < rx_ring->sbq_len; i++) {
2752                 sbq_desc = &rx_ring->sbq[i];
2753                 if (sbq_desc == NULL) {
2754                         netif_err(qdev, ifup, qdev->ndev,
2755                                   "sbq_desc %d is NULL.\n", i);
2756                         return;
2757                 }
2758                 if (sbq_desc->p.skb) {
2759                         pci_unmap_single(qdev->pdev,
2760                                          dma_unmap_addr(sbq_desc, mapaddr),
2761                                          dma_unmap_len(sbq_desc, maplen),
2762                                          PCI_DMA_FROMDEVICE);
2763                         dev_kfree_skb(sbq_desc->p.skb);
2764                         sbq_desc->p.skb = NULL;
2765                 }
2766         }
2767 }
2768
2769 /* Free all large and small rx buffers associated
2770  * with the completion queues for this device.
2771  */
2772 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2773 {
2774         int i;
2775         struct rx_ring *rx_ring;
2776
2777         for (i = 0; i < qdev->rx_ring_count; i++) {
2778                 rx_ring = &qdev->rx_ring[i];
2779                 if (rx_ring->lbq)
2780                         ql_free_lbq_buffers(qdev, rx_ring);
2781                 if (rx_ring->sbq)
2782                         ql_free_sbq_buffers(qdev, rx_ring);
2783         }
2784 }
2785
2786 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2787 {
2788         struct rx_ring *rx_ring;
2789         int i;
2790
2791         for (i = 0; i < qdev->rx_ring_count; i++) {
2792                 rx_ring = &qdev->rx_ring[i];
2793                 if (rx_ring->type != TX_Q)
2794                         ql_update_buffer_queues(qdev, rx_ring);
2795         }
2796 }
2797
2798 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2799                                 struct rx_ring *rx_ring)
2800 {
2801         int i;
2802         struct bq_desc *lbq_desc;
2803         __le64 *bq = rx_ring->lbq_base;
2804
2805         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2806         for (i = 0; i < rx_ring->lbq_len; i++) {
2807                 lbq_desc = &rx_ring->lbq[i];
2808                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2809                 lbq_desc->index = i;
2810                 lbq_desc->addr = bq;
2811                 bq++;
2812         }
2813 }
2814
2815 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2816                                 struct rx_ring *rx_ring)
2817 {
2818         int i;
2819         struct bq_desc *sbq_desc;
2820         __le64 *bq = rx_ring->sbq_base;
2821
2822         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2823         for (i = 0; i < rx_ring->sbq_len; i++) {
2824                 sbq_desc = &rx_ring->sbq[i];
2825                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2826                 sbq_desc->index = i;
2827                 sbq_desc->addr = bq;
2828                 bq++;
2829         }
2830 }
2831
2832 static void ql_free_rx_resources(struct ql_adapter *qdev,
2833                                  struct rx_ring *rx_ring)
2834 {
2835         /* Free the small buffer queue. */
2836         if (rx_ring->sbq_base) {
2837                 pci_free_consistent(qdev->pdev,
2838                                     rx_ring->sbq_size,
2839                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2840                 rx_ring->sbq_base = NULL;
2841         }
2842
2843         /* Free the small buffer queue control blocks. */
2844         kfree(rx_ring->sbq);
2845         rx_ring->sbq = NULL;
2846
2847         /* Free the large buffer queue. */
2848         if (rx_ring->lbq_base) {
2849                 pci_free_consistent(qdev->pdev,
2850                                     rx_ring->lbq_size,
2851                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2852                 rx_ring->lbq_base = NULL;
2853         }
2854
2855         /* Free the large buffer queue control blocks. */
2856         kfree(rx_ring->lbq);
2857         rx_ring->lbq = NULL;
2858
2859         /* Free the rx queue. */
2860         if (rx_ring->cq_base) {
2861                 pci_free_consistent(qdev->pdev,
2862                                     rx_ring->cq_size,
2863                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2864                 rx_ring->cq_base = NULL;
2865         }
2866 }
2867
2868 /* Allocate queues and buffers for this completion queue based
2869  * on the values in the parameter structure. */
2870 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2871                                  struct rx_ring *rx_ring)
2872 {
2873
2874         /*
2875          * Allocate the completion queue for this rx_ring.
2876          */
2877         rx_ring->cq_base =
2878             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2879                                  &rx_ring->cq_base_dma);
2880
2881         if (rx_ring->cq_base == NULL) {
2882                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2883                 return -ENOMEM;
2884         }
2885
2886         if (rx_ring->sbq_len) {
2887                 /*
2888                  * Allocate small buffer queue.
2889                  */
2890                 rx_ring->sbq_base =
2891                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2892                                          &rx_ring->sbq_base_dma);
2893
2894                 if (rx_ring->sbq_base == NULL) {
2895                         netif_err(qdev, ifup, qdev->ndev,
2896                                   "Small buffer queue allocation failed.\n");
2897                         goto err_mem;
2898                 }
2899
2900                 /*
2901                  * Allocate small buffer queue control blocks.
2902                  */
2903                 rx_ring->sbq =
2904                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2905                             GFP_KERNEL);
2906                 if (rx_ring->sbq == NULL) {
2907                         netif_err(qdev, ifup, qdev->ndev,
2908                                   "Small buffer queue control block allocation failed.\n");
2909                         goto err_mem;
2910                 }
2911
2912                 ql_init_sbq_ring(qdev, rx_ring);
2913         }
2914
2915         if (rx_ring->lbq_len) {
2916                 /*
2917                  * Allocate large buffer queue.
2918                  */
2919                 rx_ring->lbq_base =
2920                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2921                                          &rx_ring->lbq_base_dma);
2922
2923                 if (rx_ring->lbq_base == NULL) {
2924                         netif_err(qdev, ifup, qdev->ndev,
2925                                   "Large buffer queue allocation failed.\n");
2926                         goto err_mem;
2927                 }
2928                 /*
2929                  * Allocate large buffer queue control blocks.
2930                  */
2931                 rx_ring->lbq =
2932                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2933                             GFP_KERNEL);
2934                 if (rx_ring->lbq == NULL) {
2935                         netif_err(qdev, ifup, qdev->ndev,
2936                                   "Large buffer queue control block allocation failed.\n");
2937                         goto err_mem;
2938                 }
2939
2940                 ql_init_lbq_ring(qdev, rx_ring);
2941         }
2942
2943         return 0;
2944
2945 err_mem:
2946         ql_free_rx_resources(qdev, rx_ring);
2947         return -ENOMEM;
2948 }
2949
2950 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2951 {
2952         struct tx_ring *tx_ring;
2953         struct tx_ring_desc *tx_ring_desc;
2954         int i, j;
2955
2956         /*
2957          * Loop through all queues and free
2958          * any resources.
2959          */
2960         for (j = 0; j < qdev->tx_ring_count; j++) {
2961                 tx_ring = &qdev->tx_ring[j];
2962                 for (i = 0; i < tx_ring->wq_len; i++) {
2963                         tx_ring_desc = &tx_ring->q[i];
2964                         if (tx_ring_desc && tx_ring_desc->skb) {
2965                                 netif_err(qdev, ifdown, qdev->ndev,
2966                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2967                                           tx_ring_desc->skb, j,
2968                                           tx_ring_desc->index);
2969                                 ql_unmap_send(qdev, tx_ring_desc,
2970                                               tx_ring_desc->map_cnt);
2971                                 dev_kfree_skb(tx_ring_desc->skb);
2972                                 tx_ring_desc->skb = NULL;
2973                         }
2974                 }
2975         }
2976 }
2977
2978 static void ql_free_mem_resources(struct ql_adapter *qdev)
2979 {
2980         int i;
2981
2982         for (i = 0; i < qdev->tx_ring_count; i++)
2983                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2984         for (i = 0; i < qdev->rx_ring_count; i++)
2985                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2986         ql_free_shadow_space(qdev);
2987 }
2988
2989 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2990 {
2991         int i;
2992
2993         /* Allocate space for our shadow registers and such. */
2994         if (ql_alloc_shadow_space(qdev))
2995                 return -ENOMEM;
2996
2997         for (i = 0; i < qdev->rx_ring_count; i++) {
2998                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2999                         netif_err(qdev, ifup, qdev->ndev,
3000                                   "RX resource allocation failed.\n");
3001                         goto err_mem;
3002                 }
3003         }
3004         /* Allocate tx queue resources */
3005         for (i = 0; i < qdev->tx_ring_count; i++) {
3006                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3007                         netif_err(qdev, ifup, qdev->ndev,
3008                                   "TX resource allocation failed.\n");
3009                         goto err_mem;
3010                 }
3011         }
3012         return 0;
3013
3014 err_mem:
3015         ql_free_mem_resources(qdev);
3016         return -ENOMEM;
3017 }
3018
3019 /* Set up the rx ring control block and pass it to the chip.
3020  * The control block is defined as
3021  * "Completion Queue Initialization Control Block", or cqicb.
3022  */
3023 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3024 {
3025         struct cqicb *cqicb = &rx_ring->cqicb;
3026         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3027                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3028         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3029                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3030         void __iomem *doorbell_area =
3031             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3032         int err = 0;
3033         u16 bq_len;
3034         u64 tmp;
3035         __le64 *base_indirect_ptr;
3036         int page_entries;
3037
3038         /* Set up the shadow registers for this ring. */
3039         rx_ring->prod_idx_sh_reg = shadow_reg;
3040         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3041         *rx_ring->prod_idx_sh_reg = 0;
3042         shadow_reg += sizeof(u64);
3043         shadow_reg_dma += sizeof(u64);
3044         rx_ring->lbq_base_indirect = shadow_reg;
3045         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3046         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3047         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3048         rx_ring->sbq_base_indirect = shadow_reg;
3049         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3050
3051         /* PCI doorbell mem area + 0x00 for consumer index register */
3052         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3053         rx_ring->cnsmr_idx = 0;
3054         rx_ring->curr_entry = rx_ring->cq_base;
3055
3056         /* PCI doorbell mem area + 0x04 for valid register */
3057         rx_ring->valid_db_reg = doorbell_area + 0x04;
3058
3059         /* PCI doorbell mem area + 0x18 for the large buffer queue producer index */
3060         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3061
3062         /* PCI doorbell mem area + 0x1c for the small buffer queue producer index */
3063         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3064
3065         memset((void *)cqicb, 0, sizeof(struct cqicb));
3066         cqicb->msix_vect = rx_ring->irq;
3067
3068         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3069         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3070
3071         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3072
3073         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3074
3075         /*
3076          * Set up the control block load flags.
3077          */
3078         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3079             FLAGS_LV |          /* Load MSI-X vector */
3080             FLAGS_LI;           /* Load irq delay values */
3081         if (rx_ring->lbq_len) {
3082                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3083                 tmp = (u64)rx_ring->lbq_base_dma;
3084                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3085                 page_entries = 0;
3086                 do {
3087                         *base_indirect_ptr = cpu_to_le64(tmp);
3088                         tmp += DB_PAGE_SIZE;
3089                         base_indirect_ptr++;
3090                         page_entries++;
3091                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3092                 cqicb->lbq_addr =
3093                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3094                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3095                         (u16) rx_ring->lbq_buf_size;
3096                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3097                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3098                         (u16) rx_ring->lbq_len;
3099                 cqicb->lbq_len = cpu_to_le16(bq_len);
3100                 rx_ring->lbq_prod_idx = 0;
3101                 rx_ring->lbq_curr_idx = 0;
3102                 rx_ring->lbq_clean_idx = 0;
3103                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3104         }
3105         if (rx_ring->sbq_len) {
3106                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3107                 tmp = (u64)rx_ring->sbq_base_dma;
3108                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3109                 page_entries = 0;
3110                 do {
3111                         *base_indirect_ptr = cpu_to_le64(tmp);
3112                         tmp += DB_PAGE_SIZE;
3113                         base_indirect_ptr++;
3114                         page_entries++;
3115                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3116                 cqicb->sbq_addr =
3117                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3118                 cqicb->sbq_buf_size =
3119                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3120                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3121                         (u16) rx_ring->sbq_len;
3122                 cqicb->sbq_len = cpu_to_le16(bq_len);
3123                 rx_ring->sbq_prod_idx = 0;
3124                 rx_ring->sbq_curr_idx = 0;
3125                 rx_ring->sbq_clean_idx = 0;
3126                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3127         }
3128         switch (rx_ring->type) {
3129         case TX_Q:
3130                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3131                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3132                 break;
3133         case RX_Q:
3134                 /* Inbound completion handling rx_rings run in
3135                  * separate NAPI contexts.
3136                  */
3137                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3138                                64);
3139                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3140                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3141                 break;
3142         default:
3143                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3145         }
3146         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3147                      "Initializing rx work queue.\n");
3148         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3149                            CFG_LCQ, rx_ring->cq_id);
3150         if (err) {
3151                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3152                 return err;
3153         }
3154         return err;
3155 }
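
/*
 * Worked example of the indirect lists built above (hypothetical size):
 * if the large buffer queue array spans three DB_PAGE_SIZE pages, the
 * shadow area ends up with three little-endian entries
 *
 *   lbq_base_indirect[0] = cpu_to_le64(lbq_base_dma)
 *   lbq_base_indirect[1] = cpu_to_le64(lbq_base_dma + DB_PAGE_SIZE)
 *   lbq_base_indirect[2] = cpu_to_le64(lbq_base_dma + 2 * DB_PAGE_SIZE)
 *
 * and cqicb->lbq_addr points the chip at that list rather than at the
 * queue itself.  The sbq list is built the same way.
 */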
3156
3157 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3158 {
3159         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3160         void __iomem *doorbell_area =
3161             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3162         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3163             (tx_ring->wq_id * sizeof(u64));
3164         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3165             (tx_ring->wq_id * sizeof(u64));
3166         int err = 0;
3167
3168         /*
3169          * Assign doorbell registers for this tx_ring.
3170          */
3171         /* TX PCI doorbell mem area for tx producer index */
3172         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3173         tx_ring->prod_idx = 0;
3174         /* TX PCI doorbell mem area + 0x04 */
3175         tx_ring->valid_db_reg = doorbell_area + 0x04;
3176
3177         /*
3178          * Assign shadow registers for this tx_ring.
3179          */
3180         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3181         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3182
3183         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3184         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3185                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3186         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3187         wqicb->rid = 0;
3188         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3189
3190         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3191
3192         ql_init_tx_ring(qdev, tx_ring);
3193
3194         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3195                            (u16) tx_ring->wq_id);
3196         if (err) {
3197                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3198                 return err;
3199         }
3200         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3201                      "Successfully loaded WQICB.\n");
3202         return err;
3203 }
3204
3205 static void ql_disable_msix(struct ql_adapter *qdev)
3206 {
3207         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3208                 pci_disable_msix(qdev->pdev);
3209                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3210                 kfree(qdev->msi_x_entry);
3211                 qdev->msi_x_entry = NULL;
3212         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3213                 pci_disable_msi(qdev->pdev);
3214                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3215         }
3216 }
3217
3218 /* We start by trying to get the number of vectors
3219  * stored in qdev->intr_count. If we don't get that
3220  * many then we reduce the count and try again.
3221  */
3222 static void ql_enable_msix(struct ql_adapter *qdev)
3223 {
3224         int i, err;
3225
3226         /* Get the MSIX vectors. */
3227         if (qlge_irq_type == MSIX_IRQ) {
3228                 /* Try to alloc space for the msix struct,
3229                  * if it fails then go to MSI/legacy.
3230                  */
3231                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3232                                             sizeof(struct msix_entry),
3233                                             GFP_KERNEL);
3234                 if (!qdev->msi_x_entry) {
3235                         qlge_irq_type = MSI_IRQ;
3236                         goto msi;
3237                 }
3238
3239                 for (i = 0; i < qdev->intr_count; i++)
3240                         qdev->msi_x_entry[i].entry = i;
3241
3242                 /* Loop to get our vectors.  We start with
3243                  * what we want and settle for what we get.
3244                  */
3245                 do {
3246                         err = pci_enable_msix(qdev->pdev,
3247                                 qdev->msi_x_entry, qdev->intr_count);
3248                         if (err > 0)
3249                                 qdev->intr_count = err;
3250                 } while (err > 0);
3251
3252                 if (err < 0) {
3253                         kfree(qdev->msi_x_entry);
3254                         qdev->msi_x_entry = NULL;
3255                         netif_warn(qdev, ifup, qdev->ndev,
3256                                    "MSI-X Enable failed, trying MSI.\n");
3257                         qdev->intr_count = 1;
3258                         qlge_irq_type = MSI_IRQ;
3259                 } else if (err == 0) {
3260                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3261                         netif_info(qdev, ifup, qdev->ndev,
3262                                    "MSI-X Enabled, got %d vectors.\n",
3263                                    qdev->intr_count);
3264                         return;
3265                 }
3266         }
3267 msi:
3268         qdev->intr_count = 1;
3269         if (qlge_irq_type == MSI_IRQ) {
3270                 if (!pci_enable_msi(qdev->pdev)) {
3271                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3272                         netif_info(qdev, ifup, qdev->ndev,
3273                                    "Running with MSI interrupts.\n");
3274                         return;
3275                 }
3276         }
3277         qlge_irq_type = LEG_IRQ;
3278         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3279                      "Running with legacy interrupts.\n");
3280 }
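
/*
 * Example of the negotiation loop above (hypothetical numbers): if the
 * driver asks for 8 vectors but only 4 are available, the first
 * pci_enable_msix() call returns 4, intr_count is lowered to 4, and the
 * retry returns 0, so MSI-X comes up with 4 vectors.  A negative return
 * falls back to MSI, and failing that to legacy INTx.
 */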
3281
3282 /* Each vector services 1 RSS ring and 1 or more
3283  * TX completion rings.  This function loops through
3284  * the TX completion rings and assigns the vector that
3285  * will service it.  An example would be if there are
3286  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3287  * This would mean that vector 0 would service RSS ring 0
3288  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3289  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3290  */
3291 static void ql_set_tx_vect(struct ql_adapter *qdev)
3292 {
3293         int i, j, vect;
3294         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3295
3296         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3297                 /* Assign irq vectors to TX rx_rings.*/
3298                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3299                                          i < qdev->rx_ring_count; i++) {
3300                         if (j == tx_rings_per_vector) {
3301                                 vect++;
3302                                 j = 0;
3303                         }
3304                         qdev->rx_ring[i].irq = vect;
3305                         j++;
3306                 }
3307         } else {
3308                 /* For single vector all rings have an irq
3309                  * of zero.
3310                  */
3311                 for (i = 0; i < qdev->rx_ring_count; i++)
3312                         qdev->rx_ring[i].irq = 0;
3313         }
3314 }
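
/*
 * Worked example of the mapping above (hypothetical counts): with
 * intr_count = 2, rss_ring_count = 2 and tx_ring_count = 8,
 * tx_rings_per_vector is 4 and the TX completion rings live at
 * rx_ring[2]..rx_ring[9], so the loop assigns
 *
 *   rx_ring[2..5].irq = 0
 *   rx_ring[6..9].irq = 1
 */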
3315
3316 /* Set the interrupt mask for this vector.  Each vector
3317  * will service 1 RSS ring and 1 or more TX completion
3318  * rings.  This function sets up a bit mask per vector
3319  * that indicates which rings it services.
3320  */
3321 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3322 {
3323         int j, vect = ctx->intr;
3324         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3325
3326         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327                 /* Add the RSS ring serviced by this vector
3328                  * to the mask.
3329                  */
3330                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3331                 /* Add the TX ring(s) serviced by this vector
3332                  * to the mask. */
3333                 for (j = 0; j < tx_rings_per_vector; j++) {
3334                         ctx->irq_mask |=
3335                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3336                         (vect * tx_rings_per_vector) + j].cq_id);
3337                 }
3338         } else {
3339                 /* For single vector we just shift each queue's
3340                  * ID into the mask.
3341                  */
3342                 for (j = 0; j < qdev->rx_ring_count; j++)
3343                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3344         }
3345 }
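
/*
 * Continuing the example above, and assuming cq_id simply equals the
 * rx_ring index (as the driver sets up elsewhere), vector 1 ends up with
 *
 *   irq_mask = (1 << 1)                                  RSS ring 1
 *            | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) TX completion rings
 *
 * so enabling or disabling that vector touches exactly those five queues.
 */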
3346
3347 /*
3348  * Here we build the intr_context structures based on
3349  * our rx_ring count and intr vector count.
3350  * The intr_context structure is used to hook each vector
3351  * to possibly different handlers.
3352  */
3353 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3354 {
3355         int i = 0;
3356         struct intr_context *intr_context = &qdev->intr_context[0];
3357
3358         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359                 /* Each rx_ring has its
3360                  * own intr_context since we have separate
3361                  * vectors for each queue.
3362                  */
3363                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3364                         qdev->rx_ring[i].irq = i;
3365                         intr_context->intr = i;
3366                         intr_context->qdev = qdev;
3367                         /* Set up this vector's bit-mask that indicates
3368                          * which queues it services.
3369                          */
3370                         ql_set_irq_mask(qdev, intr_context);
3371                         /*
3372                          * We set up each vector's enable/disable/read bits so
3373                          * there are no bit/mask calculations in the critical path.
3374                          */
3375                         intr_context->intr_en_mask =
3376                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3377                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3378                             | i;
3379                         intr_context->intr_dis_mask =
3380                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3382                             INTR_EN_IHD | i;
3383                         intr_context->intr_read_mask =
3384                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3386                             i;
3387                         if (i == 0) {
3388                                 /* The first vector/queue handles
3389                                  * broadcast/multicast, fatal errors,
3390                                  * and firmware events.  This in addition
3391                                  * to normal inbound NAPI processing.
3392                                  */
3393                                 intr_context->handler = qlge_isr;
3394                                 sprintf(intr_context->name, "%s-rx-%d",
3395                                         qdev->ndev->name, i);
3396                         } else {
3397                                 /*
3398                                  * Inbound queues handle unicast frames only.
3399                                  */
3400                                 intr_context->handler = qlge_msix_rx_isr;
3401                                 sprintf(intr_context->name, "%s-rx-%d",
3402                                         qdev->ndev->name, i);
3403                         }
3404                 }
3405         } else {
3406                 /*
3407                  * All rx_rings use the same intr_context since
3408                  * there is only one vector.
3409                  */
3410                 intr_context->intr = 0;
3411                 intr_context->qdev = qdev;
3412                 /*
3413                  * We set up each vector's enable/disable/read bits so
3414                  * there are no bit/mask calculations in the critical path.
3415                  */
3416                 intr_context->intr_en_mask =
3417                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3418                 intr_context->intr_dis_mask =
3419                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420                     INTR_EN_TYPE_DISABLE;
3421                 intr_context->intr_read_mask =
3422                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3423                 /*
3424                  * Single interrupt means one handler for all rings.
3425                  */
3426                 intr_context->handler = qlge_isr;
3427                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3428                 /* Set up this vector's bit-mask that indicates
3429                  * which queues it services. In this case there is
3430                  * a single vector so it will service all RSS and
3431                  * TX completion rings.
3432                  */
3433                 ql_set_irq_mask(qdev, intr_context);
3434         }
3435         /* Tell the TX completion rings which MSIx vector
3436          * they will be using.
3437          */
3438         ql_set_tx_vect(qdev);
3439 }
3440
3441 static void ql_free_irq(struct ql_adapter *qdev)
3442 {
3443         int i;
3444         struct intr_context *intr_context = &qdev->intr_context[0];
3445
3446         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3447                 if (intr_context->hooked) {
3448                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3449                                 free_irq(qdev->msi_x_entry[i].vector,
3450                                          &qdev->rx_ring[i]);
3451                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3452                                              "freeing msix interrupt %d.\n", i);
3453                         } else {
3454                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3455                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3456                                              "freeing msi interrupt %d.\n", i);
3457                         }
3458                 }
3459         }
3460         ql_disable_msix(qdev);
3461 }
3462
3463 static int ql_request_irq(struct ql_adapter *qdev)
3464 {
3465         int i;
3466         int status = 0;
3467         struct pci_dev *pdev = qdev->pdev;
3468         struct intr_context *intr_context = &qdev->intr_context[0];
3469
3470         ql_resolve_queues_to_irqs(qdev);
3471
3472         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3473                 atomic_set(&intr_context->irq_cnt, 0);
3474                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3475                         status = request_irq(qdev->msi_x_entry[i].vector,
3476                                              intr_context->handler,
3477                                              0,
3478                                              intr_context->name,
3479                                              &qdev->rx_ring[i]);
3480                         if (status) {
3481                                 netif_err(qdev, ifup, qdev->ndev,
3482                                           "Failed request for MSIX interrupt %d.\n",
3483                                           i);
3484                                 goto err_irq;
3485                         } else {
3486                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3487                                              "Hooked intr %d, queue type %s, with name %s.\n",
3488                                              i,
3489                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3490                                              "DEFAULT_Q" :
3491                                              qdev->rx_ring[i].type == TX_Q ?
3492                                              "TX_Q" :
3493                                              qdev->rx_ring[i].type == RX_Q ?
3494                                              "RX_Q" : "",
3495                                              intr_context->name);
3496                         }
3497                 } else {
3498                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3499                                      "trying msi or legacy interrupts.\n");
3500                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501                                      "%s: irq = %d.\n", __func__, pdev->irq);
3502                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503                                      "%s: context->name = %s.\n", __func__,
3504                                      intr_context->name);
3505                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3506                                      "%s: dev_id = 0x%p.\n", __func__,
3507                                      &qdev->rx_ring[0]);
3508                         status =
3509                             request_irq(pdev->irq, qlge_isr,
3510                                         test_bit(QL_MSI_ENABLED,
3511                                                  &qdev->
3512                                                  flags) ? 0 : IRQF_SHARED,
3513                                         intr_context->name, &qdev->rx_ring[0]);
3514                         if (status)
3515                                 goto err_irq;
3516
3517                         netif_err(qdev, ifup, qdev->ndev,
3518                                   "Hooked intr %d, queue type %s, with name %s.\n",
3519                                   i,
3520                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3521                                   "DEFAULT_Q" :
3522                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3523                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3524                                   intr_context->name);
3525                 }
3526                 intr_context->hooked = 1;
3527         }
3528         return status;
3529 err_irq:
3530         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3531         ql_free_irq(qdev);
3532         return status;
3533 }
3534
3535 static int ql_start_rss(struct ql_adapter *qdev)
3536 {
3537         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3538                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3539                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3540                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3541                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3542                                 0xbe, 0xac, 0x01, 0xfa};
3543         struct ricb *ricb = &qdev->ricb;
3544         int status = 0;
3545         int i;
3546         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3547
3548         memset((void *)ricb, 0, sizeof(*ricb));
3549
3550         ricb->base_cq = RSS_L4K;
3551         ricb->flags =
3552                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3553         ricb->mask = cpu_to_le16((u16)(0x3ff));
3554
3555         /*
3556          * Fill out the Indirection Table.
3557          */
3558         for (i = 0; i < 1024; i++)
3559                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3560
3561         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3562         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3563
3564         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3565
3566         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3567         if (status) {
3568                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3569                 return status;
3570         }
3571         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3572                      "Successfully loaded RICB.\n");
3573         return status;
3574 }
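
/*
 * Indirection table example (hypothetical count): with rss_ring_count = 4
 * the loop above fills hash_cq_id[] with the repeating pattern
 * 0,1,2,3,0,1,2,3,... (i & 3), so a flow whose RSS hash selects bucket
 * 1021 is steered to inbound completion queue 1021 & 3 = 1.
 */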
3575
3576 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3577 {
3578         int i, status = 0;
3579
3580         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3581         if (status)
3582                 return status;
3583         /* Clear all the entries in the routing table. */
3584         for (i = 0; i < 16; i++) {
3585                 status = ql_set_routing_reg(qdev, i, 0, 0);
3586                 if (status) {
3587                         netif_err(qdev, ifup, qdev->ndev,
3588                                   "Failed to init routing register for CAM packets.\n");
3589                         break;
3590                 }
3591         }
3592         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3593         return status;
3594 }
3595
3596 /* Initialize the frame-to-queue routing. */
3597 static int ql_route_initialize(struct ql_adapter *qdev)
3598 {
3599         int status = 0;
3600
3601         /* Clear all the entries in the routing table. */
3602         status = ql_clear_routing_entries(qdev);
3603         if (status)
3604                 return status;
3605
3606         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3607         if (status)
3608                 return status;
3609
3610         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3611                                                 RT_IDX_IP_CSUM_ERR, 1);
3612         if (status) {
3613                 netif_err(qdev, ifup, qdev->ndev,
3614                         "Failed to init routing register "
3615                         "for IP CSUM error packets.\n");
3616                 goto exit;
3617         }
3618         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3619                                                 RT_IDX_TU_CSUM_ERR, 1);
3620         if (status) {
3621                 netif_err(qdev, ifup, qdev->ndev,
3622                         "Failed to init routing register "
3623                         "for TCP/UDP CSUM error packets.\n");
3624                 goto exit;
3625         }
3626         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3627         if (status) {
3628                 netif_err(qdev, ifup, qdev->ndev,
3629                           "Failed to init routing register for broadcast packets.\n");
3630                 goto exit;
3631         }
3632         /* If we have more than one inbound queue, then turn on RSS in the
3633          * routing block.
3634          */
3635         if (qdev->rss_ring_count > 1) {
3636                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3637                                         RT_IDX_RSS_MATCH, 1);
3638                 if (status) {
3639                         netif_err(qdev, ifup, qdev->ndev,
3640                                   "Failed to init routing register for MATCH RSS packets.\n");
3641                         goto exit;
3642                 }
3643         }
3644
3645         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3646                                     RT_IDX_CAM_HIT, 1);
3647         if (status)
3648                 netif_err(qdev, ifup, qdev->ndev,
3649                           "Failed to init routing register for CAM packets.\n");
3650 exit:
3651         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3652         return status;
3653 }
3654
3655 int ql_cam_route_initialize(struct ql_adapter *qdev)
3656 {
3657         int status, set;
3658
3659         /* Check if the link is up and use that to
3660          * determine whether we are setting or clearing
3661          * the MAC address in the CAM.
3662          */
3663         set = ql_read32(qdev, STS);
3664         set &= qdev->port_link_up;
3665         status = ql_set_mac_addr(qdev, set);
3666         if (status) {
3667                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3668                 return status;
3669         }
3670
3671         status = ql_route_initialize(qdev);
3672         if (status)
3673                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3674
3675         return status;
3676 }
3677
3678 static int ql_adapter_initialize(struct ql_adapter *qdev)
3679 {
3680         u32 value, mask;
3681         int i;
3682         int status = 0;
3683
3684         /*
3685          * Set up the System register to halt on errors.
3686          */
3687         value = SYS_EFE | SYS_FAE;
3688         mask = value << 16;
3689         ql_write32(qdev, SYS, mask | value);
3690
3691         /* Set the default queue, and VLAN behavior. */
3692         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3693         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3694         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3695
3696         /* Set the MPI interrupt to enabled. */
3697         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3698
3699         /* Enable the function, set pagesize, enable error checking. */
3700         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3701             FSC_EC | FSC_VM_PAGE_4K;
3702         value |= SPLT_SETTING;
3703
3704         /* Set/clear header splitting. */
3705         mask = FSC_VM_PAGESIZE_MASK |
3706             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3707         ql_write32(qdev, FSC, mask | value);
3708
3709         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3710
3711         /* Set RX packet routing to use the port/PCI function on which the
3712          * packet arrived, in addition to the usual frame routing.
3713          * This is helpful on bonding where both interfaces can have
3714          * the same MAC address.
3715          */
3716         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3717         /* Reroute all packets to our Interface.
3718          * They may have been routed to MPI firmware
3719          * due to WOL.
3720          */
3721         value = ql_read32(qdev, MGMT_RCV_CFG);
3722         value &= ~MGMT_RCV_CFG_RM;
3723         mask = 0xffff0000;
3724
3725         /* Sticky reg needs clearing due to WOL. */
3726         ql_write32(qdev, MGMT_RCV_CFG, mask);
3727         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3728
3729         /* Default WOL is enabled on Mezz cards */
3730         if (qdev->pdev->subsystem_device == 0x0068 ||
3731                         qdev->pdev->subsystem_device == 0x0180)
3732                 qdev->wol = WAKE_MAGIC;
3733
3734         /* Start up the rx queues. */
3735         for (i = 0; i < qdev->rx_ring_count; i++) {
3736                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3737                 if (status) {
3738                         netif_err(qdev, ifup, qdev->ndev,
3739                                   "Failed to start rx ring[%d].\n", i);
3740                         return status;
3741                 }
3742         }
3743
3744         /* If there is more than one inbound completion queue
3745          * then download a RICB to configure RSS.
3746          */
3747         if (qdev->rss_ring_count > 1) {
3748                 status = ql_start_rss(qdev);
3749                 if (status) {
3750                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3751                         return status;
3752                 }
3753         }
3754
3755         /* Start up the tx queues. */
3756         for (i = 0; i < qdev->tx_ring_count; i++) {
3757                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3758                 if (status) {
3759                         netif_err(qdev, ifup, qdev->ndev,
3760                                   "Failed to start tx ring[%d].\n", i);
3761                         return status;
3762                 }
3763         }
3764
3765         /* Initialize the port and set the max framesize. */
3766         status = qdev->nic_ops->port_initialize(qdev);
3767         if (status)
3768                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3769
3770         /* Set up the MAC address and frame routing filter. */
3771         status = ql_cam_route_initialize(qdev);
3772         if (status) {
3773                 netif_err(qdev, ifup, qdev->ndev,
3774                           "Failed to init CAM/Routing tables.\n");
3775                 return status;
3776         }
3777
3778         /* Start NAPI for the RSS queues. */
3779         for (i = 0; i < qdev->rss_ring_count; i++) {
3780                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3781                              "Enabling NAPI for rx_ring[%d].\n", i);
3782                 napi_enable(&qdev->rx_ring[i].napi);
3783         }
3784
3785         return status;
3786 }
3787
3788 /* Issue soft reset to chip. */
3789 static int ql_adapter_reset(struct ql_adapter *qdev)
3790 {
3791         u32 value;
3792         int status = 0;
3793         unsigned long end_jiffies;
3794
3795         /* Clear all the entries in the routing table. */
3796         status = ql_clear_routing_entries(qdev);
3797         if (status) {
3798                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3799                 return status;
3800         }
3801
3802         end_jiffies = jiffies +
3803                 max((unsigned long)1, usecs_to_jiffies(30));
3804
3805         /* Stop management traffic. */
3806         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3807
3808         /* Wait for the NIC and MGMNT FIFOs to empty. */
3809         ql_wait_fifo_empty(qdev);
3810
3811         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3812
3813         do {
3814                 value = ql_read32(qdev, RST_FO);
3815                 if ((value & RST_FO_FR) == 0)
3816                         break;
3817                 cpu_relax();
3818         } while (time_before(jiffies, end_jiffies));
3819
3820         if (value & RST_FO_FR) {
3821                 netif_err(qdev, ifdown, qdev->ndev,
3822                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3823                 status = -ETIMEDOUT;
3824         }
3825
3826         /* Resume management traffic. */
3827         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3828         return status;
3829 }
3830
3831 static void ql_display_dev_info(struct net_device *ndev)
3832 {
3833         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3834
3835         netif_info(qdev, probe, qdev->ndev,
3836                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3837                    "XG Roll = %d, XG Rev = %d.\n",
3838                    qdev->func,
3839                    qdev->port,
3840                    qdev->chip_rev_id & 0x0000000f,
3841                    qdev->chip_rev_id >> 4 & 0x0000000f,
3842                    qdev->chip_rev_id >> 8 & 0x0000000f,
3843                    qdev->chip_rev_id >> 12 & 0x0000000f);
3844         netif_info(qdev, probe, qdev->ndev,
3845                    "MAC address %pM\n", ndev->dev_addr);
3846 }
3847
3848 static int ql_wol(struct ql_adapter *qdev)
3849 {
3850         int status = 0;
3851         u32 wol = MB_WOL_DISABLE;
3852
3853         /* The CAM is still intact after a reset, but if we
3854          * are doing WOL, then we may need to program the
3855          * routing regs. We would also need to issue the mailbox
3856          * commands to instruct the MPI what to do per the ethtool
3857          * settings.
3858          */
3859
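        /* Only magic-packet wake is supported here; reject any other
         * wake type that may have been requested via ethtool.
         */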
3860         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3861                         WAKE_MCAST | WAKE_BCAST)) {
3862                 netif_err(qdev, ifdown, qdev->ndev,
3863                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3864                           qdev->wol);
3865                 return -EINVAL;
3866         }
3867
3868         if (qdev->wol & WAKE_MAGIC) {
3869                 status = ql_mb_wol_set_magic(qdev, 1);
3870                 if (status) {
3871                         netif_err(qdev, ifdown, qdev->ndev,
3872                                   "Failed to set magic packet on %s.\n",
3873                                   qdev->ndev->name);
3874                         return status;
3875                 } else
3876                         netif_info(qdev, drv, qdev->ndev,
3877                                    "Enabled magic packet successfully on %s.\n",
3878                                    qdev->ndev->name);
3879
3880                 wol |= MB_WOL_MAGIC_PKT;
3881         }
3882
3883         if (qdev->wol) {
3884                 wol |= MB_WOL_MODE_ON;
3885                 status = ql_mb_wol_mode(qdev, wol);
3886                 netif_err(qdev, drv, qdev->ndev,
3887                           "WOL %s (wol code 0x%x) on %s\n",
3888                           (status == 0) ? "Successfully set" : "Failed",
3889                           wol, qdev->ndev->name);
3890         }
3891
3892         return status;
3893 }
3894
3895 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3896 {
3897
3898         /* Don't kill the reset worker thread if we
3899          * are in the process of recovery.
3900          */
3901         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3902                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3903         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3904         cancel_delayed_work_sync(&qdev->mpi_work);
3905         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3906         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3907         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3908 }
3909
3910 static int ql_adapter_down(struct ql_adapter *qdev)
3911 {
3912         int i, status = 0;
3913
3914         ql_link_off(qdev);
3915
3916         ql_cancel_all_work_sync(qdev);
3917
3918         for (i = 0; i < qdev->rss_ring_count; i++)
3919                 napi_disable(&qdev->rx_ring[i].napi);
3920
3921         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3922
3923         ql_disable_interrupts(qdev);
3924
3925         ql_tx_ring_clean(qdev);
3926
3927         /* Call netif_napi_del() from a common point.
3928          */
3929         for (i = 0; i < qdev->rss_ring_count; i++)
3930                 netif_napi_del(&qdev->rx_ring[i].napi);
3931
3932         status = ql_adapter_reset(qdev);
3933         if (status)
3934                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3935                           qdev->func);
3936         ql_free_rx_buffers(qdev);
3937
3938         return status;
3939 }
3940
3941 static int ql_adapter_up(struct ql_adapter *qdev)
3942 {
3943         int err = 0;
3944
3945         err = ql_adapter_initialize(qdev);
3946         if (err) {
3947                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3948                 goto err_init;
3949         }
3950         set_bit(QL_ADAPTER_UP, &qdev->flags);
3951         ql_alloc_rx_buffers(qdev);
3952         /* If the port is initialized and the
3953          * link is up then turn on the carrier.
3954          */
3955         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3956                         (ql_read32(qdev, STS) & qdev->port_link_up))
3957                 ql_link_on(qdev);
3958         /* Restore rx mode. */
3959         clear_bit(QL_ALLMULTI, &qdev->flags);
3960         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3961         qlge_set_multicast_list(qdev->ndev);
3962
3963         ql_enable_interrupts(qdev);
3964         ql_enable_all_completion_interrupts(qdev);
3965         netif_tx_start_all_queues(qdev->ndev);
3966
3967         return 0;
3968 err_init:
3969         ql_adapter_reset(qdev);
3970         return err;
3971 }
3972
3973 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3974 {
3975         ql_free_mem_resources(qdev);
3976         ql_free_irq(qdev);
3977 }
3978
3979 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3980 {
3981         int status = 0;
3982
3983         if (ql_alloc_mem_resources(qdev)) {
3984                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3985                 return -ENOMEM;
3986         }
3987         status = ql_request_irq(qdev);
3988         return status;
3989 }
3990
3991 static int qlge_close(struct net_device *ndev)
3992 {
3993         struct ql_adapter *qdev = netdev_priv(ndev);
3994
3995         /* If we hit pci_channel_io_perm_failure
3996          * failure condition, then we already
3997          * brought the adapter down.
3998          */
3999         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4000                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4001                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4002                 return 0;
4003         }
4004
4005         /*
4006          * Wait for device to recover from a reset.
4007          * (Rarely happens, but possible.)
4008          */
4009         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4010                 msleep(1);
4011         ql_adapter_down(qdev);
4012         ql_release_adapter_resources(qdev);
4013         return 0;
4014 }
4015
4016 static int ql_configure_rings(struct ql_adapter *qdev)
4017 {
4018         int i;
4019         struct rx_ring *rx_ring;
4020         struct tx_ring *tx_ring;
4021         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4022         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4023                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4024
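        /* Large receive buffers are carved from whole pages; remember the
         * page allocation order that matches the buffer size chosen above.
         */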
4025         qdev->lbq_buf_order = get_order(lbq_buf_len);
4026
4027         /* In a perfect world we have one RSS ring for each CPU
4028          * and each has its own vector.  To do that we ask for
4029          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4030          * vector count to what we actually get.  We then
4031          * allocate an RSS ring for each.
4032          * Essentially, we are doing min(cpu_count, msix_vector_count).
4033          */
4034         qdev->intr_count = cpu_cnt;
4035         ql_enable_msix(qdev);
4036         /* Adjust the RSS ring count to the actual vector count. */
4037         qdev->rss_ring_count = qdev->intr_count;
4038         qdev->tx_ring_count = cpu_cnt;
4039         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
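        /* Resulting rx_ring[] layout: entries 0..rss_ring_count-1 are the
         * inbound RSS queues; the remaining tx_ring_count entries are
         * completion queues that service outbound completions only.
         */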
4040
4041         for (i = 0; i < qdev->tx_ring_count; i++) {
4042                 tx_ring = &qdev->tx_ring[i];
4043                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4044                 tx_ring->qdev = qdev;
4045                 tx_ring->wq_id = i;
4046                 tx_ring->wq_len = qdev->tx_ring_size;
4047                 tx_ring->wq_size =
4048                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4049
4050                 /*
4051                  * The completion queue IDs for the tx rings start
4052                  * immediately after the rss rings.
4053                  */
4054                 tx_ring->cq_id = qdev->rss_ring_count + i;
4055         }
4056
4057         for (i = 0; i < qdev->rx_ring_count; i++) {
4058                 rx_ring = &qdev->rx_ring[i];
4059                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4060                 rx_ring->qdev = qdev;
4061                 rx_ring->cq_id = i;
4062                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4063                 if (i < qdev->rss_ring_count) {
4064                         /*
4065                          * Inbound (RSS) queues.
4066                          */
4067                         rx_ring->cq_len = qdev->rx_ring_size;
4068                         rx_ring->cq_size =
4069                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4070                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4071                         rx_ring->lbq_size =
4072                             rx_ring->lbq_len * sizeof(__le64);
4073                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4074                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4075                                      "lbq_buf_size %d, order = %d\n",
4076                                      rx_ring->lbq_buf_size,
4077                                      qdev->lbq_buf_order);
4078                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4079                         rx_ring->sbq_size =
4080                             rx_ring->sbq_len * sizeof(__le64);
4081                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4082                         rx_ring->type = RX_Q;
4083                 } else {
4084                         /*
4085                          * Outbound queue handles outbound completions only.
4086                          */
4087                         /* outbound cq is same size as tx_ring it services. */
4088                         rx_ring->cq_len = qdev->tx_ring_size;
4089                         rx_ring->cq_size =
4090                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4091                         rx_ring->lbq_len = 0;
4092                         rx_ring->lbq_size = 0;
4093                         rx_ring->lbq_buf_size = 0;
4094                         rx_ring->sbq_len = 0;
4095                         rx_ring->sbq_size = 0;
4096                         rx_ring->sbq_buf_size = 0;
4097                         rx_ring->type = TX_Q;
4098                 }
4099         }
4100         return 0;
4101 }
4102
4103 static int qlge_open(struct net_device *ndev)
4104 {
4105         int err = 0;
4106         struct ql_adapter *qdev = netdev_priv(ndev);
4107
4108         err = ql_adapter_reset(qdev);
4109         if (err)
4110                 return err;
4111
4112         err = ql_configure_rings(qdev);
4113         if (err)
4114                 return err;
4115
4116         err = ql_get_adapter_resources(qdev);
4117         if (err)
4118                 goto error_up;
4119
4120         err = ql_adapter_up(qdev);
4121         if (err)
4122                 goto error_up;
4123
4124         return err;
4125
4126 error_up:
4127         ql_release_adapter_resources(qdev);
4128         return err;
4129 }
4130
4131 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4132 {
4133         struct rx_ring *rx_ring;
4134         int i, status;
4135         u32 lbq_buf_len;
4136
4137         /* Wait for an outstanding reset to complete. */
4138         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4139                 int i = 3;
4140                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4141                         netif_err(qdev, ifup, qdev->ndev,
4142                                   "Waiting for adapter UP...\n");
4143                         ssleep(1);
4144                 }
4145
4146                 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4147                         netif_err(qdev, ifup, qdev->ndev,
4148                                   "Timed out waiting for adapter UP\n");
4149                         return -ETIMEDOUT;
4150                 }
4151         }
4152
4153         status = ql_adapter_down(qdev);
4154         if (status)
4155                 goto error;
4156
4157         /* Get the new rx buffer size. */
4158         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4159                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4160         qdev->lbq_buf_order = get_order(lbq_buf_len);
4161
4162         for (i = 0; i < qdev->rss_ring_count; i++) {
4163                 rx_ring = &qdev->rx_ring[i];
4164                 /* Set the new size. */
4165                 rx_ring->lbq_buf_size = lbq_buf_len;
4166         }
4167
4168         status = ql_adapter_up(qdev);
4169         if (status)
4170                 goto error;
4171
4172         return status;
4173 error:
4174         netif_alert(qdev, ifup, qdev->ndev,
4175                     "Driver up/down cycle failed, closing device.\n");
4176         set_bit(QL_ADAPTER_UP, &qdev->flags);
4177         dev_close(qdev->ndev);
4178         return status;
4179 }
4180
4181 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4182 {
4183         struct ql_adapter *qdev = netdev_priv(ndev);
4184         int status;
4185
4186         if (ndev->mtu == 1500 && new_mtu == 9000) {
4187                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4188         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4189                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4190         } else
4191                 return -EINVAL;
4192
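        /* Schedule the MPI port-config worker, which presumably lets the
         * firmware pick up the new max frame size once the change settles.
         */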
4193         queue_delayed_work(qdev->workqueue,
4194                         &qdev->mpi_port_cfg_work, 3*HZ);
4195
4196         ndev->mtu = new_mtu;
4197
4198         if (!netif_running(qdev->ndev)) {
4199                 return 0;
4200         }
4201
4202         status = ql_change_rx_buffers(qdev);
4203         if (status) {
4204                 netif_err(qdev, ifup, qdev->ndev,
4205                           "Changing MTU failed.\n");
4206         }
4207
4208         return status;
4209 }
4210
4211 static struct net_device_stats *qlge_get_stats(struct net_device
4212                                                *ndev)
4213 {
4214         struct ql_adapter *qdev = netdev_priv(ndev);
4215         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4216         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4217         unsigned long pkts, mcast, dropped, errors, bytes;
4218         int i;
4219
4220         /* Get RX stats. */
4221         pkts = mcast = dropped = errors = bytes = 0;
4222         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4223                 pkts += rx_ring->rx_packets;
4224                 bytes += rx_ring->rx_bytes;
4225                 dropped += rx_ring->rx_dropped;
4226                 errors += rx_ring->rx_errors;
4227                 mcast += rx_ring->rx_multicast;
4228         }
4229         ndev->stats.rx_packets = pkts;
4230         ndev->stats.rx_bytes = bytes;
4231         ndev->stats.rx_dropped = dropped;
4232         ndev->stats.rx_errors = errors;
4233         ndev->stats.multicast = mcast;
4234
4235         /* Get TX stats. */
4236         pkts = errors = bytes = 0;
4237         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4238                 pkts += tx_ring->tx_packets;
4239                 bytes += tx_ring->tx_bytes;
4240                 errors += tx_ring->tx_errors;
4241         }
4242         ndev->stats.tx_packets = pkts;
4243         ndev->stats.tx_bytes = bytes;
4244         ndev->stats.tx_errors = errors;
4245         return &ndev->stats;
4246 }
4247
4248 static void qlge_set_multicast_list(struct net_device *ndev)
4249 {
4250         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4251         struct netdev_hw_addr *ha;
4252         int i, status;
4253
4254         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4255         if (status)
4256                 return;
4257         /*
4258          * Set or clear promiscuous mode if a
4259          * transition is taking place.
4260          */
4261         if (ndev->flags & IFF_PROMISC) {
4262                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4263                         if (ql_set_routing_reg
4264                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4265                                 netif_err(qdev, hw, qdev->ndev,
4266                                           "Failed to set promiscuous mode.\n");
4267                         } else {
4268                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4269                         }
4270                 }
4271         } else {
4272                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4273                         if (ql_set_routing_reg
4274                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4275                                 netif_err(qdev, hw, qdev->ndev,
4276                                           "Failed to clear promiscuous mode.\n");
4277                         } else {
4278                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4279                         }
4280                 }
4281         }
4282
4283         /*
4284          * Set or clear all multicast mode if a
4285          * transition is taking place.
4286          */
4287         if ((ndev->flags & IFF_ALLMULTI) ||
4288             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4289                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4290                         if (ql_set_routing_reg
4291                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4292                                 netif_err(qdev, hw, qdev->ndev,
4293                                           "Failed to set all-multi mode.\n");
4294                         } else {
4295                                 set_bit(QL_ALLMULTI, &qdev->flags);
4296                         }
4297                 }
4298         } else {
4299                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4300                         if (ql_set_routing_reg
4301                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4302                                 netif_err(qdev, hw, qdev->ndev,
4303                                           "Failed to clear all-multi mode.\n");
4304                         } else {
4305                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4306                         }
4307                 }
4308         }
4309
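        /* Load each multicast address into the MAC address CAM and then
         * turn on multicast-match routing.
         */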
4310         if (!netdev_mc_empty(ndev)) {
4311                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4312                 if (status)
4313                         goto exit;
4314                 i = 0;
4315                 netdev_for_each_mc_addr(ha, ndev) {
4316                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4317                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4318                                 netif_err(qdev, hw, qdev->ndev,
4319                                           "Failed to load multicast address.\n");
4320                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4321                                 goto exit;
4322                         }
4323                         i++;
4324                 }
4325                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4326                 if (ql_set_routing_reg
4327                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4328                         netif_err(qdev, hw, qdev->ndev,
4329                                   "Failed to set multicast match mode.\n");
4330                 } else {
4331                         set_bit(QL_ALLMULTI, &qdev->flags);
4332                 }
4333         }
4334 exit:
4335         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4336 }
4337
4338 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4339 {
4340         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4341         struct sockaddr *addr = p;
4342         int status;
4343
4344         if (!is_valid_ether_addr(addr->sa_data))
4345                 return -EADDRNOTAVAIL;
4346         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4347         /* Update local copy of current mac address. */
4348         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4349
4350         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4351         if (status)
4352                 return status;
4353         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4354                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4355         if (status)
4356                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4357         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4358         return status;
4359 }
4360
4361 static void qlge_tx_timeout(struct net_device *ndev)
4362 {
4363         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4364         ql_queue_asic_error(qdev);
4365 }
4366
4367 static void ql_asic_reset_work(struct work_struct *work)
4368 {
4369         struct ql_adapter *qdev =
4370             container_of(work, struct ql_adapter, asic_reset_work.work);
4371         int status;
4372         rtnl_lock();
4373         status = ql_adapter_down(qdev);
4374         if (status)
4375                 goto error;
4376
4377         status = ql_adapter_up(qdev);
4378         if (status)
4379                 goto error;
4380
4381         /* Restore rx mode. */
4382         clear_bit(QL_ALLMULTI, &qdev->flags);
4383         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4384         qlge_set_multicast_list(qdev->ndev);
4385
4386         rtnl_unlock();
4387         return;
4388 error:
4389         netif_alert(qdev, ifup, qdev->ndev,
4390                     "Driver up/down cycle failed, closing device\n");
4391
4392         set_bit(QL_ADAPTER_UP, &qdev->flags);
4393         dev_close(qdev->ndev);
4394         rtnl_unlock();
4395 }
4396
4397 static struct nic_operations qla8012_nic_ops = {
4398         .get_flash              = ql_get_8012_flash_params,
4399         .port_initialize        = ql_8012_port_initialize,
4400 };
4401
4402 static struct nic_operations qla8000_nic_ops = {
4403         .get_flash              = ql_get_8000_flash_params,
4404         .port_initialize        = ql_8000_port_initialize,
4405 };
4406
4407 /* Find the pcie function number for the other NIC
4408  * on this chip.  Since both NIC functions share a
4409  * common firmware we have the lowest enabled function
4410  * do any common work.  Examples would be resetting
4411  * after a fatal firmware error, or doing a firmware
4412  * coredump.
4413  */
4414 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4415 {
4416         int status = 0;
4417         u32 temp;
4418         u32 nic_func1, nic_func2;
4419
4420         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4421                         &temp);
4422         if (status)
4423                 return status;
4424
4425         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4426                         MPI_TEST_NIC_FUNC_MASK);
4427         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4428                         MPI_TEST_NIC_FUNC_MASK);
4429
4430         if (qdev->func == nic_func1)
4431                 qdev->alt_func = nic_func2;
4432         else if (qdev->func == nic_func2)
4433                 qdev->alt_func = nic_func1;
4434         else
4435                 status = -EIO;
4436
4437         return status;
4438 }
4439
4440 static int ql_get_board_info(struct ql_adapter *qdev)
4441 {
4442         int status;
4443         qdev->func =
4444             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4445         if (qdev->func > 3)
4446                 return -EIO;
4447
4448         status = ql_get_alt_pcie_func(qdev);
4449         if (status)
4450                 return status;
4451
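        /* The lower numbered of the two NIC functions owns port 0; the
         * per-port register masks and mailbox offsets follow from that.
         */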
4452         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4453         if (qdev->port) {
4454                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4455                 qdev->port_link_up = STS_PL1;
4456                 qdev->port_init = STS_PI1;
4457                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4458                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4459         } else {
4460                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4461                 qdev->port_link_up = STS_PL0;
4462                 qdev->port_init = STS_PI0;
4463                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4464                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4465         }
4466         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4467         qdev->device_id = qdev->pdev->device;
4468         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4469                 qdev->nic_ops = &qla8012_nic_ops;
4470         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4471                 qdev->nic_ops = &qla8000_nic_ops;
4472         return status;
4473 }
4474
4475 static void ql_release_all(struct pci_dev *pdev)
4476 {
4477         struct net_device *ndev = pci_get_drvdata(pdev);
4478         struct ql_adapter *qdev = netdev_priv(ndev);
4479
4480         if (qdev->workqueue) {
4481                 destroy_workqueue(qdev->workqueue);
4482                 qdev->workqueue = NULL;
4483         }
4484
4485         if (qdev->reg_base)
4486                 iounmap(qdev->reg_base);
4487         if (qdev->doorbell_area)
4488                 iounmap(qdev->doorbell_area);
4489         vfree(qdev->mpi_coredump);
4490         pci_release_regions(pdev);
4491         pci_set_drvdata(pdev, NULL);
4492 }
4493
4494 static int __devinit ql_init_device(struct pci_dev *pdev,
4495                                     struct net_device *ndev, int cards_found)
4496 {
4497         struct ql_adapter *qdev = netdev_priv(ndev);
4498         int err = 0;
4499
4500         memset((void *)qdev, 0, sizeof(*qdev));
4501         err = pci_enable_device(pdev);
4502         if (err) {
4503                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4504                 return err;
4505         }
4506
4507         qdev->ndev = ndev;
4508         qdev->pdev = pdev;
4509         pci_set_drvdata(pdev, ndev);
4510
4511         /* Set PCIe read request size */
4512         err = pcie_set_readrq(pdev, 4096);
4513         if (err) {
4514                 dev_err(&pdev->dev, "Set readrq failed.\n");
4515                 goto err_out1;
4516         }
4517
4518         err = pci_request_regions(pdev, DRV_NAME);
4519         if (err) {
4520                 dev_err(&pdev->dev, "PCI region request failed.\n");
4521                 goto err_out1;
4522         }
4523
4524         pci_set_master(pdev);
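        /* Prefer 64-bit DMA.  QL_DMA64 is checked later during probe to
         * enable NETIF_F_HIGHDMA; otherwise fall back to a 32-bit mask.
         */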
4525         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4526                 set_bit(QL_DMA64, &qdev->flags);
4527                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4528         } else {
4529                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4530                 if (!err)
4531                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4532         }
4533
4534         if (err) {
4535                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4536                 goto err_out2;
4537         }
4538
4539         /* Set PCIe reset type for EEH to fundamental. */
4540         pdev->needs_freset = 1;
4541         pci_save_state(pdev);
4542         qdev->reg_base =
4543             ioremap_nocache(pci_resource_start(pdev, 1),
4544                             pci_resource_len(pdev, 1));
4545         if (!qdev->reg_base) {
4546                 dev_err(&pdev->dev, "Register mapping failed.\n");
4547                 err = -ENOMEM;
4548                 goto err_out2;
4549         }
4550
4551         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4552         qdev->doorbell_area =
4553             ioremap_nocache(pci_resource_start(pdev, 3),
4554                             pci_resource_len(pdev, 3));
4555         if (!qdev->doorbell_area) {
4556                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4557                 err = -ENOMEM;
4558                 goto err_out2;
4559         }
4560
4561         err = ql_get_board_info(qdev);
4562         if (err) {
4563                 dev_err(&pdev->dev, "Register access failed.\n");
4564                 err = -EIO;
4565                 goto err_out2;
4566         }
4567         qdev->msg_enable = netif_msg_init(debug, default_msg);
4568         spin_lock_init(&qdev->hw_lock);
4569         spin_lock_init(&qdev->stats_lock);
4570
4571         if (qlge_mpi_coredump) {
4572                 qdev->mpi_coredump =
4573                         vmalloc(sizeof(struct ql_mpi_coredump));
4574                 if (qdev->mpi_coredump == NULL) {
4575                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4576                         err = -ENOMEM;
4577                         goto err_out2;
4578                 }
4579                 if (qlge_force_coredump)
4580                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4581         }
4582         /* make sure the EEPROM is good */
4583         err = qdev->nic_ops->get_flash(qdev);
4584         if (err) {
4585                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4586                 goto err_out2;
4587         }
4588
4589         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4590         /* Keep local copy of current mac address. */
4591         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4592
4593         /* Set up the default ring sizes. */
4594         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4595         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4596
4597         /* Set up the coalescing parameters. */
4598         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4599         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4600         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4601         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4602
4603         /*
4604          * Set up the operating parameters.
4605          */
4606         qdev->rx_csum = 1;
4607         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4608         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4609         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4610         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4611         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4612         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4613         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4614         init_completion(&qdev->ide_completion);
4615
4616         if (!cards_found) {
4617                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4618                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4619                          DRV_NAME, DRV_VERSION);
4620         }
4621         return 0;
4622 err_out2:
4623         ql_release_all(pdev);
4624 err_out1:
4625         pci_disable_device(pdev);
4626         return err;
4627 }
4628
4629 static const struct net_device_ops qlge_netdev_ops = {
4630         .ndo_open               = qlge_open,
4631         .ndo_stop               = qlge_close,
4632         .ndo_start_xmit         = qlge_send,
4633         .ndo_change_mtu         = qlge_change_mtu,
4634         .ndo_get_stats          = qlge_get_stats,
4635         .ndo_set_multicast_list = qlge_set_multicast_list,
4636         .ndo_set_mac_address    = qlge_set_mac_address,
4637         .ndo_validate_addr      = eth_validate_addr,
4638         .ndo_tx_timeout         = qlge_tx_timeout,
4639         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4640         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4641         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4642 };
4643
4644 static void ql_timer(unsigned long data)
4645 {
4646         struct ql_adapter *qdev = (struct ql_adapter *)data;
4647         u32 var = 0;
4648
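        /* The register read forces a PCI transaction, which is what lets
         * EEH notice a dead bus.  Once the channel is reported offline,
         * stop re-arming the timer.
         */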
4649         var = ql_read32(qdev, STS);
4650         if (pci_channel_offline(qdev->pdev)) {
4651                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4652                 return;
4653         }
4654
4655         mod_timer(&qdev->timer, jiffies + (5*HZ));
4656 }
4657
4658 static int __devinit qlge_probe(struct pci_dev *pdev,
4659                                 const struct pci_device_id *pci_entry)
4660 {
4661         struct net_device *ndev = NULL;
4662         struct ql_adapter *qdev = NULL;
4663         static int cards_found = 0;
4664         int err = 0;
4665
4666         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4667                         min(MAX_CPUS, (int)num_online_cpus()));
4668         if (!ndev)
4669                 return -ENOMEM;
4670
4671         err = ql_init_device(pdev, ndev, cards_found);
4672         if (err < 0) {
4673                 free_netdev(ndev);
4674                 return err;
4675         }
4676
4677         qdev = netdev_priv(ndev);
4678         SET_NETDEV_DEV(ndev, &pdev->dev);
4679         ndev->features = (0
4680                           | NETIF_F_IP_CSUM
4681                           | NETIF_F_SG
4682                           | NETIF_F_TSO
4683                           | NETIF_F_TSO6
4684                           | NETIF_F_TSO_ECN
4685                           | NETIF_F_HW_VLAN_TX
4686                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4687         ndev->features |= NETIF_F_GRO;
4688
4689         if (test_bit(QL_DMA64, &qdev->flags))
4690                 ndev->features |= NETIF_F_HIGHDMA;
4691
4692         /*
4693          * Set up net_device structure.
4694          */
4695         ndev->tx_queue_len = qdev->tx_ring_size;
4696         ndev->irq = pdev->irq;
4697
4698         ndev->netdev_ops = &qlge_netdev_ops;
4699         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4700         ndev->watchdog_timeo = 10 * HZ;
4701
4702         err = register_netdev(ndev);
4703         if (err) {
4704                 dev_err(&pdev->dev, "net device registration failed.\n");
4705                 ql_release_all(pdev);
4706                 pci_disable_device(pdev);
4707                 return err;
4708         }
4709         /* Start up the timer to trigger EEH if
4710          * the bus goes dead
4711          */
4712         init_timer_deferrable(&qdev->timer);
4713         qdev->timer.data = (unsigned long)qdev;
4714         qdev->timer.function = ql_timer;
4715         qdev->timer.expires = jiffies + (5*HZ);
4716         add_timer(&qdev->timer);
4717         ql_link_off(qdev);
4718         ql_display_dev_info(ndev);
4719         atomic_set(&qdev->lb_count, 0);
4720         cards_found++;
4721         return 0;
4722 }
4723
4724 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4725 {
4726         return qlge_send(skb, ndev);
4727 }
4728
4729 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4730 {
4731         return ql_clean_inbound_rx_ring(rx_ring, budget);
4732 }
4733
4734 static void __devexit qlge_remove(struct pci_dev *pdev)
4735 {
4736         struct net_device *ndev = pci_get_drvdata(pdev);
4737         struct ql_adapter *qdev = netdev_priv(ndev);
4738         del_timer_sync(&qdev->timer);
4739         ql_cancel_all_work_sync(qdev);
4740         unregister_netdev(ndev);
4741         ql_release_all(pdev);
4742         pci_disable_device(pdev);
4743         free_netdev(ndev);
4744 }
4745
4746 /* Clean up resources without touching hardware. */
4747 static void ql_eeh_close(struct net_device *ndev)
4748 {
4749         int i;
4750         struct ql_adapter *qdev = netdev_priv(ndev);
4751
4752         if (netif_carrier_ok(ndev)) {
4753                 netif_carrier_off(ndev);
4754                 netif_stop_queue(ndev);
4755         }
4756
4757         /* Disable the timer. */
4758         del_timer_sync(&qdev->timer);
4759         ql_cancel_all_work_sync(qdev);
4760
4761         for (i = 0; i < qdev->rss_ring_count; i++)
4762                 netif_napi_del(&qdev->rx_ring[i].napi);
4763
4764         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4765         ql_tx_ring_clean(qdev);
4766         ql_free_rx_buffers(qdev);
4767         ql_release_adapter_resources(qdev);
4768 }
4769
4770 /*
4771  * This callback is called by the PCI subsystem whenever
4772  * a PCI bus error is detected.
4773  */
4774 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4775                                                enum pci_channel_state state)
4776 {
4777         struct net_device *ndev = pci_get_drvdata(pdev);
4778         struct ql_adapter *qdev = netdev_priv(ndev);
4779
4780         switch (state) {
4781         case pci_channel_io_normal:
4782                 return PCI_ERS_RESULT_CAN_RECOVER;
4783         case pci_channel_io_frozen:
4784                 netif_device_detach(ndev);
4785                 if (netif_running(ndev))
4786                         ql_eeh_close(ndev);
4787                 pci_disable_device(pdev);
4788                 return PCI_ERS_RESULT_NEED_RESET;
4789         case pci_channel_io_perm_failure:
4790                 dev_err(&pdev->dev,
4791                         "%s: pci_channel_io_perm_failure.\n", __func__);
4792                 ql_eeh_close(ndev);
4793                 set_bit(QL_EEH_FATAL, &qdev->flags);
4794                 return PCI_ERS_RESULT_DISCONNECT;
4795         }
4796
4797         /* Request a slot reset. */
4798         return PCI_ERS_RESULT_NEED_RESET;
4799 }
4800
4801 /*
4802  * This callback is called after the PCI bus has been reset.
4803  * Basically, this tries to restart the card from scratch.
4804  * This is a shortened version of the device probe/discovery code,
4805  * it resembles the first half of the probe routine.
4806  */
4807 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4808 {
4809         struct net_device *ndev = pci_get_drvdata(pdev);
4810         struct ql_adapter *qdev = netdev_priv(ndev);
4811
4812         pdev->error_state = pci_channel_io_normal;
4813
4814         pci_restore_state(pdev);
4815         if (pci_enable_device(pdev)) {
4816                 netif_err(qdev, ifup, qdev->ndev,
4817                           "Cannot re-enable PCI device after reset.\n");
4818                 return PCI_ERS_RESULT_DISCONNECT;
4819         }
4820         pci_set_master(pdev);
4821
4822         if (ql_adapter_reset(qdev)) {
4823                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4824                 set_bit(QL_EEH_FATAL, &qdev->flags);
4825                 return PCI_ERS_RESULT_DISCONNECT;
4826         }
4827
4828         return PCI_ERS_RESULT_RECOVERED;
4829 }
4830
4831 static void qlge_io_resume(struct pci_dev *pdev)
4832 {
4833         struct net_device *ndev = pci_get_drvdata(pdev);
4834         struct ql_adapter *qdev = netdev_priv(ndev);
4835         int err = 0;
4836
4837         if (netif_running(ndev)) {
4838                 err = qlge_open(ndev);
4839                 if (err) {
4840                         netif_err(qdev, ifup, qdev->ndev,
4841                                   "Device initialization failed after reset.\n");
4842                         return;
4843                 }
4844         } else {
4845                 netif_err(qdev, ifup, qdev->ndev,
4846                           "Device was not running prior to EEH.\n");
4847         }
4848         mod_timer(&qdev->timer, jiffies + (5*HZ));
4849         netif_device_attach(ndev);
4850 }
4851
4852 static struct pci_error_handlers qlge_err_handler = {
4853         .error_detected = qlge_io_error_detected,
4854         .slot_reset = qlge_io_slot_reset,
4855         .resume = qlge_io_resume,
4856 };
4857
4858 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4859 {
4860         struct net_device *ndev = pci_get_drvdata(pdev);
4861         struct ql_adapter *qdev = netdev_priv(ndev);
4862         int err;
4863
4864         netif_device_detach(ndev);
4865         del_timer_sync(&qdev->timer);
4866
4867         if (netif_running(ndev)) {
4868                 err = ql_adapter_down(qdev);
4869                 if (err)
4870                         return err;
4871         }
4872
4873         ql_wol(qdev);
4874         err = pci_save_state(pdev);
4875         if (err)
4876                 return err;
4877
4878         pci_disable_device(pdev);
4879
4880         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4881
4882         return 0;
4883 }
4884
4885 #ifdef CONFIG_PM
4886 static int qlge_resume(struct pci_dev *pdev)
4887 {
4888         struct net_device *ndev = pci_get_drvdata(pdev);
4889         struct ql_adapter *qdev = netdev_priv(ndev);
4890         int err;
4891
4892         pci_set_power_state(pdev, PCI_D0);
4893         pci_restore_state(pdev);
4894         err = pci_enable_device(pdev);
4895         if (err) {
4896                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4897                 return err;
4898         }
4899         pci_set_master(pdev);
4900
4901         pci_enable_wake(pdev, PCI_D3hot, 0);
4902         pci_enable_wake(pdev, PCI_D3cold, 0);
4903
4904         if (netif_running(ndev)) {
4905                 err = ql_adapter_up(qdev);
4906                 if (err)
4907                         return err;
4908         }
4909
4910         mod_timer(&qdev->timer, jiffies + (5*HZ));
4911         netif_device_attach(ndev);
4912
4913         return 0;
4914 }
4915 #endif /* CONFIG_PM */
4916
4917 static void qlge_shutdown(struct pci_dev *pdev)
4918 {
4919         qlge_suspend(pdev, PMSG_SUSPEND);
4920 }
4921
4922 static struct pci_driver qlge_driver = {
4923         .name = DRV_NAME,
4924         .id_table = qlge_pci_tbl,
4925         .probe = qlge_probe,
4926         .remove = __devexit_p(qlge_remove),
4927 #ifdef CONFIG_PM
4928         .suspend = qlge_suspend,
4929         .resume = qlge_resume,
4930 #endif
4931         .shutdown = qlge_shutdown,
4932         .err_handler = &qlge_err_handler
4933 };
4934
4935 static int __init qlge_init_module(void)
4936 {
4937         return pci_register_driver(&qlge_driver);
4938 }
4939
4940 static void __exit qlge_exit(void)
4941 {
4942         pci_unregister_driver(&qlge_driver);
4943 }
4944
4945 module_init(qlge_init_module);
4946 module_exit(qlge_exit);