[pandora-kernel.git] drivers/net/ethernet/qlogic/qlge/qlge_main.c (commit c92afcd912e23fe48a17c686e9355ba350585f15)
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - do not allocate memory.");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow forcing a firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
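/* These parameters are evaluated at module load time.  An illustrative
 * (hypothetical) invocation, forcing legacy interrupts and enabling the
 * MPI coredump buffer, would be:
 *
 *   modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1
 *
 * "debug" is a NETIF_MSG_* bitmask; leaving it at -1 keeps the
 * default_msg set above.
 */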
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCoE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
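/* Illustrative sketch (not part of the upstream driver): the usual caller
 * pattern for the semaphore helpers above.  Any access to a shared
 * resource is bracketed by ql_sem_spinlock()/ql_sem_unlock(); the
 * SEM_FLASH_MASK resource is just an example here.
 */
static inline int ql_sem_usage_sketch(struct ql_adapter *qdev)
{
        int status;

        status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
        if (status)
                return status;  /* timed out, another function owns it */
        /* ... touch the flash (or other shared resource) here ... */
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return 0;
}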
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialization
163  * process, but is also used by netdev callbacks such as
164  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status) {
                    /* don't leak the DMA mapping if the semaphore times out */
                    pci_unmap_single(qdev->pdev, map, size, direction);
235                 return status;
            }
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
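/* Illustrative sketch (not upstream code): how a completion-queue init
 * control block would be pushed to the chip with ql_write_cfg().  The
 * rx_ring->cqicb/cq_id fields and the CFG_LCQ ("load completion queue")
 * bit mirror the real ring-bringup code later in this file.
 */
static inline int ql_load_cq_sketch(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        return ql_write_cfg(qdev, &rx_ring->cqicb, sizeof(struct cqicb),
                            CFG_LCQ, rx_ring->cq_id);
}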
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
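/* CAM/MAC address entries are reached indirectly: the driver waits for
 * MAC_ADDR_MW (write-ready), writes the offset/index/type plus the
 * MAC_ADDR_RS read-select bit to MAC_ADDR_IDX, waits for MAC_ADDR_MR
 * (read-ready), then pulls one 32-bit word from MAC_ADDR_DATA.  A CAM
 * entry is three such words (lower MAC, upper MAC, output/routing word).
 */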
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                            and possibly the function id.  Right now we hardcode
413                            the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, the addr actually holds a bit that
429                          * either enables or disables the vlan id we are
430                          * addressing. It's either MAC_ADDR_E on or off.
431                          * That's bit-27 we're talking about.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing,
462  * especially in a bonding environment.
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init MAC address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
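/* Illustrative sketch (not upstream code): enabling the broadcast slot so
 * broadcast frames land on the default (slow-path) queue.  The real
 * callers take the RT_IDX semaphore around this, as shown here; the error
 * handling is only an example.
 */
static inline int ql_route_bcast_sketch(struct ql_adapter *qdev)
{
        int status;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return status;
        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}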
651
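/* INTR_EN (like the STS and CFG writes elsewhere in this file) follows the
 * chip's mask-write convention: the upper 16 bits of the value select which
 * of the lower 16 bits are actually updated.  Enabling therefore writes
 * (INTR_EN_EI << 16) | INTR_EN_EI, while disabling writes only
 * (INTR_EN_EI << 16) so the enable bit is cleared.
 */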
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're MSIX multi interrupts and
676                  * it's not the default (zeroeth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're MSIX multi interrupts and
700          * it's not the default (zeroeth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
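/* Illustrative sketch (not upstream code): the disable/enable pairing that
 * the comment above ql_enable_completion_interrupt() describes for the
 * non-MSI-X (or vector 0) case.  The deferred-work body is elided.
 */
static inline void ql_intr_defer_sketch(struct ql_adapter *qdev, u32 intr)
{
        /* Handler side: mask this vector; irq_cnt goes up. */
        ql_disable_completion_interrupt(qdev, intr);

        /* ... hand the work off to a worker / napi poll ... */

        /* Worker side: irq_cnt drops; the interrupt is unmasked only
         * when the last outstanding worker finishes.
         */
        ql_enable_completion_interrupt(qdev, intr);
}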
716
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does an atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
732
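/* Flash images begin with a four-character ASCII signature ("8000" or
 * "8012") and carry an additive 16-bit checksum: summing every
 * little-endian 16-bit word of a valid image yields zero, so any
 * non-zero residue below means the image is corrupt.
 */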
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770         /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu endian
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
893
894 /* XGMAC registers are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* XGMAC registers are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
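/* Illustrative sketch (not upstream code): reading one 64-bit XGMAC
 * statistics counter.  The 0x200 offset is only a placeholder; the real
 * offsets are defined in qlge.h and consumed by the statistics code.
 * Access is bracketed by the port's XGMAC semaphore, as the real
 * statistics path does.
 */
static inline int ql_read_stat_sketch(struct ql_adapter *qdev, u64 *val)
{
        int status;

        status = ql_sem_spinlock(qdev, qdev->xg_sem_mask);
        if (status)
                return status;
        status = ql_read_xgmac_reg64(qdev, 0x200, val);
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}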
958
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethtool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable the transmitter and clear its reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable the receiver and clear its reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Turn on jumbo: program the 9600-byte (0x2580) jumbo frame size for TX and RX. */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled.        */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
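/* Large receive buffers are carved out of a single "master" compound page
 * of ql_lbq_block_size() bytes: the page is allocated and DMA-mapped once,
 * then handed out in lbq_buf_size slices.  Every slice except the last
 * takes an extra page reference (get_page()), and the final slice is
 * flagged so the completion path knows when to unmap the whole page.
 */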
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
1171 /* Process (refill) a large buffer queue. */
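/* Buffers are replenished 16 at a time while more than 32 are free, and the
 * producer-index doorbell is rung once at the end rather than per buffer.
 */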
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                         rx_ring->lbq_buf_size);
1197                         *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If it's the zeroeth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there are more than 6 frags,
1314                          * then it's an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
1339
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list failed with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the size
1421                          * of one sglist (OAL) entry.
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
1432                 }
1433
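                     /* At this point tbd points either at the next IOCB
                      * segment or, if the OAL was just tacked on, at the
                      * first OAL entry; the frag below is described the
                      * same way in either case.
                      */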
1434                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435                                        DMA_TO_DEVICE);
1436
1437                 err = dma_mapping_error(&qdev->pdev->dev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   skb_frag_size(frag));
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * map_idx counts the skb->data mapping plus every frag (and
1461          * the OAL, if one was tacked on) that mapped successfully, so
1462          * ql_unmap_send() unmaps exactly the entries that were set up
1463          * before the failure.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion: checksummed TCP frame in a page chunk, via GRO. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct napi_struct *napi = &rx_ring->napi;
1479
1480         napi->dev = qdev->ndev;
1481
1482         skb = napi_get_frags(napi);
1483         if (!skb) {
1484                 netif_err(qdev, drv, qdev->ndev,
1485                           "Couldn't get an skb, exiting.\n");
1486                 rx_ring->rx_dropped++;
1487                 put_page(lbq_desc->p.pg_chunk.page);
1488                 return;
1489         }
1490         prefetch(lbq_desc->p.pg_chunk.va);
1491         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492                              lbq_desc->p.pg_chunk.page,
1493                              lbq_desc->p.pg_chunk.offset,
1494                              length);
1495
1496         skb->len += length;
1497         skb->data_len += length;
1498         skb->truesize += length;
1499         skb_shinfo(skb)->nr_frags++;
1500
1501         rx_ring->rx_packets++;
1502         rx_ring->rx_bytes += length;
1503         skb->ip_summed = CHECKSUM_UNNECESSARY;
1504         skb_record_rx_queue(skb, rx_ring->cq_id);
1505         if (vlan_id != 0xffff)
1506                 __vlan_hwaccel_put_tag(skb, vlan_id);
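             /* napi_gro_frags() consumes the frag list built above and
              * gives GRO a chance to coalesce it with other segments of
              * the same flow before the packet goes up the stack.
              */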
1507         napi_gro_frags(napi);
1508 }
1509
1510 /* Process an inbound completion: non-TCP packet in a single page chunk. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512                                         struct rx_ring *rx_ring,
1513                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1514                                         u32 length,
1515                                         u16 vlan_id)
1516 {
1517         struct net_device *ndev = qdev->ndev;
1518         struct sk_buff *skb = NULL;
1519         void *addr;
1520         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521         struct napi_struct *napi = &rx_ring->napi;
1522
1523         skb = netdev_alloc_skb(ndev, length);
1524         if (!skb) {
1525                 netif_err(qdev, drv, qdev->ndev,
1526                           "Couldn't get an skb, need to unwind!\n");
1527                 rx_ring->rx_dropped++;
1528                 put_page(lbq_desc->p.pg_chunk.page);
1529                 return;
1530         }
1531
1532         addr = lbq_desc->p.pg_chunk.va;
1533         prefetch(addr);
1534
1535
1536         /* Frame error, so drop the packet. */
1537         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538                 netif_info(qdev, drv, qdev->ndev,
1539                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540                 rx_ring->rx_errors++;
1541                 goto err_out;
1542         }
1543
1544         /* The max framesize filter on this chip is set higher than
1545          * MTU since FCoE uses 2k frames.
1546          */
1547         if (length > ndev->mtu + ETH_HLEN) {
1548                 netif_err(qdev, drv, qdev->ndev,
1549                           "Segment too large, dropping.\n");
1550                 rx_ring->rx_dropped++;
1551                 goto err_out;
1552         }
1553         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556                      length);
1557         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559                                 length-ETH_HLEN);
1560         skb->len += length-ETH_HLEN;
1561         skb->data_len += length-ETH_HLEN;
1562         skb->truesize += length-ETH_HLEN;
1563
1564         rx_ring->rx_packets++;
1565         rx_ring->rx_bytes += skb->len;
1566         skb->protocol = eth_type_trans(skb, ndev);
1567         skb_checksum_none_assert(skb);
1568
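             /* If rx checksum offload is enabled and the hardware flagged
              * no checksum or frame errors, accept its verdict.  UDP is
              * only trusted for unfragmented IPv4 datagrams: the UDP
              * checksum covers the whole datagram, so it can't be
              * validated per fragment.
              */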
1569         if ((ndev->features & NETIF_F_RXCSUM) &&
1570                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571                 /* TCP frame. */
1572                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574                                      "TCP checksum done!\n");
1575                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1576                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578                         /* Unfragmented ipv4 UDP frame. */
1579                         struct iphdr *iph = (struct iphdr *) skb->data;
1580                         if (!(iph->frag_off &
1581                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1584                                              qdev->ndev,
1585                                              "UDP checksum done!\n");
1586                         }
1587                 }
1588         }
1589
1590         skb_record_rx_queue(skb, rx_ring->cq_id);
1591         if (vlan_id != 0xffff)
1592                 __vlan_hwaccel_put_tag(skb, vlan_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594                 napi_gro_receive(napi, skb);
1595         else
1596                 netif_receive_skb(skb);
1597         return;
1598 err_out:
1599         dev_kfree_skb_any(skb);
1600         put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602
1603 /* Process an inbound completion: frame that fits in a single small buffer. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605                                         struct rx_ring *rx_ring,
1606                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1607                                         u32 length,
1608                                         u16 vlan_id)
1609 {
1610         struct net_device *ndev = qdev->ndev;
1611         struct sk_buff *skb = NULL;
1612         struct sk_buff *new_skb = NULL;
1613         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615         skb = sbq_desc->p.skb;
1616         /* Allocate new_skb and copy */
1617         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618         if (new_skb == NULL) {
1619                 netif_err(qdev, probe, qdev->ndev,
1620                           "No skb available, drop the packet.\n");
1621                 rx_ring->rx_dropped++;
1622                 return;
1623         }
1624         skb_reserve(new_skb, NET_IP_ALIGN);
1625         memcpy(skb_put(new_skb, length), skb->data, length);
1626         skb = new_skb;
1627
1628         /* Frame error, so drop the packet. */
1629         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630                 netif_info(qdev, drv, qdev->ndev,
1631                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632                 dev_kfree_skb_any(skb);
1633                 rx_ring->rx_errors++;
1634                 return;
1635         }
1636
1637         /* loopback self test for ethtool */
1638         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639                 ql_check_lb_frame(qdev, skb);
1640                 dev_kfree_skb_any(skb);
1641                 return;
1642         }
1643
1644         /* The max framesize filter on this chip is set higher than
1645          * MTU since FCoE uses 2k frames.
1646          */
1647         if (skb->len > ndev->mtu + ETH_HLEN) {
1648                 dev_kfree_skb_any(skb);
1649                 rx_ring->rx_dropped++;
1650                 return;
1651         }
1652
1653         prefetch(skb->data);
1654         skb->dev = ndev;
1655         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657                              "%s Multicast.\n",
1658                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664         }
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "Promiscuous Packet.\n");
1668
1669         rx_ring->rx_packets++;
1670         rx_ring->rx_bytes += skb->len;
1671         skb->protocol = eth_type_trans(skb, ndev);
1672         skb_checksum_none_assert(skb);
1673
1674         /* If rx checksum is on, and there are no
1675          * csum or frame errors.
1676          */
1677         if ((ndev->features & NETIF_F_RXCSUM) &&
1678                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679                 /* TCP frame. */
1680                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682                                      "TCP checksum done!\n");
1683                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686                         /* Unfragmented ipv4 UDP frame. */
1687                         struct iphdr *iph = (struct iphdr *) skb->data;
1688                         if (!(iph->frag_off &
1689                                 ntohs(IP_MF|IP_OFFSET))) {
1690                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1692                                              qdev->ndev,
1693                                              "UDP checksum done!\n");
1694                         }
1695                 }
1696         }
1697
1698         skb_record_rx_queue(skb, rx_ring->cq_id);
1699         if (vlan_id != 0xffff)
1700                 __vlan_hwaccel_put_tag(skb, vlan_id);
1701         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702                 napi_gro_receive(&rx_ring->napi, skb);
1703         else
1704                 netif_receive_skb(skb);
1705 }
1706
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709         void *temp_addr = skb->data;
1710
1711         /* Undo the skb_reserve(skb,32) we did before
1712          * giving to hardware, and realign data on
1713          * a 2-byte boundary.
1714          */
1715         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717         skb_copy_to_linear_data(skb, temp_addr,
1718                 (unsigned int)len);
1719 }
1720
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for now it works well.
1725  */
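     /* The chip can hand us a frame in several layouts, and each branch
      * below builds the skb accordingly:
      *   - header in a small buffer, data in another small buffer
      *   - header and data together in a single small buffer
      *   - header in a small buffer, data in one large buffer chunk
      *   - header and data together in one large buffer chunk
      *   - data in a chain of large buffer chunks described by a small
      *     buffer
      */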
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727                                        struct rx_ring *rx_ring,
1728                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730         struct bq_desc *lbq_desc;
1731         struct bq_desc *sbq_desc;
1732         struct sk_buff *skb = NULL;
1733         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736         /*
1737          * Handle the header buffer if present.
1738          */
1739         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                              "Header of %d bytes in small buffer.\n", hdr_len);
1743                 /*
1744                  * Headers fit nicely into a small buffer.
1745                  */
1746                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747                 pci_unmap_single(qdev->pdev,
1748                                 dma_unmap_addr(sbq_desc, mapaddr),
1749                                 dma_unmap_len(sbq_desc, maplen),
1750                                 PCI_DMA_FROMDEVICE);
1751                 skb = sbq_desc->p.skb;
1752                 ql_realign_skb(skb, hdr_len);
1753                 skb_put(skb, hdr_len);
1754                 sbq_desc->p.skb = NULL;
1755         }
1756
1757         /*
1758          * Handle the data buffer(s).
1759          */
1760         if (unlikely(!length)) {        /* Is there data too? */
1761                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762                              "No Data buffer in this packet.\n");
1763                 return skb;
1764         }
1765
1766         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769                                      "Headers in small, data of %d bytes in small, combine them.\n",
1770                                      length);
1771                         /*
1772                          * Data is less than small buffer size so it's
1773                          * stuffed in a small buffer.
1774                          * For this case we append the data
1775                          * from the "data" small buffer to the "header" small
1776                          * buffer.
1777                          */
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         pci_dma_sync_single_for_cpu(qdev->pdev,
1780                                                     dma_unmap_addr
1781                                                     (sbq_desc, mapaddr),
1782                                                     dma_unmap_len
1783                                                     (sbq_desc, maplen),
1784                                                     PCI_DMA_FROMDEVICE);
1785                         memcpy(skb_put(skb, length),
1786                                sbq_desc->p.skb->data, length);
1787                         pci_dma_sync_single_for_device(qdev->pdev,
1788                                                        dma_unmap_addr
1789                                                        (sbq_desc,
1790                                                         mapaddr),
1791                                                        dma_unmap_len
1792                                                        (sbq_desc,
1793                                                         maplen),
1794                                                        PCI_DMA_FROMDEVICE);
1795                 } else {
1796                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                                      "%d bytes in a single small buffer.\n",
1798                                      length);
1799                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1800                         skb = sbq_desc->p.skb;
1801                         ql_realign_skb(skb, length);
1802                         skb_put(skb, length);
1803                         pci_unmap_single(qdev->pdev,
1804                                          dma_unmap_addr(sbq_desc,
1805                                                         mapaddr),
1806                                          dma_unmap_len(sbq_desc,
1807                                                        maplen),
1808                                          PCI_DMA_FROMDEVICE);
1809                         sbq_desc->p.skb = NULL;
1810                 }
1811         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "Header in small, %d bytes in large. Chain large to small!\n",
1815                                      length);
1816                         /*
1817                          * The data is in a single large buffer.  We
1818                          * chain it to the header buffer's skb and let
1819                          * it rip.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1824                                      lbq_desc->p.pg_chunk.offset, length);
1825                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826                                                 lbq_desc->p.pg_chunk.offset,
1827                                                 length);
1828                         skb->len += length;
1829                         skb->data_len += length;
1830                         skb->truesize += length;
1831                 } else {
1832                         /*
1833                          * The headers and data are in a single large buffer. We
1834                          * copy it to a new skb and let it go. This can happen with
1835                          * jumbo mtu on a non-TCP/UDP frame.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         skb = netdev_alloc_skb(qdev->ndev, length);
1839                         if (skb == NULL) {
1840                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841                                              "No skb available, drop the packet.\n");
1842                                 return NULL;
1843                         }
1844                         pci_unmap_page(qdev->pdev,
1845                                        dma_unmap_addr(lbq_desc,
1846                                                       mapaddr),
1847                                        dma_unmap_len(lbq_desc, maplen),
1848                                        PCI_DMA_FROMDEVICE);
1849                         skb_reserve(skb, NET_IP_ALIGN);
1850                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852                                      length);
1853                         skb_fill_page_desc(skb, 0,
1854                                                 lbq_desc->p.pg_chunk.page,
1855                                                 lbq_desc->p.pg_chunk.offset,
1856                                                 length);
1857                         skb->len += length;
1858                         skb->data_len += length;
1859                         skb->truesize += length;
1860                         length -= length;
1861                         __pskb_pull_tail(skb,
1862                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863                                 VLAN_ETH_HLEN : ETH_HLEN);
1864                 }
1865         } else {
1866                 /*
1867                  * The data is in a chain of large buffers
1868                  * pointed to by a small buffer.  We loop
1869                  * through and chain them to our small header
1870                  * buffer's skb.
1871                  * frags:  There are 18 max frags and our small
1872                  *         buffer will hold 32 of them. The thing is,
1873                  *         we'll use 3 max for our 9000 byte jumbo
1874                  *         frames.  If the MTU goes up we could
1875                  *          eventually be in trouble.
1876                  */
1877                 int size, i = 0;
1878                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879                 pci_unmap_single(qdev->pdev,
1880                                  dma_unmap_addr(sbq_desc, mapaddr),
1881                                  dma_unmap_len(sbq_desc, maplen),
1882                                  PCI_DMA_FROMDEVICE);
1883                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884                         /*
1885                          * This is a non-TCP/UDP IP frame, so
1886                          * the headers aren't split into a small
1887                          * buffer.  We have to use the small buffer
1888                          * that contains our sg list as our skb to
1889                          * send upstairs. Copy the sg list here to
1890                          * a local buffer and use it to find the
1891                          * pages to chain.
1892                          */
1893                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894                                      "%d bytes of headers & data in chain of large.\n",
1895                                      length);
1896                         skb = sbq_desc->p.skb;
1897                         sbq_desc->p.skb = NULL;
1898                         skb_reserve(skb, NET_IP_ALIGN);
1899                 }
1900                 while (length > 0) {
1901                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902                         size = (length < rx_ring->lbq_buf_size) ? length :
1903                                 rx_ring->lbq_buf_size;
1904
1905                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906                                      "Adding page %d to skb for %d bytes.\n",
1907                                      i, size);
1908                         skb_fill_page_desc(skb, i,
1909                                                 lbq_desc->p.pg_chunk.page,
1910                                                 lbq_desc->p.pg_chunk.offset,
1911                                                 size);
1912                         skb->len += size;
1913                         skb->data_len += size;
1914                         skb->truesize += size;
1915                         length -= size;
1916                         i++;
1917                 }
1918                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919                                 VLAN_ETH_HLEN : ETH_HLEN);
1920         }
1921         return skb;
1922 }
1923
1924 /* Process an inbound completion: header and data split across separate buffers. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926                                    struct rx_ring *rx_ring,
1927                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1928                                    u16 vlan_id)
1929 {
1930         struct net_device *ndev = qdev->ndev;
1931         struct sk_buff *skb = NULL;
1932
1933         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936         if (unlikely(!skb)) {
1937                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                              "No skb available, drop packet.\n");
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* Frame error, so drop the packet. */
1944         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945                 netif_info(qdev, drv, qdev->ndev,
1946                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947                 dev_kfree_skb_any(skb);
1948                 rx_ring->rx_errors++;
1949                 return;
1950         }
1951
1952         /* The max framesize filter on this chip is set higher than
1953          * MTU since FCoE uses 2k frames.
1954          */
1955         if (skb->len > ndev->mtu + ETH_HLEN) {
1956                 dev_kfree_skb_any(skb);
1957                 rx_ring->rx_dropped++;
1958                 return;
1959         }
1960
1961         /* loopback self test for ethtool */
1962         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963                 ql_check_lb_frame(qdev, skb);
1964                 dev_kfree_skb_any(skb);
1965                 return;
1966         }
1967
1968         prefetch(skb->data);
1969         skb->dev = ndev;
1970         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978                 rx_ring->rx_multicast++;
1979         }
1980         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982                              "Promiscuous Packet.\n");
1983         }
1984
1985         skb->protocol = eth_type_trans(skb, ndev);
1986         skb_checksum_none_assert(skb);
1987
1988         /* If rx checksum is on, and there are no
1989          * csum or frame errors.
1990          */
1991         if ((ndev->features & NETIF_F_RXCSUM) &&
1992                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993                 /* TCP frame. */
1994                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996                                      "TCP checksum done!\n");
1997                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1998                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000                 /* Unfragmented ipv4 UDP frame. */
2001                         struct iphdr *iph = (struct iphdr *) skb->data;
2002                         if (!(iph->frag_off &
2003                                 ntohs(IP_MF|IP_OFFSET))) {
2004                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006                                              "UDP checksum done!\n");
2007                         }
2008                 }
2009         }
2010
2011         rx_ring->rx_packets++;
2012         rx_ring->rx_bytes += skb->len;
2013         skb_record_rx_queue(skb, rx_ring->cq_id);
2014         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015                 __vlan_hwaccel_put_tag(skb, vlan_id);
2016         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017                 napi_gro_receive(&rx_ring->napi, skb);
2018         else
2019                 netif_receive_skb(skb);
2020 }
2021
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024                                         struct rx_ring *rx_ring,
2025                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2026 {
2027         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
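             /* A vlan_id of 0xffff tells the handlers below that the
              * frame carried no VLAN tag.
              */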
2031
2032         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033
2034         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035                 /* The data and headers are split into
2036                  * separate buffers.
2037                  */
2038                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039                                                 vlan_id);
2040         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041                 /* The data fit in a single small buffer.
2042                  * Allocate a new skb, copy the data and
2043                  * return the buffer to the free pool.
2044                  */
2045                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046                                                 length, vlan_id);
2047         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050                 /* TCP packet in a page chunk that's been checksummed.
2051                  * Tack it on to our GRO skb and let it go.
2052                  */
2053                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054                                                 length, vlan_id);
2055         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056                 /* Non-TCP packet in a page chunk. Allocate an
2057                  * skb, tack it on frags, and send it up.
2058                  */
2059                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060                                                 length, vlan_id);
2061         } else {
2062                 /* Non-TCP/UDP large frames that span multiple buffers
2063                  * can be processed correctly by the split frame logic.
2064                  */
2065                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066                                                 vlan_id);
2067         }
2068
2069         return (unsigned long)length;
2070 }
2071
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074                                    struct ob_mac_iocb_rsp *mac_rsp)
2075 {
2076         struct tx_ring *tx_ring;
2077         struct tx_ring_desc *tx_ring_desc;
2078
2079         QL_DUMP_OB_MAC_RSP(mac_rsp);
2080         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084         tx_ring->tx_packets++;
2085         dev_kfree_skb(tx_ring_desc->skb);
2086         tx_ring_desc->skb = NULL;
2087
2088         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089                                         OB_MAC_IOCB_RSP_S |
2090                                         OB_MAC_IOCB_RSP_L |
2091                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093                         netif_warn(qdev, tx_done, qdev->ndev,
2094                                    "Total descriptor length did not match transfer length.\n");
2095                 }
2096                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097                         netif_warn(qdev, tx_done, qdev->ndev,
2098                                    "Frame too short to be valid, not sent.\n");
2099                 }
2100                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101                         netif_warn(qdev, tx_done, qdev->ndev,
2102                                    "Frame too long, but sent anyway.\n");
2103                 }
2104                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105                         netif_warn(qdev, tx_done, qdev->ndev,
2106                                    "PCI backplane error. Frame not sent.\n");
2107                 }
2108         }
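             /* The descriptor has been reclaimed, so one more slot is
              * free in the tx ring.
              */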
2109         atomic_inc(&tx_ring->tx_count);
2110 }
2111
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2114 {
2115         ql_link_off(qdev);
2116         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117 }
2118
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2120 {
2121         ql_link_off(qdev);
2122         ql_disable_interrupts(qdev);
2123         /* Clear adapter up bit to signal the recovery
2124          * process that it shouldn't kill the reset worker
2125          * thread
2126          */
2127         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128         /* Set asic recovery bit to indicate reset process that we are
2129          * in fatal error recovery process rather than normal close
2130          */
2131         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2132         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133 }
2134
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2137 {
2138         switch (ib_ae_rsp->event) {
2139         case MGMT_ERR_EVENT:
2140                 netif_err(qdev, rx_err, qdev->ndev,
2141                           "Management Processor Fatal Error.\n");
2142                 ql_queue_fw_error(qdev);
2143                 return;
2144
2145         case CAM_LOOKUP_ERR_EVENT:
2146                 netdev_err(qdev->ndev, "Multiple CAM hits occurred during lookup.\n");
2147                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148                 ql_queue_asic_error(qdev);
2149                 return;
2150
2151         case SOFT_ECC_ERROR_EVENT:
2152                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153                 ql_queue_asic_error(qdev);
2154                 break;
2155
2156         case PCI_ERR_ANON_BUF_RD:
2157                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158                                         "anonymous buffers from rx_ring %d.\n",
2159                                         ib_ae_rsp->q_id);
2160                 ql_queue_asic_error(qdev);
2161                 break;
2162
2163         default:
2164                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165                           ib_ae_rsp->event);
2166                 ql_queue_asic_error(qdev);
2167                 break;
2168         }
2169 }
2170
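     /* Drain the completion ring that carries tx (outbound) completions
      * for this vector.  Returns the number of completions handled, and
      * wakes the corresponding tx queue if it had been stopped and is now
      * at least 25% empty.
      */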
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172 {
2173         struct ql_adapter *qdev = rx_ring->qdev;
2174         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175         struct ob_mac_iocb_rsp *net_rsp = NULL;
2176         int count = 0;
2177
2178         struct tx_ring *tx_ring;
2179         /* While there are entries in the completion queue. */
2180         while (prod != rx_ring->cnsmr_idx) {
2181
2182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2184                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2185
2186                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187                 rmb();
2188                 switch (net_rsp->opcode) {
2189
2190                 case OPCODE_OB_MAC_TSO_IOCB:
2191                 case OPCODE_OB_MAC_IOCB:
2192                         ql_process_mac_tx_intr(qdev, net_rsp);
2193                         break;
2194                 default:
2195                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197                                      net_rsp->opcode);
2198                 }
2199                 count++;
2200                 ql_update_cq(rx_ring);
2201                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2202         }
2203         if (!net_rsp)
2204                 return 0;
2205         ql_write_cq_idx(rx_ring);
2206         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208                 if (atomic_read(&tx_ring->queue_stopped) &&
2209                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210                         /*
2211                          * The queue got stopped because the tx_ring was full.
2212                          * Wake it up, because it's now at least 25% empty.
2213                          */
2214                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2215         }
2216
2217         return count;
2218 }
2219
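     /* Service up to 'budget' inbound completions on this ring, then
      * replenish the receive buffer queues and advance the consumer index
      * so the hardware knows how far we have read.
      */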
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221 {
2222         struct ql_adapter *qdev = rx_ring->qdev;
2223         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224         struct ql_net_rsp_iocb *net_rsp;
2225         int count = 0;
2226
2227         /* While there are entries in the completion queue. */
2228         while (prod != rx_ring->cnsmr_idx) {
2229
2230                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2232                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2233
2234                 net_rsp = rx_ring->curr_entry;
2235                 rmb();
2236                 switch (net_rsp->opcode) {
2237                 case OPCODE_IB_MAC_IOCB:
2238                         ql_process_mac_rx_intr(qdev, rx_ring,
2239                                                (struct ib_mac_iocb_rsp *)
2240                                                net_rsp);
2241                         break;
2242
2243                 case OPCODE_IB_AE_IOCB:
2244                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245                                                 net_rsp);
2246                         break;
2247                 default:
2248                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250                                      net_rsp->opcode);
2251                         break;
2252                 }
2253                 count++;
2254                 ql_update_cq(rx_ring);
2255                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256                 if (count == budget)
2257                         break;
2258         }
2259         ql_update_buffer_queues(qdev, rx_ring);
2260         ql_write_cq_idx(rx_ring);
2261         return count;
2262 }
2263
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265 {
2266         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267         struct ql_adapter *qdev = rx_ring->qdev;
2268         struct rx_ring *trx_ring;
2269         int i, work_done = 0;
2270         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2271
2272         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2274
2275         /* Service the TX rings first.  They start
2276          * right after the RSS rings. */
2277         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278                 trx_ring = &qdev->rx_ring[i];
2279                 /* If this TX completion ring belongs to this vector and
2280                  * it's not empty then service it.
2281                  */
2282                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284                                         trx_ring->cnsmr_idx)) {
2285                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286                                      "%s: Servicing TX completion ring %d.\n",
2287                                      __func__, trx_ring->cq_id);
2288                         ql_clean_outbound_rx_ring(trx_ring);
2289                 }
2290         }
2291
2292         /*
2293          * Now service the RSS ring if it's active.
2294          */
2295         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296                                         rx_ring->cnsmr_idx) {
2297                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298                              "%s: Servicing RX completion ring %d.\n",
2299                              __func__, rx_ring->cq_id);
2300                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301         }
2302
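             /* Per the NAPI contract, leave polling mode and re-enable
              * the completion interrupt only if we consumed less than the
              * full budget.
              */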
2303         if (work_done < budget) {
2304                 napi_complete(napi);
2305                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306         }
2307         return work_done;
2308 }
2309
2310 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2311 {
2312         struct ql_adapter *qdev = netdev_priv(ndev);
2313
2314         if (features & NETIF_F_HW_VLAN_RX) {
2315                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316                              "Turning on VLAN in NIC_RCV_CFG.\n");
2317                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2319         } else {
2320                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321                              "Turning off VLAN in NIC_RCV_CFG.\n");
2322                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323         }
2324 }
2325
2326 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2327 {
2328         /*
2329          * Since there is no support for separate rx/tx vlan accel
2330          * enable/disable make sure tx flag is always in same state as rx.
2331          */
2332         if (features & NETIF_F_HW_VLAN_RX)
2333                 features |= NETIF_F_HW_VLAN_TX;
2334         else
2335                 features &= ~NETIF_F_HW_VLAN_TX;
2336
2337         return features;
2338 }
2339
2340 static int qlge_set_features(struct net_device *ndev, u32 features)
2341 {
2342         u32 changed = ndev->features ^ features;
2343
2344         if (changed & NETIF_F_HW_VLAN_RX)
2345                 qlge_vlan_mode(ndev, features);
2346
2347         return 0;
2348 }
2349
2350 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2351 {
2352         u32 enable_bit = MAC_ADDR_E;
2353
2354         if (ql_set_mac_addr_reg
2355             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2356                 netif_err(qdev, ifup, qdev->ndev,
2357                           "Failed to init vlan address.\n");
2358         }
2359 }
2360
2361 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2362 {
2363         struct ql_adapter *qdev = netdev_priv(ndev);
2364         int status;
2365
2366         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367         if (status)
2368                 return;
2369
2370         __qlge_vlan_rx_add_vid(qdev, vid);
2371         set_bit(vid, qdev->active_vlans);
2372
2373         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2374 }
2375
2376 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2377 {
2378         u32 enable_bit = 0;
2379
2380         if (ql_set_mac_addr_reg
2381             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2382                 netif_err(qdev, ifup, qdev->ndev,
2383                           "Failed to clear vlan address.\n");
2384         }
2385 }
2386
2387 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2388 {
2389         struct ql_adapter *qdev = netdev_priv(ndev);
2390         int status;
2391
2392         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2393         if (status)
2394                 return;
2395
2396         __qlge_vlan_rx_kill_vid(qdev, vid);
2397         clear_bit(vid, qdev->active_vlans);
2398
2399         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2400 }
2401
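     /* Re-program the hardware VLAN filter from the active_vlans bitmap
      * so the chip again matches every VLAN the stack has configured.
      */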
2402 static void qlge_restore_vlan(struct ql_adapter *qdev)
2403 {
2404         int status;
2405         u16 vid;
2406
2407         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2408         if (status)
2409                 return;
2410
2411         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2412                 __qlge_vlan_rx_add_vid(qdev, vid);
2413
2414         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2415 }
2416
2417 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2418 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2419 {
2420         struct rx_ring *rx_ring = dev_id;
2421         napi_schedule(&rx_ring->napi);
2422         return IRQ_HANDLED;
2423 }
2424
2425 /* This handles a fatal error, MPI activity, and the default
2426  * rx_ring in an MSI-X multiple vector environment.
2427  * In an MSI/Legacy environment it also processes the rest of
2428  * the rx_rings.
2429  */
2430 static irqreturn_t qlge_isr(int irq, void *dev_id)
2431 {
2432         struct rx_ring *rx_ring = dev_id;
2433         struct ql_adapter *qdev = rx_ring->qdev;
2434         struct intr_context *intr_context = &qdev->intr_context[0];
2435         u32 var;
2436         int work_done = 0;
2437
2438         spin_lock(&qdev->hw_lock);
2439         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2440                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2441                              "Shared Interrupt, Not ours!\n");
2442                 spin_unlock(&qdev->hw_lock);
2443                 return IRQ_NONE;
2444         }
2445         spin_unlock(&qdev->hw_lock);
2446
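             /* The value returned while disabling the completion
              * interrupt is treated as the chip status word below
              * (STS_FE for a fatal error, STS_PI for MPI activity).
              */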
2447         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2448
2449         /*
2450          * Check for fatal error.
2451          */
2452         if (var & STS_FE) {
2453                 ql_queue_asic_error(qdev);
2454                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2455                 var = ql_read32(qdev, ERR_STS);
2456                 netdev_err(qdev->ndev, "Resetting chip. "
2457                                         "Error Status Register = 0x%x\n", var);
2458                 return IRQ_HANDLED;
2459         }
2460
2461         /*
2462          * Check MPI processor activity.
2463          */
2464         if ((var & STS_PI) &&
2465                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2466                 /*
2467                  * We've got an async event or mailbox completion.
2468                  * Handle it and clear the source of the interrupt.
2469                  */
2470                 netif_err(qdev, intr, qdev->ndev,
2471                           "Got MPI processor interrupt.\n");
2472                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2473                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2474                 queue_delayed_work_on(smp_processor_id(),
2475                                 qdev->workqueue, &qdev->mpi_work, 0);
2476                 work_done++;
2477         }
2478
2479         /*
2480          * Get the bit-mask that shows the active queues for this
2481          * pass.  Compare it to the queues that this irq services
2482          * and call napi if there's a match.
2483          */
2484         var = ql_read32(qdev, ISR1);
2485         if (var & intr_context->irq_mask) {
2486                 netif_info(qdev, intr, qdev->ndev,
2487                            "Waking handler for rx_ring[0].\n");
2488                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2489                 napi_schedule(&rx_ring->napi);
2490                 work_done++;
2491         }
2492         ql_enable_completion_interrupt(qdev, intr_context->intr);
2493         return work_done ? IRQ_HANDLED : IRQ_NONE;
2494 }
2495
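     /* Set up a TSO request.  The hardware needs the header lengths, the
      * MSS, and the TCP checksum field pre-seeded with the pseudo-header
      * sum (length omitted) so it can complete the checksum for every
      * segment it emits.  Returns 1 if TSO was set up, 0 if the skb is
      * not GSO, or a negative errno if the cloned header could not be
      * reallocated.
      */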
2496 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2497 {
2498
2499         if (skb_is_gso(skb)) {
2500                 int err;
2501                 if (skb_header_cloned(skb)) {
2502                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2503                         if (err)
2504                                 return err;
2505                 }
2506
2507                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2508                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2509                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2510                 mac_iocb_ptr->total_hdrs_len =
2511                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2512                 mac_iocb_ptr->net_trans_offset =
2513                     cpu_to_le16(skb_network_offset(skb) |
2514                                 skb_transport_offset(skb)
2515                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2516                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2517                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2518                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2519                         struct iphdr *iph = ip_hdr(skb);
2520                         iph->check = 0;
2521                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2522                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2523                                                                  iph->daddr, 0,
2524                                                                  IPPROTO_TCP,
2525                                                                  0);
2526                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2527                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2528                         tcp_hdr(skb)->check =
2529                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2530                                              &ipv6_hdr(skb)->daddr,
2531                                              0, IPPROTO_TCP, 0);
2532                 }
2533                 return 1;
2534         }
2535         return 0;
2536 }
2537
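     /* Checksum offload for a non-TSO IPv4 TCP or UDP frame: point the
      * hardware at the transport header and seed the checksum field with
      * the pseudo-header sum so the chip can fill in the rest.
      */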
2538 static void ql_hw_csum_setup(struct sk_buff *skb,
2539                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2540 {
2541         int len;
2542         struct iphdr *iph = ip_hdr(skb);
2543         __sum16 *check;
2544         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2545         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2546         mac_iocb_ptr->net_trans_offset =
2547                 cpu_to_le16(skb_network_offset(skb) |
2548                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2549
2550         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2551         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2552         if (likely(iph->protocol == IPPROTO_TCP)) {
2553                 check = &(tcp_hdr(skb)->check);
2554                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2555                 mac_iocb_ptr->total_hdrs_len =
2556                     cpu_to_le16(skb_transport_offset(skb) +
2557                                 (tcp_hdr(skb)->doff << 2));
2558         } else {
2559                 check = &(udp_hdr(skb)->check);
2560                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2561                 mac_iocb_ptr->total_hdrs_len =
2562                     cpu_to_le16(skb_transport_offset(skb) +
2563                                 sizeof(struct udphdr));
2564         }
2565         *check = ~csum_tcpudp_magic(iph->saddr,
2566                                     iph->daddr, len, iph->protocol, 0);
2567 }
2568
2569 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2570 {
2571         struct tx_ring_desc *tx_ring_desc;
2572         struct ob_mac_iocb_req *mac_iocb_ptr;
2573         struct ql_adapter *qdev = netdev_priv(ndev);
2574         int tso;
2575         struct tx_ring *tx_ring;
2576         u32 tx_ring_idx = (u32) skb->queue_mapping;
2577
2578         tx_ring = &qdev->tx_ring[tx_ring_idx];
2579
2580         if (skb_padto(skb, ETH_ZLEN))
2581                 return NETDEV_TX_OK;
2582
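             /* Not enough descriptors left for this frame: stop the
              * subqueue, count the stall, and ask the stack to requeue
              * the skb.
              */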
2583         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2584                 netif_info(qdev, tx_queued, qdev->ndev,
2585                            "%s: shutting down tx queue %d due to lack of resources.\n",
2586                            __func__, tx_ring_idx);
2587                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2588                 atomic_inc(&tx_ring->queue_stopped);
2589                 tx_ring->tx_errors++;
2590                 return NETDEV_TX_BUSY;
2591         }
2592         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2593         mac_iocb_ptr = tx_ring_desc->queue_entry;
2594         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2595
2596         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2597         mac_iocb_ptr->tid = tx_ring_desc->index;
2598         /* We use the upper 32-bits to store the tx queue for this IO.
2599          * When we get the completion we can use it to establish the context.
2600          */
2601         mac_iocb_ptr->txq_idx = tx_ring_idx;
2602         tx_ring_desc->skb = skb;
2603
2604         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2605
2606         if (vlan_tx_tag_present(skb)) {
2607                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2608                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2609                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2610                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2611         }
2612         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2613         if (tso < 0) {
2614                 dev_kfree_skb_any(skb);
2615                 return NETDEV_TX_OK;
2616         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2617                 ql_hw_csum_setup(skb,
2618                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619         }
2620         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2621                         NETDEV_TX_OK) {
2622                 netif_err(qdev, tx_queued, qdev->ndev,
2623                           "Could not map the segments.\n");
2624                 tx_ring->tx_errors++;
2625                 return NETDEV_TX_BUSY;
2626         }
2627         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2628         tx_ring->prod_idx++;
2629         if (tx_ring->prod_idx == tx_ring->wq_len)
2630                 tx_ring->prod_idx = 0;
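             /* Make sure the IOCB is fully written to memory before the
              * doorbell write below makes it visible to the hardware.
              */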
2631         wmb();
2632
2633         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2634         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2635                      "tx queued, slot %d, len %d\n",
2636                      tx_ring->prod_idx, skb->len);
2637
2638         atomic_dec(&tx_ring->tx_count);
2639         return NETDEV_TX_OK;
2640 }
2641
2642
2643 static void ql_free_shadow_space(struct ql_adapter *qdev)
2644 {
2645         if (qdev->rx_ring_shadow_reg_area) {
2646                 pci_free_consistent(qdev->pdev,
2647                                     PAGE_SIZE,
2648                                     qdev->rx_ring_shadow_reg_area,
2649                                     qdev->rx_ring_shadow_reg_dma);
2650                 qdev->rx_ring_shadow_reg_area = NULL;
2651         }
2652         if (qdev->tx_ring_shadow_reg_area) {
2653                 pci_free_consistent(qdev->pdev,
2654                                     PAGE_SIZE,
2655                                     qdev->tx_ring_shadow_reg_area,
2656                                     qdev->tx_ring_shadow_reg_dma);
2657                 qdev->tx_ring_shadow_reg_area = NULL;
2658         }
2659 }
2660
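     /* The shadow areas are two DMA-coherent pages that the chip writes
      * queue indices into: completion-queue producer indices for the RX
      * rings and work-queue consumer indices for the TX rings.  The
      * driver can then read them from host memory instead of doing a
      * register read per poll.
      */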
2661 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2662 {
2663         qdev->rx_ring_shadow_reg_area =
2664             pci_alloc_consistent(qdev->pdev,
2665                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2666         if (qdev->rx_ring_shadow_reg_area == NULL) {
2667                 netif_err(qdev, ifup, qdev->ndev,
2668                           "Allocation of RX shadow space failed.\n");
2669                 return -ENOMEM;
2670         }
2671         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672         qdev->tx_ring_shadow_reg_area =
2673             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2674                                  &qdev->tx_ring_shadow_reg_dma);
2675         if (qdev->tx_ring_shadow_reg_area == NULL) {
2676                 netif_err(qdev, ifup, qdev->ndev,
2677                           "Allocation of TX shadow space failed.\n");
2678                 goto err_wqp_sh_area;
2679         }
2680         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2681         return 0;
2682
2683 err_wqp_sh_area:
2684         pci_free_consistent(qdev->pdev,
2685                             PAGE_SIZE,
2686                             qdev->rx_ring_shadow_reg_area,
2687                             qdev->rx_ring_shadow_reg_dma);
2688         return -ENOMEM;
2689 }
2690
2691 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2692 {
2693         struct tx_ring_desc *tx_ring_desc;
2694         int i;
2695         struct ob_mac_iocb_req *mac_iocb_ptr;
2696
2697         mac_iocb_ptr = tx_ring->wq_base;
2698         tx_ring_desc = tx_ring->q;
2699         for (i = 0; i < tx_ring->wq_len; i++) {
2700                 tx_ring_desc->index = i;
2701                 tx_ring_desc->skb = NULL;
2702                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2703                 mac_iocb_ptr++;
2704                 tx_ring_desc++;
2705         }
2706         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2707         atomic_set(&tx_ring->queue_stopped, 0);
2708 }
2709
2710 static void ql_free_tx_resources(struct ql_adapter *qdev,
2711                                  struct tx_ring *tx_ring)
2712 {
2713         if (tx_ring->wq_base) {
2714                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2715                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2716                 tx_ring->wq_base = NULL;
2717         }
2718         kfree(tx_ring->q);
2719         tx_ring->q = NULL;
2720 }
2721
2722 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2723                                  struct tx_ring *tx_ring)
2724 {
2725         tx_ring->wq_base =
2726             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2727                                  &tx_ring->wq_base_dma);
2728
2729         if ((tx_ring->wq_base == NULL) ||
2730             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2731                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2732                 return -ENOMEM;
2733         }
2734         tx_ring->q =
2735             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2736         if (tx_ring->q == NULL)
2737                 goto err;
2738
2739         return 0;
2740 err:
2741         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2742                             tx_ring->wq_base, tx_ring->wq_base_dma);
2743         return -ENOMEM;
2744 }
2745
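     /* Large buffers are chunks carved out of bigger page blocks.  Only
      * the descriptor holding the last chunk of a block (last_flag) owns
      * the DMA mapping, so the unmap happens there; every descriptor
      * drops its own page reference.
      */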
2746 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2747 {
2748         struct bq_desc *lbq_desc;
2749
2750         u32 curr_idx, clean_idx;
2751
2752         curr_idx = rx_ring->lbq_curr_idx;
2753         clean_idx = rx_ring->lbq_clean_idx;
2754         while (curr_idx != clean_idx) {
2755                 lbq_desc = &rx_ring->lbq[curr_idx];
2756
2757                 if (lbq_desc->p.pg_chunk.last_flag) {
2758                         pci_unmap_page(qdev->pdev,
2759                                 lbq_desc->p.pg_chunk.map,
2760                                 ql_lbq_block_size(qdev),
2761                                        PCI_DMA_FROMDEVICE);
2762                         lbq_desc->p.pg_chunk.last_flag = 0;
2763                 }
2764
2765                 put_page(lbq_desc->p.pg_chunk.page);
2766                 lbq_desc->p.pg_chunk.page = NULL;
2767
2768                 if (++curr_idx == rx_ring->lbq_len)
2769                         curr_idx = 0;
2770
2771         }
2772 }
2773
2774 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2775 {
2776         int i;
2777         struct bq_desc *sbq_desc;
2778
2779         for (i = 0; i < rx_ring->sbq_len; i++) {
2780                 sbq_desc = &rx_ring->sbq[i];
2781                 if (sbq_desc == NULL) {
2782                         netif_err(qdev, ifup, qdev->ndev,
2783                                   "sbq_desc %d is NULL.\n", i);
2784                         return;
2785                 }
2786                 if (sbq_desc->p.skb) {
2787                         pci_unmap_single(qdev->pdev,
2788                                          dma_unmap_addr(sbq_desc, mapaddr),
2789                                          dma_unmap_len(sbq_desc, maplen),
2790                                          PCI_DMA_FROMDEVICE);
2791                         dev_kfree_skb(sbq_desc->p.skb);
2792                         sbq_desc->p.skb = NULL;
2793                 }
2794         }
2795 }
2796
2797 /* Free all large and small rx buffers associated
2798  * with the completion queues for this device.
2799  */
2800 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2801 {
2802         int i;
2803         struct rx_ring *rx_ring;
2804
2805         for (i = 0; i < qdev->rx_ring_count; i++) {
2806                 rx_ring = &qdev->rx_ring[i];
2807                 if (rx_ring->lbq)
2808                         ql_free_lbq_buffers(qdev, rx_ring);
2809                 if (rx_ring->sbq)
2810                         ql_free_sbq_buffers(qdev, rx_ring);
2811         }
2812 }
2813
2814 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2815 {
2816         struct rx_ring *rx_ring;
2817         int i;
2818
2819         for (i = 0; i < qdev->rx_ring_count; i++) {
2820                 rx_ring = &qdev->rx_ring[i];
2821                 if (rx_ring->type != TX_Q)
2822                         ql_update_buffer_queues(qdev, rx_ring);
2823         }
2824 }
2825
2826 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2827                                 struct rx_ring *rx_ring)
2828 {
2829         int i;
2830         struct bq_desc *lbq_desc;
2831         __le64 *bq = rx_ring->lbq_base;
2832
2833         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2834         for (i = 0; i < rx_ring->lbq_len; i++) {
2835                 lbq_desc = &rx_ring->lbq[i];
2836                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2837                 lbq_desc->index = i;
2838                 lbq_desc->addr = bq;
2839                 bq++;
2840         }
2841 }
2842
2843 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2844                                 struct rx_ring *rx_ring)
2845 {
2846         int i;
2847         struct bq_desc *sbq_desc;
2848         __le64 *bq = rx_ring->sbq_base;
2849
2850         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2851         for (i = 0; i < rx_ring->sbq_len; i++) {
2852                 sbq_desc = &rx_ring->sbq[i];
2853                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2854                 sbq_desc->index = i;
2855                 sbq_desc->addr = bq;
2856                 bq++;
2857         }
2858 }
2859
2860 static void ql_free_rx_resources(struct ql_adapter *qdev,
2861                                  struct rx_ring *rx_ring)
2862 {
2863         /* Free the small buffer queue. */
2864         if (rx_ring->sbq_base) {
2865                 pci_free_consistent(qdev->pdev,
2866                                     rx_ring->sbq_size,
2867                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2868                 rx_ring->sbq_base = NULL;
2869         }
2870
2871         /* Free the small buffer queue control blocks. */
2872         kfree(rx_ring->sbq);
2873         rx_ring->sbq = NULL;
2874
2875         /* Free the large buffer queue. */
2876         if (rx_ring->lbq_base) {
2877                 pci_free_consistent(qdev->pdev,
2878                                     rx_ring->lbq_size,
2879                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2880                 rx_ring->lbq_base = NULL;
2881         }
2882
2883         /* Free the large buffer queue control blocks. */
2884         kfree(rx_ring->lbq);
2885         rx_ring->lbq = NULL;
2886
2887         /* Free the rx queue. */
2888         if (rx_ring->cq_base) {
2889                 pci_free_consistent(qdev->pdev,
2890                                     rx_ring->cq_size,
2891                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2892                 rx_ring->cq_base = NULL;
2893         }
2894 }
2895
2896 /* Allocate queues and buffers for this completion queue based
2897  * on the values in the parameter structure. */
2898 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2899                                  struct rx_ring *rx_ring)
2900 {
2901
2902         /*
2903          * Allocate the completion queue for this rx_ring.
2904          */
2905         rx_ring->cq_base =
2906             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2907                                  &rx_ring->cq_base_dma);
2908
2909         if (rx_ring->cq_base == NULL) {
2910                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2911                 return -ENOMEM;
2912         }
2913
2914         if (rx_ring->sbq_len) {
2915                 /*
2916                  * Allocate small buffer queue.
2917                  */
2918                 rx_ring->sbq_base =
2919                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2920                                          &rx_ring->sbq_base_dma);
2921
2922                 if (rx_ring->sbq_base == NULL) {
2923                         netif_err(qdev, ifup, qdev->ndev,
2924                                   "Small buffer queue allocation failed.\n");
2925                         goto err_mem;
2926                 }
2927
2928                 /*
2929                  * Allocate small buffer queue control blocks.
2930                  */
2931                 rx_ring->sbq =
2932                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2933                             GFP_KERNEL);
2934                 if (rx_ring->sbq == NULL) {
2935                         netif_err(qdev, ifup, qdev->ndev,
2936                                   "Small buffer queue control block allocation failed.\n");
2937                         goto err_mem;
2938                 }
2939
2940                 ql_init_sbq_ring(qdev, rx_ring);
2941         }
2942
2943         if (rx_ring->lbq_len) {
2944                 /*
2945                  * Allocate large buffer queue.
2946                  */
2947                 rx_ring->lbq_base =
2948                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2949                                          &rx_ring->lbq_base_dma);
2950
2951                 if (rx_ring->lbq_base == NULL) {
2952                         netif_err(qdev, ifup, qdev->ndev,
2953                                   "Large buffer queue allocation failed.\n");
2954                         goto err_mem;
2955                 }
2956                 /*
2957                  * Allocate large buffer queue control blocks.
2958                  */
2959                 rx_ring->lbq =
2960                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2961                             GFP_KERNEL);
2962                 if (rx_ring->lbq == NULL) {
2963                         netif_err(qdev, ifup, qdev->ndev,
2964                                   "Large buffer queue control block allocation failed.\n");
2965                         goto err_mem;
2966                 }
2967
2968                 ql_init_lbq_ring(qdev, rx_ring);
2969         }
2970
2971         return 0;
2972
2973 err_mem:
2974         ql_free_rx_resources(qdev, rx_ring);
2975         return -ENOMEM;
2976 }
2977
2978 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2979 {
2980         struct tx_ring *tx_ring;
2981         struct tx_ring_desc *tx_ring_desc;
2982         int i, j;
2983
2984         /*
2985          * Loop through all queues and free
2986          * any resources.
2987          */
2988         for (j = 0; j < qdev->tx_ring_count; j++) {
2989                 tx_ring = &qdev->tx_ring[j];
2990                 for (i = 0; i < tx_ring->wq_len; i++) {
2991                         tx_ring_desc = &tx_ring->q[i];
2992                         if (tx_ring_desc && tx_ring_desc->skb) {
2993                                 netif_err(qdev, ifdown, qdev->ndev,
2994                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2995                                           tx_ring_desc->skb, j,
2996                                           tx_ring_desc->index);
2997                                 ql_unmap_send(qdev, tx_ring_desc,
2998                                               tx_ring_desc->map_cnt);
2999                                 dev_kfree_skb(tx_ring_desc->skb);
3000                                 tx_ring_desc->skb = NULL;
3001                         }
3002                 }
3003         }
3004 }
3005
3006 static void ql_free_mem_resources(struct ql_adapter *qdev)
3007 {
3008         int i;
3009
3010         for (i = 0; i < qdev->tx_ring_count; i++)
3011                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3012         for (i = 0; i < qdev->rx_ring_count; i++)
3013                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3014         ql_free_shadow_space(qdev);
3015 }
3016
3017 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3018 {
3019         int i;
3020
3021         /* Allocate space for our shadow registers and such. */
3022         if (ql_alloc_shadow_space(qdev))
3023                 return -ENOMEM;
3024
3025         for (i = 0; i < qdev->rx_ring_count; i++) {
3026                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3027                         netif_err(qdev, ifup, qdev->ndev,
3028                                   "RX resource allocation failed.\n");
3029                         goto err_mem;
3030                 }
3031         }
3032         /* Allocate tx queue resources */
3033         for (i = 0; i < qdev->tx_ring_count; i++) {
3034                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3035                         netif_err(qdev, ifup, qdev->ndev,
3036                                   "TX resource allocation failed.\n");
3037                         goto err_mem;
3038                 }
3039         }
3040         return 0;
3041
3042 err_mem:
3043         ql_free_mem_resources(qdev);
3044         return -ENOMEM;
3045 }
3046
3047 /* Set up the rx ring control block and pass it to the chip.
3048  * The control block is defined as
3049  * "Completion Queue Initialization Control Block", or cqicb.
3050  */
3051 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3052 {
3053         struct cqicb *cqicb = &rx_ring->cqicb;
3054         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3055                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3056         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3057                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058         void __iomem *doorbell_area =
3059             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3060         int err = 0;
3061         u16 bq_len;
3062         u64 tmp;
3063         __le64 *base_indirect_ptr;
3064         int page_entries;
3065
3066         /* Set up the shadow registers for this ring. */
3067         rx_ring->prod_idx_sh_reg = shadow_reg;
3068         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3069         *rx_ring->prod_idx_sh_reg = 0;
3070         shadow_reg += sizeof(u64);
3071         shadow_reg_dma += sizeof(u64);
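             /* The lbq/sbq base indirect areas hold tables of DMA
              * addresses, one entry per DB_PAGE_SIZE chunk of the buffer
              * queue; they are filled in below before the CQICB is
              * downloaded.
              */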
3072         rx_ring->lbq_base_indirect = shadow_reg;
3073         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3074         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3075         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3076         rx_ring->sbq_base_indirect = shadow_reg;
3077         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3078
3079         /* PCI doorbell mem area + 0x00 for consumer index register */
3080         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3081         rx_ring->cnsmr_idx = 0;
3082         rx_ring->curr_entry = rx_ring->cq_base;
3083
3084         /* PCI doorbell mem area + 0x04 for valid register */
3085         rx_ring->valid_db_reg = doorbell_area + 0x04;
3086
3087         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3088         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3089
3090         /* PCI doorbell mem area + 0x1c */
3091         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3092
3093         memset((void *)cqicb, 0, sizeof(struct cqicb));
3094         cqicb->msix_vect = rx_ring->irq;
3095
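             /* A queue length of 65536 does not fit in the 16-bit field
              * and is encoded as 0.
              */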
3096         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3097         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3098
3099         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3100
3101         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3102
3103         /*
3104          * Set up the control block load flags.
3105          */
3106         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3107             FLAGS_LV |          /* Load MSI-X vector */
3108             FLAGS_LI;           /* Load irq delay values */
3109         if (rx_ring->lbq_len) {
3110                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3111                 tmp = (u64)rx_ring->lbq_base_dma;
3112                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3113                 page_entries = 0;
3114                 do {
3115                         *base_indirect_ptr = cpu_to_le64(tmp);
3116                         tmp += DB_PAGE_SIZE;
3117                         base_indirect_ptr++;
3118                         page_entries++;
3119                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3120                 cqicb->lbq_addr =
3121                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3122                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3123                         (u16) rx_ring->lbq_buf_size;
3124                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3125                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3126                         (u16) rx_ring->lbq_len;
3127                 cqicb->lbq_len = cpu_to_le16(bq_len);
3128                 rx_ring->lbq_prod_idx = 0;
3129                 rx_ring->lbq_curr_idx = 0;
3130                 rx_ring->lbq_clean_idx = 0;
3131                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3132         }
3133         if (rx_ring->sbq_len) {
3134                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3135                 tmp = (u64)rx_ring->sbq_base_dma;
3136                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3137                 page_entries = 0;
3138                 do {
3139                         *base_indirect_ptr = cpu_to_le64(tmp);
3140                         tmp += DB_PAGE_SIZE;
3141                         base_indirect_ptr++;
3142                         page_entries++;
3143                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3144                 cqicb->sbq_addr =
3145                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3146                 cqicb->sbq_buf_size =
3147                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3148                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3149                         (u16) rx_ring->sbq_len;
3150                 cqicb->sbq_len = cpu_to_le16(bq_len);
3151                 rx_ring->sbq_prod_idx = 0;
3152                 rx_ring->sbq_curr_idx = 0;
3153                 rx_ring->sbq_clean_idx = 0;
3154                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3155         }
3156         switch (rx_ring->type) {
3157         case TX_Q:
3158                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3159                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3160                 break;
3161         case RX_Q:
3162                 /* Inbound completion handling rx_rings run in
3163                  * separate NAPI contexts.
3164                  */
3165                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3166                                64);
3167                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3168                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3169                 break;
3170         default:
3171                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3172                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3173         }
3174         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3175                      "Initializing rx work queue.\n");
3176         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3177                            CFG_LCQ, rx_ring->cq_id);
3178         if (err) {
3179                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3180                 return err;
3181         }
3182         return err;
3183 }
3184
3185 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3186 {
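             /* The cast relies on struct tx_ring embedding its wqicb as
              * the first member, so the ring pointer doubles as the
              * control block that is downloaded to the chip below.
              */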
3187         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3188         void __iomem *doorbell_area =
3189             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3190         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3191             (tx_ring->wq_id * sizeof(u64));
3192         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3193             (tx_ring->wq_id * sizeof(u64));
3194         int err = 0;
3195
3196         /*
3197          * Assign doorbell registers for this tx_ring.
3198          */
3199         /* TX PCI doorbell mem area for tx producer index */
3200         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3201         tx_ring->prod_idx = 0;
3202         /* TX PCI doorbell mem area + 0x04 */
3203         tx_ring->valid_db_reg = doorbell_area + 0x04;
3204
3205         /*
3206          * Assign shadow registers for this tx_ring.
3207          */
3208         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3209         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3210
3211         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3212         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3213                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3214         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3215         wqicb->rid = 0;
3216         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3217
3218         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3219
3220         ql_init_tx_ring(qdev, tx_ring);
3221
3222         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3223                            (u16) tx_ring->wq_id);
3224         if (err) {
3225                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3226                 return err;
3227         }
3228         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3229                      "Successfully loaded WQICB.\n");
3230         return err;
3231 }
3232
3233 static void ql_disable_msix(struct ql_adapter *qdev)
3234 {
3235         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3236                 pci_disable_msix(qdev->pdev);
3237                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3238                 kfree(qdev->msi_x_entry);
3239                 qdev->msi_x_entry = NULL;
3240         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3241                 pci_disable_msi(qdev->pdev);
3242                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3243         }
3244 }
3245
3246 /* We start by trying to get the number of vectors
3247  * stored in qdev->intr_count. If we don't get that
3248  * many then we reduce the count and try again.
3249  */
3250 static void ql_enable_msix(struct ql_adapter *qdev)
3251 {
3252         int i, err;
3253
3254         /* Get the MSIX vectors. */
3255         if (qlge_irq_type == MSIX_IRQ) {
3256                 /* Try to alloc space for the msix struct,
3257                  * if it fails then go to MSI/legacy.
3258                  */
3259                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3260                                             sizeof(struct msix_entry),
3261                                             GFP_KERNEL);
3262                 if (!qdev->msi_x_entry) {
3263                         qlge_irq_type = MSI_IRQ;
3264                         goto msi;
3265                 }
3266
3267                 for (i = 0; i < qdev->intr_count; i++)
3268                         qdev->msi_x_entry[i].entry = i;
3269
3270                 /* Loop to get our vectors.  We start with
3271                  * what we want and settle for what we get.
3272                  */
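                     /* pci_enable_msix() returns 0 on success, a negative
                      * errno on failure, or the number of vectors that
                      * could have been allocated; in that last case retry
                      * with the smaller count.
                      */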
3273                 do {
3274                         err = pci_enable_msix(qdev->pdev,
3275                                 qdev->msi_x_entry, qdev->intr_count);
3276                         if (err > 0)
3277                                 qdev->intr_count = err;
3278                 } while (err > 0);
3279
3280                 if (err < 0) {
3281                         kfree(qdev->msi_x_entry);
3282                         qdev->msi_x_entry = NULL;
3283                         netif_warn(qdev, ifup, qdev->ndev,
3284                                    "MSI-X Enable failed, trying MSI.\n");
3285                         qdev->intr_count = 1;
3286                         qlge_irq_type = MSI_IRQ;
3287                 } else if (err == 0) {
3288                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3289                         netif_info(qdev, ifup, qdev->ndev,
3290                                    "MSI-X Enabled, got %d vectors.\n",
3291                                    qdev->intr_count);
3292                         return;
3293                 }
3294         }
3295 msi:
3296         qdev->intr_count = 1;
3297         if (qlge_irq_type == MSI_IRQ) {
3298                 if (!pci_enable_msi(qdev->pdev)) {
3299                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3300                         netif_info(qdev, ifup, qdev->ndev,
3301                                    "Running with MSI interrupts.\n");
3302                         return;
3303                 }
3304         }
3305         qlge_irq_type = LEG_IRQ;
3306         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3307                      "Running with legacy interrupts.\n");
3308 }
3309
3310 /* Each vector services 1 RSS ring and 1 or more
3311  * TX completion rings.  This function loops through
3312  * the TX completion rings and assigns the vector that
3313  * will service it.  An example would be if there are
3314  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3315  * This would mean that vector 0 would service RSS ring 0
3316  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3317  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3318  */
3319 static void ql_set_tx_vect(struct ql_adapter *qdev)
3320 {
3321         int i, j, vect;
3322         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3323
3324         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3325                 /* Assign irq vectors to TX completion rx_rings. */
3326                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3327                                          i < qdev->rx_ring_count; i++) {
3328                         if (j == tx_rings_per_vector) {
3329                                 vect++;
3330                                 j = 0;
3331                         }
3332                         qdev->rx_ring[i].irq = vect;
3333                         j++;
3334                 }
3335         } else {
3336                 /* For single vector all rings have an irq
3337                  * of zero.
3338                  */
3339                 for (i = 0; i < qdev->rx_ring_count; i++)
3340                         qdev->rx_ring[i].irq = 0;
3341         }
3342 }
3343
3344 /* Set the interrupt mask for this vector.  Each vector
3345  * will service 1 RSS ring and 1 or more TX completion
3346  * rings.  This function sets up a bit mask per vector
3347  * that indicates which rings it services.
3348  */
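     /* For example, with 2 vectors and 8 TX completion rings, vector 1's
      * mask covers the cq_id of RSS ring 1 plus the cq_ids of TX
      * completion rings 4 through 7.
      */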
3349 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3350 {
3351         int j, vect = ctx->intr;
3352         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3353
3354         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3355                 /* Add the RSS ring serviced by this vector
3356                  * to the mask.
3357                  */
3358                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3359                 /* Add the TX ring(s) serviced by this vector
3360                  * to the mask. */
3361                 for (j = 0; j < tx_rings_per_vector; j++) {
3362                         ctx->irq_mask |=
3363                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3364                         (vect * tx_rings_per_vector) + j].cq_id);
3365                 }
3366         } else {
3367                 /* For single vector we just shift each queue's
3368                  * ID into the mask.
3369                  */
3370                 for (j = 0; j < qdev->rx_ring_count; j++)
3371                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3372         }
3373 }
3374
3375 /*
3376  * Here we build the intr_context structures based on
3377  * our rx_ring count and intr vector count.
3378  * The intr_context structure is used to hook each vector
3379  * to possibly different handlers.
3380  */
3381 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3382 {
3383         int i = 0;
3384         struct intr_context *intr_context = &qdev->intr_context[0];
3385
3386         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3387                 /* Each rx_ring has its
3388                  * own intr_context since we have separate
3389                  * vectors for each queue.
3390                  */
3391                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3392                         qdev->rx_ring[i].irq = i;
3393                         intr_context->intr = i;
3394                         intr_context->qdev = qdev;
3395                         /* Set up this vector's bit-mask that indicates
3396                          * which queues it services.
3397                          */
3398                         ql_set_irq_mask(qdev, intr_context);
3399                         /*
3400                          * We set up each vector's enable/disable/read bits so
3401                          * there are no bit/mask calculations in the critical path.
3402                          */
3403                         intr_context->intr_en_mask =
3404                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3405                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3406                             | i;
3407                         intr_context->intr_dis_mask =
3408                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3409                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3410                             INTR_EN_IHD | i;
3411                         intr_context->intr_read_mask =
3412                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3413                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3414                             i;
3415                         if (i == 0) {
3416                                 /* The first vector/queue handles
3417                                  * broadcast/multicast, fatal errors,
3418                                  * and firmware events.  This is in addition
3419                                  * to normal inbound NAPI processing.
3420                                  */
3421                                 intr_context->handler = qlge_isr;
3422                                 sprintf(intr_context->name, "%s-rx-%d",
3423                                         qdev->ndev->name, i);
3424                         } else {
3425                                 /*
3426                                  * Inbound queues handle unicast frames only.
3427                                  */
3428                                 intr_context->handler = qlge_msix_rx_isr;
3429                                 sprintf(intr_context->name, "%s-rx-%d",
3430                                         qdev->ndev->name, i);
3431                         }
3432                 }
3433         } else {
3434                 /*
3435                  * All rx_rings use the same intr_context since
3436                  * there is only one vector.
3437                  */
3438                 intr_context->intr = 0;
3439                 intr_context->qdev = qdev;
3440                 /*
3441                  * We set up each vector's enable/disable/read bits so
3442                  * there are no bit/mask calculations in the critical path.
3443                  */
3444                 intr_context->intr_en_mask =
3445                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3446                 intr_context->intr_dis_mask =
3447                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3448                     INTR_EN_TYPE_DISABLE;
3449                 intr_context->intr_read_mask =
3450                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3451                 /*
3452                  * Single interrupt means one handler for all rings.
3453                  */
3454                 intr_context->handler = qlge_isr;
3455                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3456                 /* Set up this vector's bit-mask that indicates
3457                  * which queues it services. In this case there is
3458                  * a single vector so it will service all RSS and
3459                  * TX completion rings.
3460                  */
3461                 ql_set_irq_mask(qdev, intr_context);
3462         }
3463         /* Tell the TX completion rings which MSIx vector
3464          * they will be using.
3465          */
3466         ql_set_tx_vect(qdev);
3467 }
3468
3469 static void ql_free_irq(struct ql_adapter *qdev)
3470 {
3471         int i;
3472         struct intr_context *intr_context = &qdev->intr_context[0];
3473
3474         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3475                 if (intr_context->hooked) {
3476                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3477                                 free_irq(qdev->msi_x_entry[i].vector,
3478                                          &qdev->rx_ring[i]);
3479                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3480                                              "freeing msix interrupt %d.\n", i);
3481                         } else {
3482                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3483                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3484                                              "freeing msi interrupt %d.\n", i);
3485                         }
3486                 }
3487         }
3488         ql_disable_msix(qdev);
3489 }
3490
3491 static int ql_request_irq(struct ql_adapter *qdev)
3492 {
3493         int i;
3494         int status = 0;
3495         struct pci_dev *pdev = qdev->pdev;
3496         struct intr_context *intr_context = &qdev->intr_context[0];
3497
3498         ql_resolve_queues_to_irqs(qdev);
3499
3500         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3501                 atomic_set(&intr_context->irq_cnt, 0);
3502                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3503                         status = request_irq(qdev->msi_x_entry[i].vector,
3504                                              intr_context->handler,
3505                                              0,
3506                                              intr_context->name,
3507                                              &qdev->rx_ring[i]);
3508                         if (status) {
3509                                 netif_err(qdev, ifup, qdev->ndev,
3510                                           "Failed request for MSIX interrupt %d.\n",
3511                                           i);
3512                                 goto err_irq;
3513                         } else {
3514                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3515                                              "Hooked intr %d, queue type %s, with name %s.\n",
3516                                              i,
3517                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3518                                              "DEFAULT_Q" :
3519                                              qdev->rx_ring[i].type == TX_Q ?
3520                                              "TX_Q" :
3521                                              qdev->rx_ring[i].type == RX_Q ?
3522                                              "RX_Q" : "",
3523                                              intr_context->name);
3524                         }
3525                 } else {
3526                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3527                                      "trying msi or legacy interrupts.\n");
3528                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3529                                      "%s: irq = %d.\n", __func__, pdev->irq);
3530                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3531                                      "%s: context->name = %s.\n", __func__,
3532                                      intr_context->name);
3533                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3534                                      "%s: dev_id = 0x%p.\n", __func__,
3535                                      &qdev->rx_ring[0]);
3536                         status =
3537                             request_irq(pdev->irq, qlge_isr,
3538                                         test_bit(QL_MSI_ENABLED,
3539                                                  &qdev->flags) ?
3540                                                  0 : IRQF_SHARED,
3541                                         intr_context->name, &qdev->rx_ring[0]);
3542                         if (status)
3543                                 goto err_irq;
3544
3545                         netif_err(qdev, ifup, qdev->ndev,
3546                                   "Hooked intr %d, queue type %s, with name %s.\n",
3547                                   i,
3548                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3549                                   "DEFAULT_Q" :
3550                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3551                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3552                                   intr_context->name);
3553                 }
3554                 intr_context->hooked = 1;
3555         }
3556         return status;
3557 err_irq:
3558         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3559         ql_free_irq(qdev);
3560         return status;
3561 }
3562
3563 static int ql_start_rss(struct ql_adapter *qdev)
3564 {
3565         static const u8 init_hash_seed[] = {
3566                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3567                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3568                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3569                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3570                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3571         };
3572         struct ricb *ricb = &qdev->ricb;
3573         int status = 0;
3574         int i;
3575         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3576
3577         memset((void *)ricb, 0, sizeof(*ricb));
3578
3579         ricb->base_cq = RSS_L4K;
3580         ricb->flags =
3581                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3582         ricb->mask = cpu_to_le16((u16)(0x3ff));
3583
3584         /*
3585          * Fill out the Indirection Table.
3586          */
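             /* 1024 entries, indexed by the low 10 bits of the RSS hash
              * (the 0x3ff mask above).  The AND below spreads buckets
              * evenly only when rss_ring_count is a power of two.
              */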
3587         for (i = 0; i < 1024; i++)
3588                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3589
3590         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3591         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3592
3593         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3594
3595         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3596         if (status) {
3597                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3598                 return status;
3599         }
3600         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3601                      "Successfully loaded RICB.\n");
3602         return status;
3603 }
3604
3605 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3606 {
3607         int i, status = 0;
3608
3609         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3610         if (status)
3611                 return status;
3612         /* Clear all the entries in the routing table. */
3613         for (i = 0; i < 16; i++) {
3614                 status = ql_set_routing_reg(qdev, i, 0, 0);
3615                 if (status) {
3616                         netif_err(qdev, ifup, qdev->ndev,
3617                                   "Failed to init routing register for CAM packets.\n");
3618                         break;
3619                 }
3620         }
3621         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3622         return status;
3623 }
3624
3625 /* Initialize the frame-to-queue routing. */
3626 static int ql_route_initialize(struct ql_adapter *qdev)
3627 {
3628         int status = 0;
3629
3630         /* Clear all the entries in the routing table. */
3631         status = ql_clear_routing_entries(qdev);
3632         if (status)
3633                 return status;
3634
3635         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3636         if (status)
3637                 return status;
3638
3639         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3640                                                 RT_IDX_IP_CSUM_ERR, 1);
3641         if (status) {
3642                 netif_err(qdev, ifup, qdev->ndev,
3643                         "Failed to init routing register "
3644                         "for IP CSUM error packets.\n");
3645                 goto exit;
3646         }
3647         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3648                                                 RT_IDX_TU_CSUM_ERR, 1);
3649         if (status) {
3650                 netif_err(qdev, ifup, qdev->ndev,
3651                         "Failed to init routing register "
3652                         "for TCP/UDP CSUM error packets.\n");
3653                 goto exit;
3654         }
3655         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3656         if (status) {
3657                 netif_err(qdev, ifup, qdev->ndev,
3658                           "Failed to init routing register for broadcast packets.\n");
3659                 goto exit;
3660         }
3661         /* If we have more than one inbound queue, then turn on RSS in the
3662          * routing block.
3663          */
3664         if (qdev->rss_ring_count > 1) {
3665                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3666                                         RT_IDX_RSS_MATCH, 1);
3667                 if (status) {
3668                         netif_err(qdev, ifup, qdev->ndev,
3669                                   "Failed to init routing register for MATCH RSS packets.\n");
3670                         goto exit;
3671                 }
3672         }
3673
3674         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3675                                     RT_IDX_CAM_HIT, 1);
3676         if (status)
3677                 netif_err(qdev, ifup, qdev->ndev,
3678                           "Failed to init routing register for CAM packets.\n");
3679 exit:
3680         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3681         return status;
3682 }
3683
3684 int ql_cam_route_initialize(struct ql_adapter *qdev)
3685 {
3686         int status, set;
3687
3688         /* Check if the link is up and use that to
3689          * determine if we are setting or clearing
3690          * the MAC address in the CAM.
3691          */
3692         set = ql_read32(qdev, STS);
3693         set &= qdev->port_link_up;
3694         status = ql_set_mac_addr(qdev, set);
3695         if (status) {
3696                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3697                 return status;
3698         }
3699
3700         status = ql_route_initialize(qdev);
3701         if (status)
3702                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3703
3704         return status;
3705 }
3706
3707 static int ql_adapter_initialize(struct ql_adapter *qdev)
3708 {
3709         u32 value, mask;
3710         int i;
3711         int status = 0;
3712
3713         /*
3714          * Set up the System register to halt on errors.
3715          */
3716         value = SYS_EFE | SYS_FAE;
3717         mask = value << 16;
3718         ql_write32(qdev, SYS, mask | value);
3719
3720         /* Set the default queue, and VLAN behavior. */
3721         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3722         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3723         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3724
3725         /* Set the MPI interrupt to enabled. */
3726         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3727
3728         /* Enable the function, set pagesize, enable error checking. */
3729         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3730             FSC_EC | FSC_VM_PAGE_4K;
3731         value |= SPLT_SETTING;
3732
3733         /* Set/clear header splitting. */
3734         mask = FSC_VM_PAGESIZE_MASK |
3735             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3736         ql_write32(qdev, FSC, mask | value);
3737
3738         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3739
3740         /* Set RX packet routing to use the port/PCI function on which the
3741          * packet arrived, in addition to the usual frame routing.
3742          * This is helpful on bonding where both interfaces can have
3743          * the same MAC address.
3744          */
3745         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3746         /* Reroute all packets to our Interface.
3747          * They may have been routed to MPI firmware
3748          * due to WOL.
3749          */
3750         value = ql_read32(qdev, MGMT_RCV_CFG);
3751         value &= ~MGMT_RCV_CFG_RM;
3752         mask = 0xffff0000;
3753
3754         /* Sticky reg needs clearing due to WOL. */
3755         ql_write32(qdev, MGMT_RCV_CFG, mask);
3756         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3757
3758         /* Default WOL is enabled on Mezz cards */
3759         if (qdev->pdev->subsystem_device == 0x0068 ||
3760                         qdev->pdev->subsystem_device == 0x0180)
3761                 qdev->wol = WAKE_MAGIC;
3762
3763         /* Start up the rx queues. */
3764         for (i = 0; i < qdev->rx_ring_count; i++) {
3765                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3766                 if (status) {
3767                         netif_err(qdev, ifup, qdev->ndev,
3768                                   "Failed to start rx ring[%d].\n", i);
3769                         return status;
3770                 }
3771         }
3772
3773         /* If there is more than one inbound completion queue
3774          * then download a RICB to configure RSS.
3775          */
3776         if (qdev->rss_ring_count > 1) {
3777                 status = ql_start_rss(qdev);
3778                 if (status) {
3779                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3780                         return status;
3781                 }
3782         }
3783
3784         /* Start up the tx queues. */
3785         for (i = 0; i < qdev->tx_ring_count; i++) {
3786                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3787                 if (status) {
3788                         netif_err(qdev, ifup, qdev->ndev,
3789                                   "Failed to start tx ring[%d].\n", i);
3790                         return status;
3791                 }
3792         }
3793
3794         /* Initialize the port and set the max framesize. */
3795         status = qdev->nic_ops->port_initialize(qdev);
3796         if (status)
3797                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3798
3799         /* Set up the MAC address and frame routing filter. */
3800         status = ql_cam_route_initialize(qdev);
3801         if (status) {
3802                 netif_err(qdev, ifup, qdev->ndev,
3803                           "Failed to init CAM/Routing tables.\n");
3804                 return status;
3805         }
3806
3807         /* Start NAPI for the RSS queues. */
3808         for (i = 0; i < qdev->rss_ring_count; i++) {
3809                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3810                              "Enabling NAPI for rx_ring[%d].\n", i);
3811                 napi_enable(&qdev->rx_ring[i].napi);
3812         }
3813
3814         return status;
3815 }
3816
3817 /* Issue soft reset to chip. */
3818 static int ql_adapter_reset(struct ql_adapter *qdev)
3819 {
3820         u32 value;
3821         int status = 0;
3822         unsigned long end_jiffies;
3823
3824         /* Clear all the entries in the routing table. */
3825         status = ql_clear_routing_entries(qdev);
3826         if (status) {
3827                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3828                 return status;
3829         }
3830
3831         end_jiffies = jiffies +
3832                 max((unsigned long)1, usecs_to_jiffies(30));
3833
3834         /* If the recovery bit is set, skip the mailbox command and
3835          * clear the bit; otherwise we are in the normal reset process.
3836          */
3837         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3838                 /* Stop management traffic. */
3839                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3840
3841                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3842                 ql_wait_fifo_empty(qdev);
3843         } else
3844                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3845
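             /* Issue the function reset and poll below until the FR bit clears. */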
3846         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3847
3848         do {
3849                 value = ql_read32(qdev, RST_FO);
3850                 if ((value & RST_FO_FR) == 0)
3851                         break;
3852                 cpu_relax();
3853         } while (time_before(jiffies, end_jiffies));
3854
3855         if (value & RST_FO_FR) {
3856                 netif_err(qdev, ifdown, qdev->ndev,
3857                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3858                 status = -ETIMEDOUT;
3859         }
3860
3861         /* Resume management traffic. */
3862         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3863         return status;
3864 }
3865
3866 static void ql_display_dev_info(struct net_device *ndev)
3867 {
3868         struct ql_adapter *qdev = netdev_priv(ndev);
3869
3870         netif_info(qdev, probe, qdev->ndev,
3871                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3872                    "XG Roll = %d, XG Rev = %d.\n",
3873                    qdev->func,
3874                    qdev->port,
3875                    qdev->chip_rev_id & 0x0000000f,
3876                    qdev->chip_rev_id >> 4 & 0x0000000f,
3877                    qdev->chip_rev_id >> 8 & 0x0000000f,
3878                    qdev->chip_rev_id >> 12 & 0x0000000f);
3879         netif_info(qdev, probe, qdev->ndev,
3880                    "MAC address %pM\n", ndev->dev_addr);
3881 }
3882
3883 static int ql_wol(struct ql_adapter *qdev)
3884 {
3885         int status = 0;
3886         u32 wol = MB_WOL_DISABLE;
3887
3888         /* The CAM is still intact after a reset, but if we
3889          * are doing WOL, then we may need to program the
3890          * routing regs. We would also need to issue the mailbox
3891          * commands to instruct the MPI what to do per the ethtool
3892          * settings.
3893          */
3894
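             /* Only magic-packet wake is supported; reject any other
              * requested wake flags.
              */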
3895         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3896                         WAKE_MCAST | WAKE_BCAST)) {
3897                 netif_err(qdev, ifdown, qdev->ndev,
3898                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3899                           qdev->wol);
3900                 return -EINVAL;
3901         }
3902
3903         if (qdev->wol & WAKE_MAGIC) {
3904                 status = ql_mb_wol_set_magic(qdev, 1);
3905                 if (status) {
3906                         netif_err(qdev, ifdown, qdev->ndev,
3907                                   "Failed to set magic packet on %s.\n",
3908                                   qdev->ndev->name);
3909                         return status;
3910                 }
3911                 netif_info(qdev, drv, qdev->ndev,
3912                            "Enabled magic packet successfully on %s.\n",
3913                            qdev->ndev->name);
3914
3915                 wol |= MB_WOL_MAGIC_PKT;
3916         }
3917
3918         if (qdev->wol) {
3919                 wol |= MB_WOL_MODE_ON;
3920                 status = ql_mb_wol_mode(qdev, wol);
3921                 netif_err(qdev, drv, qdev->ndev,
3922                           "WOL %s (wol code 0x%x) on %s\n",
3923                           (status == 0) ? "Successfully set" : "Failed",
3924                           wol, qdev->ndev->name);
3925         }
3926
3927         return status;
3928 }
3929
3930 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3931 {
3932
3933         /* Don't kill the reset worker thread if we
3934          * are in the process of recovery.
3935          */
3936         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3937                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3938         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3939         cancel_delayed_work_sync(&qdev->mpi_work);
3940         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3941         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3942         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3943 }
3944
3945 static int ql_adapter_down(struct ql_adapter *qdev)
3946 {
3947         int i, status = 0;
3948
3949         ql_link_off(qdev);
3950
3951         ql_cancel_all_work_sync(qdev);
3952
3953         for (i = 0; i < qdev->rss_ring_count; i++)
3954                 napi_disable(&qdev->rx_ring[i].napi);
3955
3956         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3957
3958         ql_disable_interrupts(qdev);
3959
3960         ql_tx_ring_clean(qdev);
3961
3962         /* Call netif_napi_del() from a common point.
3963          */
3964         for (i = 0; i < qdev->rss_ring_count; i++)
3965                 netif_napi_del(&qdev->rx_ring[i].napi);
3966
3967         status = ql_adapter_reset(qdev);
3968         if (status)
3969                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3970                           qdev->func);
3971         ql_free_rx_buffers(qdev);
3972
3973         return status;
3974 }
3975
3976 static int ql_adapter_up(struct ql_adapter *qdev)
3977 {
3978         int err = 0;
3979
3980         err = ql_adapter_initialize(qdev);
3981         if (err) {
3982                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3983                 goto err_init;
3984         }
3985         set_bit(QL_ADAPTER_UP, &qdev->flags);
3986         ql_alloc_rx_buffers(qdev);
3987         /* If the port is initialized and the
3988          * link is up then turn on the carrier.
3989          */
3990         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3991                         (ql_read32(qdev, STS) & qdev->port_link_up))
3992                 ql_link_on(qdev);
3993         /* Restore rx mode. */
3994         clear_bit(QL_ALLMULTI, &qdev->flags);
3995         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3996         qlge_set_multicast_list(qdev->ndev);
3997
3998         /* Restore vlan setting. */
3999         qlge_restore_vlan(qdev);
4000
4001         ql_enable_interrupts(qdev);
4002         ql_enable_all_completion_interrupts(qdev);
4003         netif_tx_start_all_queues(qdev->ndev);
4004
4005         return 0;
4006 err_init:
4007         ql_adapter_reset(qdev);
4008         return err;
4009 }
4010
4011 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4012 {
4013         ql_free_mem_resources(qdev);
4014         ql_free_irq(qdev);
4015 }
4016
4017 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4018 {
4019         int status = 0;
4020
4021         if (ql_alloc_mem_resources(qdev)) {
4022                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4023                 return -ENOMEM;
4024         }
4025         status = ql_request_irq(qdev);
4026         return status;
4027 }
4028
4029 static int qlge_close(struct net_device *ndev)
4030 {
4031         struct ql_adapter *qdev = netdev_priv(ndev);
4032
4033         /* If we hit the pci_channel_io_perm_failure
4034          * condition, then we have already
4035          * brought the adapter down.
4036          */
4037         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4038                 netif_err(qdev, drv, qdev->ndev, "EEH fatal error; device already brought down.\n");
4039                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4040                 return 0;
4041         }
4042
4043         /*
4044          * Wait for device to recover from a reset.
4045          * (Rarely happens, but possible.)
4046          */
4047         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4048                 msleep(1);
4049         ql_adapter_down(qdev);
4050         ql_release_adapter_resources(qdev);
4051         return 0;
4052 }
4053
4054 static int ql_configure_rings(struct ql_adapter *qdev)
4055 {
4056         int i;
4057         struct rx_ring *rx_ring;
4058         struct tx_ring *tx_ring;
4059         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4060         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4061                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4062
4063         qdev->lbq_buf_order = get_order(lbq_buf_len);
4064
4065         /* In a perfect world we have one RSS ring for each CPU
4066          * and each has its own vector.  To do that we ask for
4067          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4068          * vector count to what we actually get.  We then
4069          * allocate an RSS ring for each.
4070          * Essentially, we are doing min(cpu_count, msix_vector_count).
4071          */
4072         qdev->intr_count = cpu_cnt;
4073         ql_enable_msix(qdev);
4074         /* Adjust the RSS ring count to the actual vector count. */
4075         qdev->rss_ring_count = qdev->intr_count;
4076         qdev->tx_ring_count = cpu_cnt;
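             /* The rx_ring array holds the inbound (RSS) queues followed by
              * one outbound completion queue for each tx ring.
              */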
4077         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4078
4079         for (i = 0; i < qdev->tx_ring_count; i++) {
4080                 tx_ring = &qdev->tx_ring[i];
4081                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4082                 tx_ring->qdev = qdev;
4083                 tx_ring->wq_id = i;
4084                 tx_ring->wq_len = qdev->tx_ring_size;
4085                 tx_ring->wq_size =
4086                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4087
4088                 /*
4089                  * The completion queue IDs for the tx rings start
4090                  * immediately after the rss rings.
4091                  */
4092                 tx_ring->cq_id = qdev->rss_ring_count + i;
4093         }
4094
4095         for (i = 0; i < qdev->rx_ring_count; i++) {
4096                 rx_ring = &qdev->rx_ring[i];
4097                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4098                 rx_ring->qdev = qdev;
4099                 rx_ring->cq_id = i;
4100                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4101                 if (i < qdev->rss_ring_count) {
4102                         /*
4103                          * Inbound (RSS) queues.
4104                          */
4105                         rx_ring->cq_len = qdev->rx_ring_size;
4106                         rx_ring->cq_size =
4107                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4108                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4109                         rx_ring->lbq_size =
4110                             rx_ring->lbq_len * sizeof(__le64);
4111                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4112                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4113                                      "lbq_buf_size %d, order = %d\n",
4114                                      rx_ring->lbq_buf_size,
4115                                      qdev->lbq_buf_order);
4116                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4117                         rx_ring->sbq_size =
4118                             rx_ring->sbq_len * sizeof(__le64);
4119                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4120                         rx_ring->type = RX_Q;
4121                 } else {
4122                         /*
4123                          * Outbound queue handles outbound completions only.
4124                          */
4125                         /* outbound cq is same size as tx_ring it services. */
4126                         rx_ring->cq_len = qdev->tx_ring_size;
4127                         rx_ring->cq_size =
4128                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4129                         rx_ring->lbq_len = 0;
4130                         rx_ring->lbq_size = 0;
4131                         rx_ring->lbq_buf_size = 0;
4132                         rx_ring->sbq_len = 0;
4133                         rx_ring->sbq_size = 0;
4134                         rx_ring->sbq_buf_size = 0;
4135                         rx_ring->type = TX_Q;
4136                 }
4137         }
4138         return 0;
4139 }
4140
4141 static int qlge_open(struct net_device *ndev)
4142 {
4143         int err = 0;
4144         struct ql_adapter *qdev = netdev_priv(ndev);
4145
4146         err = ql_adapter_reset(qdev);
4147         if (err)
4148                 return err;
4149
4150         err = ql_configure_rings(qdev);
4151         if (err)
4152                 return err;
4153
4154         err = ql_get_adapter_resources(qdev);
4155         if (err)
4156                 goto error_up;
4157
4158         err = ql_adapter_up(qdev);
4159         if (err)
4160                 goto error_up;
4161
4162         return err;
4163
4164 error_up:
4165         ql_release_adapter_resources(qdev);
4166         return err;
4167 }
4168
4169 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4170 {
4171         struct rx_ring *rx_ring;
4172         int i, status;
4173         u32 lbq_buf_len;
4174
4175         /* Wait for an outstanding reset to complete. */
4176         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4177                 int i = 3;
4178                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4179                         netif_err(qdev, ifup, qdev->ndev,
4180                                   "Waiting for adapter UP...\n");
4181                         ssleep(1);
4182                 }
4183
4184                 if (!i) {
4185                         netif_err(qdev, ifup, qdev->ndev,
4186                                   "Timed out waiting for adapter UP\n");
4187                         return -ETIMEDOUT;
4188                 }
4189         }
4190
4191         status = ql_adapter_down(qdev);
4192         if (status)
4193                 goto error;
4194
4195         /* Get the new rx buffer size. */
4196         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4197                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4198         qdev->lbq_buf_order = get_order(lbq_buf_len);
4199
4200         for (i = 0; i < qdev->rss_ring_count; i++) {
4201                 rx_ring = &qdev->rx_ring[i];
4202                 /* Set the new size. */
4203                 rx_ring->lbq_buf_size = lbq_buf_len;
4204         }
4205
4206         status = ql_adapter_up(qdev);
4207         if (status)
4208                 goto error;
4209
4210         return status;
4211 error:
4212         netif_alert(qdev, ifup, qdev->ndev,
4213                     "Driver up/down cycle failed, closing device.\n");
4214         set_bit(QL_ADAPTER_UP, &qdev->flags);
4215         dev_close(qdev->ndev);
4216         return status;
4217 }
4218
4219 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4220 {
4221         struct ql_adapter *qdev = netdev_priv(ndev);
4222         int status;
4223
4224         if (ndev->mtu == 1500 && new_mtu == 9000) {
4225                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4226         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4227                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4228         } else
4229                 return -EINVAL;
4230
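             /* Kick the port config worker so the firmware is updated for
              * the new MTU.
              */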
4231         queue_delayed_work(qdev->workqueue,
4232                         &qdev->mpi_port_cfg_work, 3*HZ);
4233
4234         ndev->mtu = new_mtu;
4235
4236         if (!netif_running(qdev->ndev))
4237                 return 0;
4239
4240         status = ql_change_rx_buffers(qdev);
4241         if (status) {
4242                 netif_err(qdev, ifup, qdev->ndev,
4243                           "Changing MTU failed.\n");
4244         }
4245
4246         return status;
4247 }
4248
4249 static struct net_device_stats *qlge_get_stats(struct net_device
4250                                                *ndev)
4251 {
4252         struct ql_adapter *qdev = netdev_priv(ndev);
4253         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4254         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4255         unsigned long pkts, mcast, dropped, errors, bytes;
4256         int i;
4257
4258         /* Get RX stats. */
4259         pkts = mcast = dropped = errors = bytes = 0;
4260         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4261                 pkts += rx_ring->rx_packets;
4262                 bytes += rx_ring->rx_bytes;
4263                 dropped += rx_ring->rx_dropped;
4264                 errors += rx_ring->rx_errors;
4265                 mcast += rx_ring->rx_multicast;
4266         }
4267         ndev->stats.rx_packets = pkts;
4268         ndev->stats.rx_bytes = bytes;
4269         ndev->stats.rx_dropped = dropped;
4270         ndev->stats.rx_errors = errors;
4271         ndev->stats.multicast = mcast;
4272
4273         /* Get TX stats. */
4274         pkts = errors = bytes = 0;
4275         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4276                 pkts += tx_ring->tx_packets;
4277                 bytes += tx_ring->tx_bytes;
4278                 errors += tx_ring->tx_errors;
4279         }
4280         ndev->stats.tx_packets = pkts;
4281         ndev->stats.tx_bytes = bytes;
4282         ndev->stats.tx_errors = errors;
4283         return &ndev->stats;
4284 }
4285
4286 static void qlge_set_multicast_list(struct net_device *ndev)
4287 {
4288         struct ql_adapter *qdev = netdev_priv(ndev);
4289         struct netdev_hw_addr *ha;
4290         int i, status;
4291
4292         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4293         if (status)
4294                 return;
4295         /*
4296          * Set or clear promiscuous mode if a
4297          * transition is taking place.
4298          */
4299         if (ndev->flags & IFF_PROMISC) {
4300                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4301                         if (ql_set_routing_reg
4302                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4303                                 netif_err(qdev, hw, qdev->ndev,
4304                                           "Failed to set promiscuous mode.\n");
4305                         } else {
4306                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4307                         }
4308                 }
4309         } else {
4310                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4311                         if (ql_set_routing_reg
4312                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4313                                 netif_err(qdev, hw, qdev->ndev,
4314                                           "Failed to clear promiscuous mode.\n");
4315                         } else {
4316                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4317                         }
4318                 }
4319         }
4320
4321         /*
4322          * Set or clear all multicast mode if a
4323          * transition is taking place.
4324          */
4325         if ((ndev->flags & IFF_ALLMULTI) ||
4326             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4327                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4328                         if (ql_set_routing_reg
4329                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4330                                 netif_err(qdev, hw, qdev->ndev,
4331                                           "Failed to set all-multi mode.\n");
4332                         } else {
4333                                 set_bit(QL_ALLMULTI, &qdev->flags);
4334                         }
4335                 }
4336         } else {
4337                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4338                         if (ql_set_routing_reg
4339                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4340                                 netif_err(qdev, hw, qdev->ndev,
4341                                           "Failed to clear all-multi mode.\n");
4342                         } else {
4343                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4344                         }
4345                 }
4346         }
4347
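             /* Load the individual multicast addresses into the CAM and
              * turn on multicast-match routing.
              */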
4348         if (!netdev_mc_empty(ndev)) {
4349                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4350                 if (status)
4351                         goto exit;
4352                 i = 0;
4353                 netdev_for_each_mc_addr(ha, ndev) {
4354                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4355                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4356                                 netif_err(qdev, hw, qdev->ndev,
4357                                           "Failed to load multicast address.\n");
4358                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4359                                 goto exit;
4360                         }
4361                         i++;
4362                 }
4363                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4364                 if (ql_set_routing_reg
4365                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4366                         netif_err(qdev, hw, qdev->ndev,
4367                                   "Failed to set multicast match mode.\n");
4368                 } else {
4369                         set_bit(QL_ALLMULTI, &qdev->flags);
4370                 }
4371         }
4372 exit:
4373         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4374 }
4375
4376 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4377 {
4378         struct ql_adapter *qdev = netdev_priv(ndev);
4379         struct sockaddr *addr = p;
4380         int status;
4381
4382         if (!is_valid_ether_addr(addr->sa_data))
4383                 return -EADDRNOTAVAIL;
4384         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4385         /* Update local copy of current mac address. */
4386         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4387
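             /* Program the new address into this function's CAM entry. */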
4388         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4389         if (status)
4390                 return status;
4391         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4392                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4393         if (status)
4394                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4395         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4396         return status;
4397 }
4398
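     /* Treat a transmit timeout as a fatal error and schedule an ASIC reset. */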
4399 static void qlge_tx_timeout(struct net_device *ndev)
4400 {
4401         struct ql_adapter *qdev = netdev_priv(ndev);
4402         ql_queue_asic_error(qdev);
4403 }
4404
4405 static void ql_asic_reset_work(struct work_struct *work)
4406 {
4407         struct ql_adapter *qdev =
4408             container_of(work, struct ql_adapter, asic_reset_work.work);
4409         int status;
4410         rtnl_lock();
4411         status = ql_adapter_down(qdev);
4412         if (status)
4413                 goto error;
4414
4415         status = ql_adapter_up(qdev);
4416         if (status)
4417                 goto error;
4418
4419         /* Restore rx mode. */
4420         clear_bit(QL_ALLMULTI, &qdev->flags);
4421         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4422         qlge_set_multicast_list(qdev->ndev);
4423
4424         rtnl_unlock();
4425         return;
4426 error:
4427         netif_alert(qdev, ifup, qdev->ndev,
4428                     "Driver up/down cycle failed, closing device\n");
4429
4430         set_bit(QL_ADAPTER_UP, &qdev->flags);
4431         dev_close(qdev->ndev);
4432         rtnl_unlock();
4433 }
4434
4435 static const struct nic_operations qla8012_nic_ops = {
4436         .get_flash              = ql_get_8012_flash_params,
4437         .port_initialize        = ql_8012_port_initialize,
4438 };
4439
4440 static const struct nic_operations qla8000_nic_ops = {
4441         .get_flash              = ql_get_8000_flash_params,
4442         .port_initialize        = ql_8000_port_initialize,
4443 };
4444
4445 /* Find the pcie function number for the other NIC
4446  * on this chip.  Since both NIC functions share a
4447  * common firmware, we have the lowest enabled function
4448  * do any common work.  Examples would be resetting
4449  * after a fatal firmware error, or doing a firmware
4450  * coredump.
4451  */
4452 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4453 {
4454         int status = 0;
4455         u32 temp;
4456         u32 nic_func1, nic_func2;
4457
4458         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4459                         &temp);
4460         if (status)
4461                 return status;
4462
4463         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4464                         MPI_TEST_NIC_FUNC_MASK);
4465         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4466                         MPI_TEST_NIC_FUNC_MASK);
4467
4468         if (qdev->func == nic_func1)
4469                 qdev->alt_func = nic_func2;
4470         else if (qdev->func == nic_func2)
4471                 qdev->alt_func = nic_func1;
4472         else
4473                 status = -EIO;
4474
4475         return status;
4476 }
4477
4478 static int ql_get_board_info(struct ql_adapter *qdev)
4479 {
4480         int status;
4481         qdev->func =
4482             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4483         if (qdev->func > 3)
4484                 return -EIO;
4485
4486         status = ql_get_alt_pcie_func(qdev);
4487         if (status)
4488                 return status;
4489
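             /* The lower-numbered NIC function owns port 0; pick the mailbox
              * and semaphore resources accordingly.
              */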
4490         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4491         if (qdev->port) {
4492                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4493                 qdev->port_link_up = STS_PL1;
4494                 qdev->port_init = STS_PI1;
4495                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4496                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4497         } else {
4498                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4499                 qdev->port_link_up = STS_PL0;
4500                 qdev->port_init = STS_PI0;
4501                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4502                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4503         }
4504         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4505         qdev->device_id = qdev->pdev->device;
4506         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4507                 qdev->nic_ops = &qla8012_nic_ops;
4508         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4509                 qdev->nic_ops = &qla8000_nic_ops;
4510         return status;
4511 }
4512
4513 static void ql_release_all(struct pci_dev *pdev)
4514 {
4515         struct net_device *ndev = pci_get_drvdata(pdev);
4516         struct ql_adapter *qdev = netdev_priv(ndev);
4517
4518         if (qdev->workqueue) {
4519                 destroy_workqueue(qdev->workqueue);
4520                 qdev->workqueue = NULL;
4521         }
4522
4523         if (qdev->reg_base)
4524                 iounmap(qdev->reg_base);
4525         if (qdev->doorbell_area)
4526                 iounmap(qdev->doorbell_area);
4527         vfree(qdev->mpi_coredump);
4528         pci_release_regions(pdev);
4529         pci_set_drvdata(pdev, NULL);
4530 }
4531
4532 static int __devinit ql_init_device(struct pci_dev *pdev,
4533                                     struct net_device *ndev, int cards_found)
4534 {
4535         struct ql_adapter *qdev = netdev_priv(ndev);
4536         int err = 0;
4537
4538         memset((void *)qdev, 0, sizeof(*qdev));
4539         err = pci_enable_device(pdev);
4540         if (err) {
4541                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4542                 return err;
4543         }
4544
4545         qdev->ndev = ndev;
4546         qdev->pdev = pdev;
4547         pci_set_drvdata(pdev, ndev);
4548
4549         /* Set PCIe read request size */
4550         err = pcie_set_readrq(pdev, 4096);
4551         if (err) {
4552                 dev_err(&pdev->dev, "Set readrq failed.\n");
4553                 goto err_out1;
4554         }
4555
4556         err = pci_request_regions(pdev, DRV_NAME);
4557         if (err) {
4558                 dev_err(&pdev->dev, "PCI region request failed.\n");
4559                 goto err_out1;
4560         }
4561
4562         pci_set_master(pdev);
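             /* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */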
4563         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4564                 set_bit(QL_DMA64, &qdev->flags);
4565                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4566         } else {
4567                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4568                 if (!err)
4569                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4570         }
4571
4572         if (err) {
4573                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4574                 goto err_out2;
4575         }
4576
4577         /* Set PCIe reset type for EEH to fundamental. */
4578         pdev->needs_freset = 1;
4579         pci_save_state(pdev);
4580         qdev->reg_base =
4581             ioremap_nocache(pci_resource_start(pdev, 1),
4582                             pci_resource_len(pdev, 1));
4583         if (!qdev->reg_base) {
4584                 dev_err(&pdev->dev, "Register mapping failed.\n");
4585                 err = -ENOMEM;
4586                 goto err_out2;
4587         }
4588
4589         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4590         qdev->doorbell_area =
4591             ioremap_nocache(pci_resource_start(pdev, 3),
4592                             pci_resource_len(pdev, 3));
4593         if (!qdev->doorbell_area) {
4594                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4595                 err = -ENOMEM;
4596                 goto err_out2;
4597         }
4598
4599         err = ql_get_board_info(qdev);
4600         if (err) {
4601                 dev_err(&pdev->dev, "Register access failed.\n");
4602                 err = -EIO;
4603                 goto err_out2;
4604         }
4605         qdev->msg_enable = netif_msg_init(debug, default_msg);
4606         spin_lock_init(&qdev->hw_lock);
4607         spin_lock_init(&qdev->stats_lock);
4608
4609         if (qlge_mpi_coredump) {
4610                 qdev->mpi_coredump =
4611                         vmalloc(sizeof(struct ql_mpi_coredump));
4612                 if (qdev->mpi_coredump == NULL) {
4613                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4614                         err = -ENOMEM;
4615                         goto err_out2;
4616                 }
4617                 if (qlge_force_coredump)
4618                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4619         }
4620         /* make sure the EEPROM is good */
4621         err = qdev->nic_ops->get_flash(qdev);
4622         if (err) {
4623                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4624                 goto err_out2;
4625         }
4626
4627         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4628         /* Keep local copy of current mac address. */
4629         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4630
4631         /* Set up the default ring sizes. */
4632         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4633         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4634
4635         /* Set up the coalescing parameters. */
4636         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4637         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4638         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4639         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4640
4641         /*
4642          * Set up the operating parameters.
4643          */
4644         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4645         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4646         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4647         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4648         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4649         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4650         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4651         init_completion(&qdev->ide_completion);
4652         mutex_init(&qdev->mpi_mutex);
4653
4654         if (!cards_found) {
4655                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4656                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4657                          DRV_NAME, DRV_VERSION);
4658         }
4659         return 0;
4660 err_out2:
4661         ql_release_all(pdev);
4662 err_out1:
4663         pci_disable_device(pdev);
4664         return err;
4665 }
4666
4667 static const struct net_device_ops qlge_netdev_ops = {
4668         .ndo_open               = qlge_open,
4669         .ndo_stop               = qlge_close,
4670         .ndo_start_xmit         = qlge_send,
4671         .ndo_change_mtu         = qlge_change_mtu,
4672         .ndo_get_stats          = qlge_get_stats,
4673         .ndo_set_rx_mode        = qlge_set_multicast_list,
4674         .ndo_set_mac_address    = qlge_set_mac_address,
4675         .ndo_validate_addr      = eth_validate_addr,
4676         .ndo_tx_timeout         = qlge_tx_timeout,
4677         .ndo_fix_features       = qlge_fix_features,
4678         .ndo_set_features       = qlge_set_features,
4679         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4680         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4681 };
4682
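     /* Periodically read a register so EEH notices if the PCI bus has
      * gone dead; rearm every 5 seconds while the bus is healthy.
      */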
4683 static void ql_timer(unsigned long data)
4684 {
4685         struct ql_adapter *qdev = (struct ql_adapter *)data;
4686         u32 var = 0;
4687
4688         var = ql_read32(qdev, STS);
4689         if (pci_channel_offline(qdev->pdev)) {
4690                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4691                 return;
4692         }
4693
4694         mod_timer(&qdev->timer, jiffies + (5*HZ));
4695 }
4696
4697 static int __devinit qlge_probe(struct pci_dev *pdev,
4698                                 const struct pci_device_id *pci_entry)
4699 {
4700         struct net_device *ndev = NULL;
4701         struct ql_adapter *qdev = NULL;
4702         static int cards_found;
4703         int err = 0;
4704
4705         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4706                         min(MAX_CPUS, (int)num_online_cpus()));
4707         if (!ndev)
4708                 return -ENOMEM;
4709
4710         err = ql_init_device(pdev, ndev, cards_found);
4711         if (err < 0) {
4712                 free_netdev(ndev);
4713                 return err;
4714         }
4715
4716         qdev = netdev_priv(ndev);
4717         SET_NETDEV_DEV(ndev, &pdev->dev);
4718         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4719                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4720                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4721         ndev->features = ndev->hw_features |
4722                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4723
4724         if (test_bit(QL_DMA64, &qdev->flags))
4725                 ndev->features |= NETIF_F_HIGHDMA;
4726
4727         /*
4728          * Set up net_device structure.
4729          */
4730         ndev->tx_queue_len = qdev->tx_ring_size;
4731         ndev->irq = pdev->irq;
4732
4733         ndev->netdev_ops = &qlge_netdev_ops;
4734         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4735         ndev->watchdog_timeo = 10 * HZ;
4736
4737         err = register_netdev(ndev);
4738         if (err) {
4739                 dev_err(&pdev->dev, "net device registration failed.\n");
4740                 ql_release_all(pdev);
4741                 pci_disable_device(pdev);
4742                 return err;
4743         }
4744         /* Start up the timer to trigger EEH if
4745          * the bus goes dead
4746          */
4747         init_timer_deferrable(&qdev->timer);
4748         qdev->timer.data = (unsigned long)qdev;
4749         qdev->timer.function = ql_timer;
4750         qdev->timer.expires = jiffies + (5*HZ);
4751         add_timer(&qdev->timer);
4752         ql_link_off(qdev);
4753         ql_display_dev_info(ndev);
4754         atomic_set(&qdev->lb_count, 0);
4755         cards_found++;
4756         return 0;
4757 }
4758
4759 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4760 {
4761         return qlge_send(skb, ndev);
4762 }
4763
4764 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4765 {
4766         return ql_clean_inbound_rx_ring(rx_ring, budget);
4767 }
4768
4769 static void __devexit qlge_remove(struct pci_dev *pdev)
4770 {
4771         struct net_device *ndev = pci_get_drvdata(pdev);
4772         struct ql_adapter *qdev = netdev_priv(ndev);
4773         del_timer_sync(&qdev->timer);
4774         ql_cancel_all_work_sync(qdev);
4775         unregister_netdev(ndev);
4776         ql_release_all(pdev);
4777         pci_disable_device(pdev);
4778         free_netdev(ndev);
4779 }
4780
4781 /* Clean up resources without touching hardware. */
4782 static void ql_eeh_close(struct net_device *ndev)
4783 {
4784         int i;
4785         struct ql_adapter *qdev = netdev_priv(ndev);
4786
4787         if (netif_carrier_ok(ndev)) {
4788                 netif_carrier_off(ndev);
4789                 netif_stop_queue(ndev);
4790         }
4791
4792         /* Disabling the timer */
4793         del_timer_sync(&qdev->timer);
4794         ql_cancel_all_work_sync(qdev);
4795
4796         for (i = 0; i < qdev->rss_ring_count; i++)
4797                 netif_napi_del(&qdev->rx_ring[i].napi);
4798
4799         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4800         ql_tx_ring_clean(qdev);
4801         ql_free_rx_buffers(qdev);
4802         ql_release_adapter_resources(qdev);
4803 }
4804
4805 /*
4806  * This callback is called by the PCI subsystem whenever
4807  * a PCI bus error is detected.
4808  */
4809 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4810                                                enum pci_channel_state state)
4811 {
4812         struct net_device *ndev = pci_get_drvdata(pdev);
4813         struct ql_adapter *qdev = netdev_priv(ndev);
4814
4815         switch (state) {
4816         case pci_channel_io_normal:
4817                 return PCI_ERS_RESULT_CAN_RECOVER;
4818         case pci_channel_io_frozen:
4819                 netif_device_detach(ndev);
4820                 if (netif_running(ndev))
4821                         ql_eeh_close(ndev);
4822                 pci_disable_device(pdev);
4823                 return PCI_ERS_RESULT_NEED_RESET;
4824         case pci_channel_io_perm_failure:
4825                 dev_err(&pdev->dev,
4826                         "%s: pci_channel_io_perm_failure.\n", __func__);
4827                 ql_eeh_close(ndev);
4828                 set_bit(QL_EEH_FATAL, &qdev->flags);
4829                 return PCI_ERS_RESULT_DISCONNECT;
4830         }
4831
4832         /* Request a slot reset. */
4833         return PCI_ERS_RESULT_NEED_RESET;
4834 }
4835
4836 /*
4837  * This callback is called after the PCI bus has been reset.
4838  * Basically, this tries to restart the card from scratch.
4839  * This is a shortened version of the device probe/discovery code,
4840  * it resembles the first half of the qlge_probe() routine.
4841  */
4842 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4843 {
4844         struct net_device *ndev = pci_get_drvdata(pdev);
4845         struct ql_adapter *qdev = netdev_priv(ndev);
4846
4847         pdev->error_state = pci_channel_io_normal;
4848
4849         pci_restore_state(pdev);
4850         if (pci_enable_device(pdev)) {
4851                 netif_err(qdev, ifup, qdev->ndev,
4852                           "Cannot re-enable PCI device after reset.\n");
4853                 return PCI_ERS_RESULT_DISCONNECT;
4854         }
4855         pci_set_master(pdev);
4856
4857         if (ql_adapter_reset(qdev)) {
4858                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4859                 set_bit(QL_EEH_FATAL, &qdev->flags);
4860                 return PCI_ERS_RESULT_DISCONNECT;
4861         }
4862
4863         return PCI_ERS_RESULT_RECOVERED;
4864 }
4865
4866 static void qlge_io_resume(struct pci_dev *pdev)
4867 {
4868         struct net_device *ndev = pci_get_drvdata(pdev);
4869         struct ql_adapter *qdev = netdev_priv(ndev);
4870         int err = 0;
4871
4872         if (netif_running(ndev)) {
4873                 err = qlge_open(ndev);
4874                 if (err) {
4875                         netif_err(qdev, ifup, qdev->ndev,
4876                                   "Device initialization failed after reset.\n");
4877                         return;
4878                 }
4879         } else {
4880                 netif_err(qdev, ifup, qdev->ndev,
4881                           "Device was not running prior to EEH.\n");
4882         }
4883         mod_timer(&qdev->timer, jiffies + (5*HZ));
4884         netif_device_attach(ndev);
4885 }
4886
4887 static struct pci_error_handlers qlge_err_handler = {
4888         .error_detected = qlge_io_error_detected,
4889         .slot_reset = qlge_io_slot_reset,
4890         .resume = qlge_io_resume,
4891 };
4892
4893 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4894 {
4895         struct net_device *ndev = pci_get_drvdata(pdev);
4896         struct ql_adapter *qdev = netdev_priv(ndev);
4897         int err;
4898
4899         netif_device_detach(ndev);
4900         del_timer_sync(&qdev->timer);
4901
4902         if (netif_running(ndev)) {
4903                 err = ql_adapter_down(qdev);
4904                 if (err)
4905                         return err;
4906         }
4907
4908         ql_wol(qdev);
4909         err = pci_save_state(pdev);
4910         if (err)
4911                 return err;
4912
4913         pci_disable_device(pdev);
4914
4915         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4916
4917         return 0;
4918 }
4919
4920 #ifdef CONFIG_PM
4921 static int qlge_resume(struct pci_dev *pdev)
4922 {
4923         struct net_device *ndev = pci_get_drvdata(pdev);
4924         struct ql_adapter *qdev = netdev_priv(ndev);
4925         int err;
4926
4927         pci_set_power_state(pdev, PCI_D0);
4928         pci_restore_state(pdev);
4929         err = pci_enable_device(pdev);
4930         if (err) {
4931                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4932                 return err;
4933         }
4934         pci_set_master(pdev);
4935
4936         pci_enable_wake(pdev, PCI_D3hot, 0);
4937         pci_enable_wake(pdev, PCI_D3cold, 0);
4938
4939         if (netif_running(ndev)) {
4940                 err = ql_adapter_up(qdev);
4941                 if (err)
4942                         return err;
4943         }
4944
4945         mod_timer(&qdev->timer, jiffies + (5*HZ));
4946         netif_device_attach(ndev);
4947
4948         return 0;
4949 }
4950 #endif /* CONFIG_PM */
4951
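     /* On shutdown, take the same WOL/power-down path as suspend. */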
4952 static void qlge_shutdown(struct pci_dev *pdev)
4953 {
4954         qlge_suspend(pdev, PMSG_SUSPEND);
4955 }
4956
4957 static struct pci_driver qlge_driver = {
4958         .name = DRV_NAME,
4959         .id_table = qlge_pci_tbl,
4960         .probe = qlge_probe,
4961         .remove = __devexit_p(qlge_remove),
4962 #ifdef CONFIG_PM
4963         .suspend = qlge_suspend,
4964         .resume = qlge_resume,
4965 #endif
4966         .shutdown = qlge_shutdown,
4967         .err_handler = &qlge_err_handler
4968 };
4969
4970 static int __init qlge_init_module(void)
4971 {
4972         return pci_register_driver(&qlge_driver);
4973 }
4974
4975 static void __exit qlge_exit(void)
4976 {
4977         pci_unregister_driver(&qlge_driver);
4978 }
4979
4980 module_init(qlge_init_module);
4981 module_exit(qlge_exit);