qlogic: Move the QLogic drivers
[pandora-kernel.git] drivers/net/ethernet/qlogic/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
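/* Note: the module parameters above are supplied at load time, e.g.
 *
 *	modprobe qlge qlge_irq_type=1 qlge_mpi_coredump=1
 *
 * (illustrative invocation only; the parameter names are the ones declared
 * above, and leaving debug at -1 keeps the default_msg bit mask).
 */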
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
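/* Note on the return convention used above and below: ql_sem_trylock()
 * returns 0 only when the semaphore bit reads back as set, i.e. when the
 * semaphore was actually acquired; any non-zero value (including -EINVAL
 * for a bad mask) means "not acquired".  ql_sem_spinlock() below retries
 * the trylock up to 30 times with a 100us delay (roughly 3 ms) before
 * giving up with -ETIMEDOUT.
 */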
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly during the initialization
163  * process, but is also used from process-context callbacks such as
164  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
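/* Note: UDELAY_COUNT and UDELAY_DELAY are constants defined in qlge.h, so
 * the poll above is bounded to roughly UDELAY_COUNT * UDELAY_DELAY
 * microseconds before -ETIMEDOUT is returned.
 */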
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 goto lock_failed;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
lock_failed:
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
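/* Illustrative use of ql_write_cfg() (a sketch; the real calls appear later
 * in this file when the queues are brought up), e.g. loading a completion
 * queue control block:
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			   CFG_LCQ, rx_ring->cq_id);
 *
 * i.e. the caller passes a CPU-resident control block, its size, the CFG
 * "load" bit matching the block type, and the target queue id.
 */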
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
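/* Note on the layout read above: each CAM/multicast entry is fetched as
 * consecutive 32-bit words at increasing offsets - the lower 32 bits of
 * the MAC address, then the upper 16 bits, and for CAM entries a third
 * word holding the routing/output information - which is why *value is
 * advanced up to three times.
 */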
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                            and possibly the function id.  Right now we hardcode
413                            the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, the addr actually holds a bit that
429                          * either enables or disables the vlan id we are
430                          * addressing. It's either MAC_ADDR_E on or off.
431                          * That's bit-27 we're talking about.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * especially in a bonding environment.
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init mac address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
651
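/* Illustrative use of ql_set_routing_reg() (a sketch; the routing table is
 * actually programmed later in this file, with the routing-index semaphore
 * held via ql_sem_spinlock(qdev, SEM_RT_IDX_MASK)):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * enables the slot that steers broadcast frames to the default queue.
 */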
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're running with multiple MSI-X
676                  * interrupts and this is not the default (zeroth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're running with multiple MSI-X
700          * interrupts and this is not the default (zeroth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
716
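/* Summary of the irq_cnt scheme implemented by the two helpers above:
 * ql_disable_completion_interrupt() increments ctx->irq_cnt (writing the
 * disable mask on the 0 -> 1 transition) and
 * ql_enable_completion_interrupt() decrements it, re-enabling only when
 * the count reaches zero, so nested disable/enable pairs behave like a
 * reference count.  MSI-X vectors other than vector 0 bypass the count
 * entirely.
 */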
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does an atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
732
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
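/* Note on ql_validate_flash(): the flash image is considered valid when
 * its 16-bit words sum to zero (mod 2^16), so the function returns 0 on
 * success and a non-zero value for either a bad signature (the strncmp
 * result) or a non-zero residual checksum.
 */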
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770         /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu endian
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
893
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
958
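/* Note: the 64-bit statistics counters are exposed as two 32-bit
 * registers - the low word at 'reg' and the high word at 'reg + 4' -
 * which ql_read_xgmac_reg64() above recombines into a single u64.
 */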
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethtool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable the transmitter and clear its reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable the receiver and clear its reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Turn on jumbo: program a 9600-byte (0x2580) frame size. */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled.        */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
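/* Note on the large-buffer scheme used above: each lbq descriptor points
 * at a chunk carved out of a higher-order "master" page of
 * ql_lbq_block_size() bytes (PAGE_SIZE << lbq_buf_order).  Only the
 * consumed chunk is synced for the CPU, and the whole page is unmapped
 * once its final chunk (offset + lbq_buf_size == block size) has been
 * handed up the stack.
 */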
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
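/* Note: ql_get_next_chunk() takes an extra page reference (get_page())
 * for every chunk except the last one in the master page, so the page is
 * only freed after the networking stack has released all of the skbs
 * that reference its chunks.
 */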
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                         rx_ring->lbq_buf_size);
1197                         *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
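/* Note: the lbq refill above works in batches of 16 descriptors and only
 * rings the producer-index doorbell when something was actually refilled
 * (start_idx != clean_idx); ql_update_sbq() below follows the same
 * pattern for the small-buffer queue.
 */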
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If it's the zeroth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there are more than 6 frags,
1314                          * then it's an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
1339
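/* Note on ql_unmap_send() above: descriptor slot 0 is always the
 * skb->data mapping and, when more than seven DMA segments were used,
 * slot 7 is the separately mapped OAL (external segment list) - hence
 * those two slots are released with pci_unmap_single() while the
 * remaining slots are frag pages released with pci_unmap_page().
 */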
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the size
1421                          * of a single OAL entry (tx_buf_desc).
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
1432                 }
1433
1434                 map =
1435                     pci_map_page(qdev->pdev, frag->page,
1436                                  frag->page_offset, frag->size,
1437                                  PCI_DMA_TODEVICE);
1438
1439                 err = pci_dma_mapping_error(qdev->pdev, map);
1440                 if (err) {
1441                         netif_err(qdev, tx_queued, qdev->ndev,
1442                                   "PCI mapping frags failed with error: %d.\n",
1443                                   err);
1444                         goto map_error;
1445                 }
1446
1447                 tbd->addr = cpu_to_le64(map);
1448                 tbd->len = cpu_to_le32(frag->size);
1449                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1450                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1451                                   frag->size);
1452
1453         }
1454         /* Save the number of segments we've mapped. */
1455         tx_ring_desc->map_cnt = map_idx;
1456         /* Terminate the last segment. */
1457         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1458         return NETDEV_TX_OK;
1459
1460 map_error:
1461         /*
1462          * If the first frag mapping failed, then map_idx will be 1,
1463          * which unmaps only the skb->data area.  Otherwise we pass
1464          * in the number of segments that mapped successfully so
1465          * they can be unmapped.
1466          */
1467         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1468         return NETDEV_TX_BUSY;
1469 }
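
/* Note on the descriptor length encoding used above: tbd->len carries
 * both the byte count and flag bits.  TX_DESC_C is OR'd into the segment
 * that points at the OAL (it appears to act as a continuation marker) and
 * TX_DESC_E marks the end of the list.  With 'rem' remaining fragments
 * the OAL segment is built roughly as:
 *
 *      tbd->addr = cpu_to_le64(oal_dma_addr);
 *      tbd->len  = cpu_to_le32((sizeof(struct tx_buf_desc) * rem) |
 *                              TX_DESC_C);
 *
 * and whichever descriptor ends up last gets:
 *
 *      tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
 */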
1470
1471 /* Process an inbound completion from an rx ring. */
1472 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1473                                         struct rx_ring *rx_ring,
1474                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1475                                         u32 length,
1476                                         u16 vlan_id)
1477 {
1478         struct sk_buff *skb;
1479         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1480         struct skb_frag_struct *rx_frag;
1481         int nr_frags;
1482         struct napi_struct *napi = &rx_ring->napi;
1483
1484         napi->dev = qdev->ndev;
1485
1486         skb = napi_get_frags(napi);
1487         if (!skb) {
1488                 netif_err(qdev, drv, qdev->ndev,
1489                           "Couldn't get an skb, exiting.\n");
1490                 rx_ring->rx_dropped++;
1491                 put_page(lbq_desc->p.pg_chunk.page);
1492                 return;
1493         }
1494         prefetch(lbq_desc->p.pg_chunk.va);
1495         rx_frag = skb_shinfo(skb)->frags;
1496         nr_frags = skb_shinfo(skb)->nr_frags;
1497         rx_frag += nr_frags;
1498         rx_frag->page = lbq_desc->p.pg_chunk.page;
1499         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1500         rx_frag->size = length;
1501
1502         skb->len += length;
1503         skb->data_len += length;
1504         skb->truesize += length;
1505         skb_shinfo(skb)->nr_frags++;
1506
1507         rx_ring->rx_packets++;
1508         rx_ring->rx_bytes += length;
1509         skb->ip_summed = CHECKSUM_UNNECESSARY;
1510         skb_record_rx_queue(skb, rx_ring->cq_id);
1511         if (vlan_id != 0xffff)
1512                 __vlan_hwaccel_put_tag(skb, vlan_id);
1513         napi_gro_frags(napi);
1514 }
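
/* Note on the GRO path above: napi_get_frags() returns the per-NAPI
 * scratch skb, the received page chunk is appended to it as one more page
 * fragment (len/data_len/truesize are grown by hand), and
 * napi_gro_frags() hands the frame to the GRO engine, which either merges
 * it into an existing flow or pushes it up the stack on its own.  The
 * page reference taken for the lbq chunk is consumed by the skb, so the
 * error path has to drop it explicitly with put_page().
 */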
1515
1516 /* Process an inbound completion from an rx ring. */
1517 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518                                         struct rx_ring *rx_ring,
1519                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1520                                         u32 length,
1521                                         u16 vlan_id)
1522 {
1523         struct net_device *ndev = qdev->ndev;
1524         struct sk_buff *skb = NULL;
1525         void *addr;
1526         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527         struct napi_struct *napi = &rx_ring->napi;
1528
1529         skb = netdev_alloc_skb(ndev, length);
1530         if (!skb) {
1531                 netif_err(qdev, drv, qdev->ndev,
1532                           "Couldn't get an skb, need to unwind.\n");
1533                 rx_ring->rx_dropped++;
1534                 put_page(lbq_desc->p.pg_chunk.page);
1535                 return;
1536         }
1537
1538         addr = lbq_desc->p.pg_chunk.va;
1539         prefetch(addr);
1540
1541
1542         /* Frame error, so drop the packet. */
1543         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1544                 netif_info(qdev, drv, qdev->ndev,
1545                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1546                 rx_ring->rx_errors++;
1547                 goto err_out;
1548         }
1549
1550         /* The max framesize filter on this chip is set higher than
1551          * MTU since FCoE uses 2k frames.
1552          */
1553         if (skb->len > ndev->mtu + ETH_HLEN) {
1554                 netif_err(qdev, drv, qdev->ndev,
1555                           "Frame too long, dropping.\n");
1556                 rx_ring->rx_dropped++;
1557                 goto err_out;
1558         }
1559         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1560         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1561                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562                      length);
1563         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1564                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565                                 length-ETH_HLEN);
1566         skb->len += length-ETH_HLEN;
1567         skb->data_len += length-ETH_HLEN;
1568         skb->truesize += length-ETH_HLEN;
1569
1570         rx_ring->rx_packets++;
1571         rx_ring->rx_bytes += skb->len;
1572         skb->protocol = eth_type_trans(skb, ndev);
1573         skb_checksum_none_assert(skb);
1574
1575         if ((ndev->features & NETIF_F_RXCSUM) &&
1576                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577                 /* TCP frame. */
1578                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1579                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1580                                      "TCP checksum done!\n");
1581                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1582                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1583                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1584                         /* Unfragmented ipv4 UDP frame. */
1585                         struct iphdr *iph = (struct iphdr *) skb->data;
1586                         if (!(iph->frag_off &
1587                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1588                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1589                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1590                                              qdev->ndev,
1591                                              "UDP checksum done!\n");
1592                         }
1593                 }
1594         }
1595
1596         skb_record_rx_queue(skb, rx_ring->cq_id);
1597         if (vlan_id != 0xffff)
1598                 __vlan_hwaccel_put_tag(skb, vlan_id);
1599         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1600                 napi_gro_receive(napi, skb);
1601         else
1602                 netif_receive_skb(skb);
1603         return;
1604 err_out:
1605         dev_kfree_skb_any(skb);
1606         put_page(lbq_desc->p.pg_chunk.page);
1607 }
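
/* Note on the page-receive path above: only the ETH_HLEN bytes of the
 * Ethernet header are copied into the skb's linear area; the rest of the
 * frame stays in the page chunk, which is attached as frag 0 via
 * skb_fill_page_desc().  eth_type_trans() only needs that copied header
 * to set skb->protocol, so the payload does not have to be copied here.
 * On error the freshly allocated skb is freed and the page reference is
 * dropped.
 */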
1608
1609 /* Process an inbound completion from an rx ring. */
1610 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1611                                         struct rx_ring *rx_ring,
1612                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1613                                         u32 length,
1614                                         u16 vlan_id)
1615 {
1616         struct net_device *ndev = qdev->ndev;
1617         struct sk_buff *skb = NULL;
1618         struct sk_buff *new_skb = NULL;
1619         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1620
1621         skb = sbq_desc->p.skb;
1622         /* Allocate new_skb and copy */
1623         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1624         if (new_skb == NULL) {
1625                 netif_err(qdev, probe, qdev->ndev,
1626                           "No skb available, drop the packet.\n");
1627                 rx_ring->rx_dropped++;
1628                 return;
1629         }
1630         skb_reserve(new_skb, NET_IP_ALIGN);
1631         memcpy(skb_put(new_skb, length), skb->data, length);
1632         skb = new_skb;
1633
1634         /* Frame error, so drop the packet. */
1635         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1636                 netif_info(qdev, drv, qdev->ndev,
1637                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1638                 dev_kfree_skb_any(skb);
1639                 rx_ring->rx_errors++;
1640                 return;
1641         }
1642
1643         /* loopback self test for ethtool */
1644         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1645                 ql_check_lb_frame(qdev, skb);
1646                 dev_kfree_skb_any(skb);
1647                 return;
1648         }
1649
1650         /* The max framesize filter on this chip is set higher than
1651          * MTU since FCoE uses 2k frames.
1652          */
1653         if (skb->len > ndev->mtu + ETH_HLEN) {
1654                 dev_kfree_skb_any(skb);
1655                 rx_ring->rx_dropped++;
1656                 return;
1657         }
1658
1659         prefetch(skb->data);
1660         skb->dev = ndev;
1661         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1662                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1663                              "%s Multicast.\n",
1664                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1665                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1666                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1667                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1668                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1670         }
1671         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1672                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673                              "Promiscuous Packet.\n");
1674
1675         rx_ring->rx_packets++;
1676         rx_ring->rx_bytes += skb->len;
1677         skb->protocol = eth_type_trans(skb, ndev);
1678         skb_checksum_none_assert(skb);
1679
1680         /* If rx checksum is on, and there are no
1681          * csum or frame errors.
1682          */
1683         if ((ndev->features & NETIF_F_RXCSUM) &&
1684                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1685                 /* TCP frame. */
1686                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1687                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1688                                      "TCP checksum done!\n");
1689                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1690                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1691                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1692                         /* Unfragmented ipv4 UDP frame. */
1693                         struct iphdr *iph = (struct iphdr *) skb->data;
1694                         if (!(iph->frag_off &
1695                                 ntohs(IP_MF|IP_OFFSET))) {
1696                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1698                                              qdev->ndev,
1699                                              "UDP checksum done!\n");
1700                         }
1701                 }
1702         }
1703
1704         skb_record_rx_queue(skb, rx_ring->cq_id);
1705         if (vlan_id != 0xffff)
1706                 __vlan_hwaccel_put_tag(skb, vlan_id);
1707         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1708                 napi_gro_receive(&rx_ring->napi, skb);
1709         else
1710                 netif_receive_skb(skb);
1711 }
1712
1713 static void ql_realign_skb(struct sk_buff *skb, int len)
1714 {
1715         void *temp_addr = skb->data;
1716
1717         /* Undo the skb_reserve(skb,32) we did before
1718          * giving to hardware, and realign data on
1719          * a 2-byte boundary.
1720          */
1721         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1722         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1723         skb_copy_to_linear_data(skb, temp_addr,
1724                 (unsigned int)len);
1725 }
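
/* Worked example for ql_realign_skb(), assuming QLGE_SB_PAD is 32 (per
 * the "skb_reserve(skb,32)" note above) and NET_IP_ALIGN is the usual 2:
 * skb->data and skb->tail are moved back by 32 - 2 = 30 bytes, leaving
 * 2 bytes of headroom so that the IP header following the 14-byte
 * Ethernet header ends up 4-byte aligned, and the received bytes are then
 * copied down to the new start of the linear area.
 */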
1726
1727 /*
1728  * This function builds an skb for the given inbound
1729  * completion.  It will be rewritten for readability in the near
1730  * future, but for now it works well.
1731  */
1732 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1733                                        struct rx_ring *rx_ring,
1734                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1735 {
1736         struct bq_desc *lbq_desc;
1737         struct bq_desc *sbq_desc;
1738         struct sk_buff *skb = NULL;
1739         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1740         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1741
1742         /*
1743          * Handle the header buffer if present.
1744          */
1745         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1746             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748                              "Header of %d bytes in small buffer.\n", hdr_len);
1749                 /*
1750                  * Headers fit nicely into a small buffer.
1751                  */
1752                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1753                 pci_unmap_single(qdev->pdev,
1754                                 dma_unmap_addr(sbq_desc, mapaddr),
1755                                 dma_unmap_len(sbq_desc, maplen),
1756                                 PCI_DMA_FROMDEVICE);
1757                 skb = sbq_desc->p.skb;
1758                 ql_realign_skb(skb, hdr_len);
1759                 skb_put(skb, hdr_len);
1760                 sbq_desc->p.skb = NULL;
1761         }
1762
1763         /*
1764          * Handle the data buffer(s).
1765          */
1766         if (unlikely(!length)) {        /* Is there data too? */
1767                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1768                              "No Data buffer in this packet.\n");
1769                 return skb;
1770         }
1771
1772         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1773                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775                                      "Headers in small, data of %d bytes in small, combine them.\n",
1776                                      length);
1777                         /*
1778                          * Data is less than small buffer size so it's
1779                          * stuffed in a small buffer.
1780                          * For this case we append the data
1781                          * from the "data" small buffer to the "header" small
1782                          * buffer.
1783                          */
1784                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1785                         pci_dma_sync_single_for_cpu(qdev->pdev,
1786                                                     dma_unmap_addr
1787                                                     (sbq_desc, mapaddr),
1788                                                     dma_unmap_len
1789                                                     (sbq_desc, maplen),
1790                                                     PCI_DMA_FROMDEVICE);
1791                         memcpy(skb_put(skb, length),
1792                                sbq_desc->p.skb->data, length);
1793                         pci_dma_sync_single_for_device(qdev->pdev,
1794                                                        dma_unmap_addr
1795                                                        (sbq_desc,
1796                                                         mapaddr),
1797                                                        dma_unmap_len
1798                                                        (sbq_desc,
1799                                                         maplen),
1800                                                        PCI_DMA_FROMDEVICE);
1801                 } else {
1802                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1803                                      "%d bytes in a single small buffer.\n",
1804                                      length);
1805                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1806                         skb = sbq_desc->p.skb;
1807                         ql_realign_skb(skb, length);
1808                         skb_put(skb, length);
1809                         pci_unmap_single(qdev->pdev,
1810                                          dma_unmap_addr(sbq_desc,
1811                                                         mapaddr),
1812                                          dma_unmap_len(sbq_desc,
1813                                                        maplen),
1814                                          PCI_DMA_FROMDEVICE);
1815                         sbq_desc->p.skb = NULL;
1816                 }
1817         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1818                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1819                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820                                      "Header in small, %d bytes in large. Chain large to small!\n",
1821                                      length);
1822                         /*
1823                          * The data is in a single large buffer.  We
1824                          * chain it to the header buffer's skb and let
1825                          * it rip.
1826                          */
1827                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1828                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1830                                      lbq_desc->p.pg_chunk.offset, length);
1831                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1832                                                 lbq_desc->p.pg_chunk.offset,
1833                                                 length);
1834                         skb->len += length;
1835                         skb->data_len += length;
1836                         skb->truesize += length;
1837                 } else {
1838                         /*
1839                          * The headers and data are in a single large buffer.
1840                          * Allocate a new skb, chain the page to it and pull the
1841                          * headers in. Can happen with jumbo mtu, non-TCP/UDP frames.
1842                          */
1843                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1844                         skb = netdev_alloc_skb(qdev->ndev, length);
1845                         if (skb == NULL) {
1846                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1847                                              "No skb available, drop the packet.\n");
1848                                 return NULL;
1849                         }
1850                         pci_unmap_page(qdev->pdev,
1851                                        dma_unmap_addr(lbq_desc,
1852                                                       mapaddr),
1853                                        dma_unmap_len(lbq_desc, maplen),
1854                                        PCI_DMA_FROMDEVICE);
1855                         skb_reserve(skb, NET_IP_ALIGN);
1856                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1858                                      length);
1859                         skb_fill_page_desc(skb, 0,
1860                                                 lbq_desc->p.pg_chunk.page,
1861                                                 lbq_desc->p.pg_chunk.offset,
1862                                                 length);
1863                         skb->len += length;
1864                         skb->data_len += length;
1865                         skb->truesize += length;
1866                         length -= length;
1867                         __pskb_pull_tail(skb,
1868                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1869                                 VLAN_ETH_HLEN : ETH_HLEN);
1870                 }
1871         } else {
1872                 /*
1873                  * The data is in a chain of large buffers
1874                  * pointed to by a small buffer.  We loop
1875                  * through and chain them to our small header
1876                  * buffer's skb.
1877                  * frags:  There are 18 max frags and our small
1878                  *         buffer will hold 32 of them. In practice
1879                  *         we'll use 3 at most for our 9000 byte jumbo
1880                  *         frames.  If the MTU goes up we could
1881                  *         eventually be in trouble.
1882                  */
1883                 int size, i = 0;
1884                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1885                 pci_unmap_single(qdev->pdev,
1886                                  dma_unmap_addr(sbq_desc, mapaddr),
1887                                  dma_unmap_len(sbq_desc, maplen),
1888                                  PCI_DMA_FROMDEVICE);
1889                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1890                         /*
1891                          * This is a non-TCP/UDP IP frame, so
1892                          * the headers aren't split into a small
1893                          * buffer.  We have to use the small buffer
1894                          * that contains our sg list as our skb to
1895                          * send upstairs. Copy the sg list here to
1896                          * a local buffer and use it to find the
1897                          * pages to chain.
1898                          */
1899                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1900                                      "%d bytes of headers & data in chain of large.\n",
1901                                      length);
1902                         skb = sbq_desc->p.skb;
1903                         sbq_desc->p.skb = NULL;
1904                         skb_reserve(skb, NET_IP_ALIGN);
1905                 }
1906                 while (length > 0) {
1907                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1908                         size = (length < rx_ring->lbq_buf_size) ? length :
1909                                 rx_ring->lbq_buf_size;
1910
1911                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1912                                      "Adding page %d to skb for %d bytes.\n",
1913                                      i, size);
1914                         skb_fill_page_desc(skb, i,
1915                                                 lbq_desc->p.pg_chunk.page,
1916                                                 lbq_desc->p.pg_chunk.offset,
1917                                                 size);
1918                         skb->len += size;
1919                         skb->data_len += size;
1920                         skb->truesize += size;
1921                         length -= size;
1922                         i++;
1923                 }
1924                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1925                                 VLAN_ETH_HLEN : ETH_HLEN);
1926         }
1927         return skb;
1928 }
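
/* Summary of the cases ql_build_rx_skb() handles, as read from the flag
 * tests above:
 *
 *   HV|HS         header split into a small buffer; data handled below
 *   DS + HS       data also in a small buffer, memcpy'd onto the header skb
 *   DS (no HS)    whole frame in one small buffer; that skb is used as-is
 *   DL + HS       data in one large buffer; its page is chained to the
 *                 header skb
 *   DL (no HS)    header and data in one large buffer; a new skb is
 *                 allocated, the page chained and the Ethernet/VLAN header
 *                 pulled into the linear area
 *   otherwise     data spans a chain of large buffers; the pages are
 *                 chained in a loop onto the header skb (or onto the small
 *                 buffer's skb when the headers weren't split)
 */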
1929
1930 /* Process an inbound completion from an rx ring. */
1931 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1932                                    struct rx_ring *rx_ring,
1933                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1934                                    u16 vlan_id)
1935 {
1936         struct net_device *ndev = qdev->ndev;
1937         struct sk_buff *skb = NULL;
1938
1939         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1940
1941         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1942         if (unlikely(!skb)) {
1943                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1944                              "No skb available, drop packet.\n");
1945                 rx_ring->rx_dropped++;
1946                 return;
1947         }
1948
1949         /* Frame error, so drop the packet. */
1950         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1951                 netif_info(qdev, drv, qdev->ndev,
1952                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1953                 dev_kfree_skb_any(skb);
1954                 rx_ring->rx_errors++;
1955                 return;
1956         }
1957
1958         /* The max framesize filter on this chip is set higher than
1959          * MTU since FCoE uses 2k frames.
1960          */
1961         if (skb->len > ndev->mtu + ETH_HLEN) {
1962                 dev_kfree_skb_any(skb);
1963                 rx_ring->rx_dropped++;
1964                 return;
1965         }
1966
1967         /* loopback self test for ethtool */
1968         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1969                 ql_check_lb_frame(qdev, skb);
1970                 dev_kfree_skb_any(skb);
1971                 return;
1972         }
1973
1974         prefetch(skb->data);
1975         skb->dev = ndev;
1976         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1977                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1978                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1979                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1980                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1981                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1982                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1983                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1984                 rx_ring->rx_multicast++;
1985         }
1986         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1987                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1988                              "Promiscuous Packet.\n");
1989         }
1990
1991         skb->protocol = eth_type_trans(skb, ndev);
1992         skb_checksum_none_assert(skb);
1993
1994         /* If rx checksum is on, and there are no
1995          * csum or frame errors.
1996          */
1997         if ((ndev->features & NETIF_F_RXCSUM) &&
1998                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1999                 /* TCP frame. */
2000                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2001                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2002                                      "TCP checksum done!\n");
2003                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2004                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2005                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2006                         /* Unfragmented ipv4 UDP frame. */
2007                         struct iphdr *iph = (struct iphdr *) skb->data;
2008                         if (!(iph->frag_off &
2009                                 ntohs(IP_MF|IP_OFFSET))) {
2010                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2011                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012                                              "UDP checksum done!\n");
2013                         }
2014                 }
2015         }
2016
2017         rx_ring->rx_packets++;
2018         rx_ring->rx_bytes += skb->len;
2019         skb_record_rx_queue(skb, rx_ring->cq_id);
2020         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2021                 __vlan_hwaccel_put_tag(skb, vlan_id);
2022         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2023                 napi_gro_receive(&rx_ring->napi, skb);
2024         else
2025                 netif_receive_skb(skb);
2026 }
2027
2028 /* Process an inbound completion from an rx ring. */
2029 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2030                                         struct rx_ring *rx_ring,
2031                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2032 {
2033         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2034         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2035                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2036                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2037
2038         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2039
2040         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2041                 /* The data and headers are split into
2042                  * separate buffers.
2043                  */
2044                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2045                                                 vlan_id);
2046         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2047                 /* The data fit in a single small buffer.
2048                  * Allocate a new skb, copy the data and
2049                  * return the buffer to the free pool.
2050                  */
2051                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2052                                                 length, vlan_id);
2053         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2054                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2055                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2056                 /* TCP packet in a page chunk that's been checksummed.
2057                  * Tack it on to our GRO skb and let it go.
2058                  */
2059                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2060                                                 length, vlan_id);
2061         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2062                 /* Non-TCP packet in a page chunk. Allocate an
2063                  * skb, tack it on frags, and send it up.
2064                  */
2065                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2066                                                 length, vlan_id);
2067         } else {
2068                 /* Non-TCP/UDP large frames that span multiple buffers
2069                  * can be processed correctly by the split frame logic.
2070                  */
2071                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2072                                                 vlan_id);
2073         }
2074
2075         return (unsigned long)length;
2076 }
2077
2078 /* Process an outbound completion from an rx ring. */
2079 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2080                                    struct ob_mac_iocb_rsp *mac_rsp)
2081 {
2082         struct tx_ring *tx_ring;
2083         struct tx_ring_desc *tx_ring_desc;
2084
2085         QL_DUMP_OB_MAC_RSP(mac_rsp);
2086         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2087         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2088         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2089         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2090         tx_ring->tx_packets++;
2091         dev_kfree_skb(tx_ring_desc->skb);
2092         tx_ring_desc->skb = NULL;
2093
2094         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2095                                         OB_MAC_IOCB_RSP_S |
2096                                         OB_MAC_IOCB_RSP_L |
2097                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2098                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2099                         netif_warn(qdev, tx_done, qdev->ndev,
2100                                    "Total descriptor length did not match transfer length.\n");
2101                 }
2102                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2103                         netif_warn(qdev, tx_done, qdev->ndev,
2104                                    "Frame too short to be valid, not sent.\n");
2105                 }
2106                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2107                         netif_warn(qdev, tx_done, qdev->ndev,
2108                                    "Frame too long, but sent anyway.\n");
2109                 }
2110                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2111                         netif_warn(qdev, tx_done, qdev->ndev,
2112                                    "PCI backplane error. Frame not sent.\n");
2113                 }
2114         }
2115         atomic_inc(&tx_ring->tx_count);
2116 }
2117
2118 /* Fire up a handler to reset the MPI processor. */
2119 void ql_queue_fw_error(struct ql_adapter *qdev)
2120 {
2121         ql_link_off(qdev);
2122         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2123 }
2124
2125 void ql_queue_asic_error(struct ql_adapter *qdev)
2126 {
2127         ql_link_off(qdev);
2128         ql_disable_interrupts(qdev);
2129         /* Clear the adapter up bit to signal the recovery
2130          * process that it shouldn't kill the reset worker
2131          * thread.
2132          */
2133         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2134         /* Set the asic recovery bit to tell the reset process that we
2135          * are in fatal error recovery rather than a normal close
2136          */
2137         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2138         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2139 }
2140
2141 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2142                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2143 {
2144         switch (ib_ae_rsp->event) {
2145         case MGMT_ERR_EVENT:
2146                 netif_err(qdev, rx_err, qdev->ndev,
2147                           "Management Processor Fatal Error.\n");
2148                 ql_queue_fw_error(qdev);
2149                 return;
2150
2151         case CAM_LOOKUP_ERR_EVENT:
2152                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2153                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2154                 ql_queue_asic_error(qdev);
2155                 return;
2156
2157         case SOFT_ECC_ERROR_EVENT:
2158                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2159                 ql_queue_asic_error(qdev);
2160                 break;
2161
2162         case PCI_ERR_ANON_BUF_RD:
2163                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2164                                         "anonymous buffers from rx_ring %d.\n",
2165                                         ib_ae_rsp->q_id);
2166                 ql_queue_asic_error(qdev);
2167                 break;
2168
2169         default:
2170                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2171                           ib_ae_rsp->event);
2172                 ql_queue_asic_error(qdev);
2173                 break;
2174         }
2175 }
2176
2177 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2178 {
2179         struct ql_adapter *qdev = rx_ring->qdev;
2180         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2181         struct ob_mac_iocb_rsp *net_rsp = NULL;
2182         int count = 0;
2183
2184         struct tx_ring *tx_ring;
2185         /* While there are entries in the completion queue. */
2186         while (prod != rx_ring->cnsmr_idx) {
2187
2188                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2189                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2190                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2191
2192                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2193                 rmb();
2194                 switch (net_rsp->opcode) {
2195
2196                 case OPCODE_OB_MAC_TSO_IOCB:
2197                 case OPCODE_OB_MAC_IOCB:
2198                         ql_process_mac_tx_intr(qdev, net_rsp);
2199                         break;
2200                 default:
2201                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2202                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2203                                      net_rsp->opcode);
2204                 }
2205                 count++;
2206                 ql_update_cq(rx_ring);
2207                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2208         }
2209         if (!net_rsp)
2210                 return 0;
2211         ql_write_cq_idx(rx_ring);
2212         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2213         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2214                 if (atomic_read(&tx_ring->queue_stopped) &&
2215                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2216                         /*
2217                          * The queue got stopped because the tx_ring was full.
2218                          * Wake it up, because it's now at least 25% empty.
2219                          */
2220                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2221         }
2222
2223         return count;
2224 }
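
/* Note on the wake-up test above: tx_ring->tx_count appears to track the
 * number of free slots (it is incremented per completed send in
 * ql_process_mac_tx_intr() and checked against a minimum in qlge_send()),
 * so a stopped queue is only restarted once more than wq_len / 4 slots
 * are free again, i.e. the ring is at least 25% empty.  That hysteresis
 * avoids bouncing the queue on and off for every single completion.
 */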
2225
2226 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2227 {
2228         struct ql_adapter *qdev = rx_ring->qdev;
2229         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230         struct ql_net_rsp_iocb *net_rsp;
2231         int count = 0;
2232
2233         /* While there are entries in the completion queue. */
2234         while (prod != rx_ring->cnsmr_idx) {
2235
2236                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2237                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2238                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2239
2240                 net_rsp = rx_ring->curr_entry;
2241                 rmb();
2242                 switch (net_rsp->opcode) {
2243                 case OPCODE_IB_MAC_IOCB:
2244                         ql_process_mac_rx_intr(qdev, rx_ring,
2245                                                (struct ib_mac_iocb_rsp *)
2246                                                net_rsp);
2247                         break;
2248
2249                 case OPCODE_IB_AE_IOCB:
2250                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2251                                                 net_rsp);
2252                         break;
2253                 default:
2254                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2255                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2256                                      net_rsp->opcode);
2257                         break;
2258                 }
2259                 count++;
2260                 ql_update_cq(rx_ring);
2261                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2262                 if (count == budget)
2263                         break;
2264         }
2265         ql_update_buffer_queues(qdev, rx_ring);
2266         ql_write_cq_idx(rx_ring);
2267         return count;
2268 }
2269
2270 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2271 {
2272         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2273         struct ql_adapter *qdev = rx_ring->qdev;
2274         struct rx_ring *trx_ring;
2275         int i, work_done = 0;
2276         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2277
2278         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2280
2281         /* Service the TX rings first.  They start
2282          * right after the RSS rings. */
2283         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2284                 trx_ring = &qdev->rx_ring[i];
2285                 /* If this TX completion ring belongs to this vector and
2286                  * it's not empty then service it.
2287                  */
2288                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2289                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2290                                         trx_ring->cnsmr_idx)) {
2291                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2292                                      "%s: Servicing TX completion ring %d.\n",
2293                                      __func__, trx_ring->cq_id);
2294                         ql_clean_outbound_rx_ring(trx_ring);
2295                 }
2296         }
2297
2298         /*
2299          * Now service the RSS ring if it's active.
2300          */
2301         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2302                                         rx_ring->cnsmr_idx) {
2303                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2304                              "%s: Servicing RX completion ring %d.\n",
2305                              __func__, rx_ring->cq_id);
2306                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2307         }
2308
2309         if (work_done < budget) {
2310                 napi_complete(napi);
2311                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2312         }
2313         return work_done;
2314 }
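
/* Note on the poll routine above: only the inbound (RSS) ring is charged
 * against the NAPI budget; the TX completion rings that share this vector
 * are drained in full on every pass.  When work_done comes back under
 * budget, napi_complete() is called and the vector's completion interrupt
 * is re-armed via ql_enable_completion_interrupt(), per the usual NAPI
 * contract.
 */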
2315
2316 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2317 {
2318         struct ql_adapter *qdev = netdev_priv(ndev);
2319
2320         if (features & NETIF_F_HW_VLAN_RX) {
2321                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2322                              "Turning on VLAN in NIC_RCV_CFG.\n");
2323                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2324                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2325         } else {
2326                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327                              "Turning off VLAN in NIC_RCV_CFG.\n");
2328                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2329         }
2330 }
2331
2332 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2333 {
2334         /*
2335          * Since there is no support for separate rx/tx vlan accel
2336          * enable/disable make sure tx flag is always in same state as rx.
2337          */
2338         if (features & NETIF_F_HW_VLAN_RX)
2339                 features |= NETIF_F_HW_VLAN_TX;
2340         else
2341                 features &= ~NETIF_F_HW_VLAN_TX;
2342
2343         return features;
2344 }
2345
2346 static int qlge_set_features(struct net_device *ndev, u32 features)
2347 {
2348         u32 changed = ndev->features ^ features;
2349
2350         if (changed & NETIF_F_HW_VLAN_RX)
2351                 qlge_vlan_mode(ndev, features);
2352
2353         return 0;
2354 }
2355
2356 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2357 {
2358         u32 enable_bit = MAC_ADDR_E;
2359
2360         if (ql_set_mac_addr_reg
2361             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2362                 netif_err(qdev, ifup, qdev->ndev,
2363                           "Failed to init vlan address.\n");
2364         }
2365 }
2366
2367 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2368 {
2369         struct ql_adapter *qdev = netdev_priv(ndev);
2370         int status;
2371
2372         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373         if (status)
2374                 return;
2375
2376         __qlge_vlan_rx_add_vid(qdev, vid);
2377         set_bit(vid, qdev->active_vlans);
2378
2379         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2380 }
2381
2382 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2383 {
2384         u32 enable_bit = 0;
2385
2386         if (ql_set_mac_addr_reg
2387             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2388                 netif_err(qdev, ifup, qdev->ndev,
2389                           "Failed to clear vlan address.\n");
2390         }
2391 }
2392
2393 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2394 {
2395         struct ql_adapter *qdev = netdev_priv(ndev);
2396         int status;
2397
2398         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2399         if (status)
2400                 return;
2401
2402         __qlge_vlan_rx_kill_vid(qdev, vid);
2403         clear_bit(vid, qdev->active_vlans);
2404
2405         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2406 }
2407
2408 static void qlge_restore_vlan(struct ql_adapter *qdev)
2409 {
2410         int status;
2411         u16 vid;
2412
2413         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2414         if (status)
2415                 return;
2416
2417         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2418                 __qlge_vlan_rx_add_vid(qdev, vid);
2419
2420         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2421 }
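
/* Note: qlge_restore_vlan() re-programs every VID recorded in
 * qdev->active_vlans back into the hardware VLAN filter (typically after
 * a reset or re-open), taking the SEM_MAC_ADDR_MASK hardware semaphore
 * once around the whole loop rather than once per VID as the add/kill
 * entry points above do.
 */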
2422
2423 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2424 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2425 {
2426         struct rx_ring *rx_ring = dev_id;
2427         napi_schedule(&rx_ring->napi);
2428         return IRQ_HANDLED;
2429 }
2430
2431 /* This handles a fatal error, MPI activity, and the default
2432  * rx_ring in an MSI-X multiple vector environment.
2433  * In an MSI/Legacy environment it also processes the rest of
2434  * the rx_rings.
2435  */
2436 static irqreturn_t qlge_isr(int irq, void *dev_id)
2437 {
2438         struct rx_ring *rx_ring = dev_id;
2439         struct ql_adapter *qdev = rx_ring->qdev;
2440         struct intr_context *intr_context = &qdev->intr_context[0];
2441         u32 var;
2442         int work_done = 0;
2443
2444         spin_lock(&qdev->hw_lock);
2445         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2446                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2447                              "Shared Interrupt, Not ours!\n");
2448                 spin_unlock(&qdev->hw_lock);
2449                 return IRQ_NONE;
2450         }
2451         spin_unlock(&qdev->hw_lock);
2452
2453         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2454
2455         /*
2456          * Check for fatal error.
2457          */
2458         if (var & STS_FE) {
2459                 ql_queue_asic_error(qdev);
2460                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2461                 var = ql_read32(qdev, ERR_STS);
2462                 netdev_err(qdev->ndev, "Resetting chip. "
2463                                         "Error Status Register = 0x%x\n", var);
2464                 return IRQ_HANDLED;
2465         }
2466
2467         /*
2468          * Check MPI processor activity.
2469          */
2470         if ((var & STS_PI) &&
2471                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2472                 /*
2473                  * We've got an async event or mailbox completion.
2474                  * Handle it and clear the source of the interrupt.
2475                  */
2476                 netif_err(qdev, intr, qdev->ndev,
2477                           "Got MPI processor interrupt.\n");
2478                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2479                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2480                 queue_delayed_work_on(smp_processor_id(),
2481                                 qdev->workqueue, &qdev->mpi_work, 0);
2482                 work_done++;
2483         }
2484
2485         /*
2486          * Get the bit-mask that shows the active queues for this
2487          * pass.  Compare it to the queues that this irq services
2488          * and call napi if there's a match.
2489          */
2490         var = ql_read32(qdev, ISR1);
2491         if (var & intr_context->irq_mask) {
2492                 netif_info(qdev, intr, qdev->ndev,
2493                            "Waking handler for rx_ring[0].\n");
2494                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2495                 napi_schedule(&rx_ring->napi);
2496                 work_done++;
2497         }
2498         ql_enable_completion_interrupt(qdev, intr_context->intr);
2499         return work_done ? IRQ_HANDLED : IRQ_NONE;
2500 }
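
/* Note on qlge_isr(): in MSI/legacy mode this single handler sorts out
 * three sources from the status word returned by
 * ql_disable_completion_interrupt(): a fatal ASIC error (STS_FE), MPI
 * firmware activity (STS_PI, deferred to the mpi_work handler), and
 * ordinary completion traffic, which is matched against this vector's
 * irq_mask via ISR1 before rx_ring[0]'s NAPI context is scheduled.
 */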
2501
2502 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2503 {
2504
2505         if (skb_is_gso(skb)) {
2506                 int err;
2507                 if (skb_header_cloned(skb)) {
2508                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2509                         if (err)
2510                                 return err;
2511                 }
2512
2513                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2515                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2516                 mac_iocb_ptr->total_hdrs_len =
2517                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2518                 mac_iocb_ptr->net_trans_offset =
2519                     cpu_to_le16(skb_network_offset(skb) |
2520                                 skb_transport_offset(skb)
2521                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2522                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2523                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2524                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2525                         struct iphdr *iph = ip_hdr(skb);
2526                         iph->check = 0;
2527                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2528                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2529                                                                  iph->daddr, 0,
2530                                                                  IPPROTO_TCP,
2531                                                                  0);
2532                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2533                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2534                         tcp_hdr(skb)->check =
2535                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2536                                              &ipv6_hdr(skb)->daddr,
2537                                              0, IPPROTO_TCP, 0);
2538                 }
2539                 return 1;
2540         }
2541         return 0;
2542 }
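
/* Note on the TSO setup above: the TCP checksum field is seeded with the
 * complement of the pseudo-header sum computed over a zero length,
 *
 *      tcp_hdr(skb)->check =
 *              ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
 *
 * so the hardware only has to add each segment's payload length and data
 * checksum when it carves the super-frame into MSS-sized pieces.
 * iph->check is zeroed for the same reason, since the IP header checksum
 * is presumably recomputed by the hardware for every segment it emits.
 */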
2543
2544 static void ql_hw_csum_setup(struct sk_buff *skb,
2545                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2546 {
2547         int len;
2548         struct iphdr *iph = ip_hdr(skb);
2549         __sum16 *check;
2550         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2551         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2552         mac_iocb_ptr->net_trans_offset =
2553                 cpu_to_le16(skb_network_offset(skb) |
2554                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2555
2556         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2557         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2558         if (likely(iph->protocol == IPPROTO_TCP)) {
2559                 check = &(tcp_hdr(skb)->check);
2560                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2561                 mac_iocb_ptr->total_hdrs_len =
2562                     cpu_to_le16(skb_transport_offset(skb) +
2563                                 (tcp_hdr(skb)->doff << 2));
2564         } else {
2565                 check = &(udp_hdr(skb)->check);
2566                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2567                 mac_iocb_ptr->total_hdrs_len =
2568                     cpu_to_le16(skb_transport_offset(skb) +
2569                                 sizeof(struct udphdr));
2570         }
2571         *check = ~csum_tcpudp_magic(iph->saddr,
2572                                     iph->daddr, len, iph->protocol, 0);
2573 }
2574
2575 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2576 {
2577         struct tx_ring_desc *tx_ring_desc;
2578         struct ob_mac_iocb_req *mac_iocb_ptr;
2579         struct ql_adapter *qdev = netdev_priv(ndev);
2580         int tso;
2581         struct tx_ring *tx_ring;
2582         u32 tx_ring_idx = (u32) skb->queue_mapping;
2583
2584         tx_ring = &qdev->tx_ring[tx_ring_idx];
2585
2586         if (skb_padto(skb, ETH_ZLEN))
2587                 return NETDEV_TX_OK;
2588
2589         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2590                 netif_info(qdev, tx_queued, qdev->ndev,
2591                            "%s: shutting down tx queue %d due to lack of resources.\n",
2592                            __func__, tx_ring_idx);
2593                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2594                 atomic_inc(&tx_ring->queue_stopped);
2595                 tx_ring->tx_errors++;
2596                 return NETDEV_TX_BUSY;
2597         }
2598         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2599         mac_iocb_ptr = tx_ring_desc->queue_entry;
2600         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2601
2602         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2603         mac_iocb_ptr->tid = tx_ring_desc->index;
2604         /* We use the upper 32-bits to store the tx queue for this IO.
2605          * When we get the completion we can use it to establish the context.
2606          */
2607         mac_iocb_ptr->txq_idx = tx_ring_idx;
2608         tx_ring_desc->skb = skb;
2609
2610         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2611
2612         if (vlan_tx_tag_present(skb)) {
2613                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2614                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2615                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2616                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2617         }
2618         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619         if (tso < 0) {
2620                 dev_kfree_skb_any(skb);
2621                 return NETDEV_TX_OK;
2622         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2623                 ql_hw_csum_setup(skb,
2624                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625         }
2626         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2627                         NETDEV_TX_OK) {
2628                 netif_err(qdev, tx_queued, qdev->ndev,
2629                           "Could not map the segments.\n");
2630                 tx_ring->tx_errors++;
2631                 return NETDEV_TX_BUSY;
2632         }
2633         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2634         tx_ring->prod_idx++;
2635         if (tx_ring->prod_idx == tx_ring->wq_len)
2636                 tx_ring->prod_idx = 0;
2637         wmb();
2638
2639         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2640         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2641                      "tx queued, slot %d, len %d\n",
2642                      tx_ring->prod_idx, skb->len);
2643
2644         atomic_dec(&tx_ring->tx_count);
2645         return NETDEV_TX_OK;
2646 }
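/* Sketch of the TX ring bookkeeping used above (derived from the code):
 * descriptors are produced at tx_ring->prod_idx, which wraps at wq_len;
 * the wmb() ensures the IOCB is visible in memory before the new producer
 * index is written to the doorbell register, and tx_count is decremented
 * here and presumably replenished as TX completions are processed.
 */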
2647
2648
2649 static void ql_free_shadow_space(struct ql_adapter *qdev)
2650 {
2651         if (qdev->rx_ring_shadow_reg_area) {
2652                 pci_free_consistent(qdev->pdev,
2653                                     PAGE_SIZE,
2654                                     qdev->rx_ring_shadow_reg_area,
2655                                     qdev->rx_ring_shadow_reg_dma);
2656                 qdev->rx_ring_shadow_reg_area = NULL;
2657         }
2658         if (qdev->tx_ring_shadow_reg_area) {
2659                 pci_free_consistent(qdev->pdev,
2660                                     PAGE_SIZE,
2661                                     qdev->tx_ring_shadow_reg_area,
2662                                     qdev->tx_ring_shadow_reg_dma);
2663                 qdev->tx_ring_shadow_reg_area = NULL;
2664         }
2665 }
2666
2667 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2668 {
2669         qdev->rx_ring_shadow_reg_area =
2670             pci_alloc_consistent(qdev->pdev,
2671                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2672         if (qdev->rx_ring_shadow_reg_area == NULL) {
2673                 netif_err(qdev, ifup, qdev->ndev,
2674                           "Allocation of RX shadow space failed.\n");
2675                 return -ENOMEM;
2676         }
2677         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2678         qdev->tx_ring_shadow_reg_area =
2679             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2680                                  &qdev->tx_ring_shadow_reg_dma);
2681         if (qdev->tx_ring_shadow_reg_area == NULL) {
2682                 netif_err(qdev, ifup, qdev->ndev,
2683                           "Allocation of TX shadow space failed.\n");
2684                 goto err_wqp_sh_area;
2685         }
2686         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2687         return 0;
2688
2689 err_wqp_sh_area:
2690         pci_free_consistent(qdev->pdev,
2691                             PAGE_SIZE,
2692                             qdev->rx_ring_shadow_reg_area,
2693                             qdev->rx_ring_shadow_reg_dma);
2694         return -ENOMEM;
2695 }
2696
2697 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2698 {
2699         struct tx_ring_desc *tx_ring_desc;
2700         int i;
2701         struct ob_mac_iocb_req *mac_iocb_ptr;
2702
2703         mac_iocb_ptr = tx_ring->wq_base;
2704         tx_ring_desc = tx_ring->q;
2705         for (i = 0; i < tx_ring->wq_len; i++) {
2706                 tx_ring_desc->index = i;
2707                 tx_ring_desc->skb = NULL;
2708                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2709                 mac_iocb_ptr++;
2710                 tx_ring_desc++;
2711         }
2712         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2713         atomic_set(&tx_ring->queue_stopped, 0);
2714 }
2715
2716 static void ql_free_tx_resources(struct ql_adapter *qdev,
2717                                  struct tx_ring *tx_ring)
2718 {
2719         if (tx_ring->wq_base) {
2720                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2721                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2722                 tx_ring->wq_base = NULL;
2723         }
2724         kfree(tx_ring->q);
2725         tx_ring->q = NULL;
2726 }
2727
2728 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2729                                  struct tx_ring *tx_ring)
2730 {
2731         tx_ring->wq_base =
2732             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2733                                  &tx_ring->wq_base_dma);
2734
2735         if ((tx_ring->wq_base == NULL) ||
2736             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2737                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2738                 return -ENOMEM;
2739         }
2740         tx_ring->q =
2741             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2742         if (tx_ring->q == NULL)
2743                 goto err;
2744
2745         return 0;
2746 err:
2747         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2748                             tx_ring->wq_base, tx_ring->wq_base_dma);
2749         return -ENOMEM;
2750 }
2751
2752 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2753 {
2754         struct bq_desc *lbq_desc;
2755
2756         uint32_t  curr_idx, clean_idx;
2757
2758         curr_idx = rx_ring->lbq_curr_idx;
2759         clean_idx = rx_ring->lbq_clean_idx;
2760         while (curr_idx != clean_idx) {
2761                 lbq_desc = &rx_ring->lbq[curr_idx];
2762
2763                 if (lbq_desc->p.pg_chunk.last_flag) {
2764                         pci_unmap_page(qdev->pdev,
2765                                 lbq_desc->p.pg_chunk.map,
2766                                 ql_lbq_block_size(qdev),
2767                                        PCI_DMA_FROMDEVICE);
2768                         lbq_desc->p.pg_chunk.last_flag = 0;
2769                 }
2770
2771                 put_page(lbq_desc->p.pg_chunk.page);
2772                 lbq_desc->p.pg_chunk.page = NULL;
2773
2774                 if (++curr_idx == rx_ring->lbq_len)
2775                         curr_idx = 0;
2776
2777         }
2778 }
2779
2780 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2781 {
2782         int i;
2783         struct bq_desc *sbq_desc;
2784
2785         for (i = 0; i < rx_ring->sbq_len; i++) {
2786                 sbq_desc = &rx_ring->sbq[i];
2787                 if (sbq_desc == NULL) {
2788                         netif_err(qdev, ifup, qdev->ndev,
2789                                   "sbq_desc %d is NULL.\n", i);
2790                         return;
2791                 }
2792                 if (sbq_desc->p.skb) {
2793                         pci_unmap_single(qdev->pdev,
2794                                          dma_unmap_addr(sbq_desc, mapaddr),
2795                                          dma_unmap_len(sbq_desc, maplen),
2796                                          PCI_DMA_FROMDEVICE);
2797                         dev_kfree_skb(sbq_desc->p.skb);
2798                         sbq_desc->p.skb = NULL;
2799                 }
2800         }
2801 }
2802
2803 /* Free all large and small rx buffers associated
2804  * with the completion queues for this device.
2805  */
2806 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2807 {
2808         int i;
2809         struct rx_ring *rx_ring;
2810
2811         for (i = 0; i < qdev->rx_ring_count; i++) {
2812                 rx_ring = &qdev->rx_ring[i];
2813                 if (rx_ring->lbq)
2814                         ql_free_lbq_buffers(qdev, rx_ring);
2815                 if (rx_ring->sbq)
2816                         ql_free_sbq_buffers(qdev, rx_ring);
2817         }
2818 }
2819
2820 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2821 {
2822         struct rx_ring *rx_ring;
2823         int i;
2824
2825         for (i = 0; i < qdev->rx_ring_count; i++) {
2826                 rx_ring = &qdev->rx_ring[i];
2827                 if (rx_ring->type != TX_Q)
2828                         ql_update_buffer_queues(qdev, rx_ring);
2829         }
2830 }
2831
2832 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2833                                 struct rx_ring *rx_ring)
2834 {
2835         int i;
2836         struct bq_desc *lbq_desc;
2837         __le64 *bq = rx_ring->lbq_base;
2838
2839         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2840         for (i = 0; i < rx_ring->lbq_len; i++) {
2841                 lbq_desc = &rx_ring->lbq[i];
2842                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2843                 lbq_desc->index = i;
2844                 lbq_desc->addr = bq;
2845                 bq++;
2846         }
2847 }
2848
2849 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2850                                 struct rx_ring *rx_ring)
2851 {
2852         int i;
2853         struct bq_desc *sbq_desc;
2854         __le64 *bq = rx_ring->sbq_base;
2855
2856         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2857         for (i = 0; i < rx_ring->sbq_len; i++) {
2858                 sbq_desc = &rx_ring->sbq[i];
2859                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2860                 sbq_desc->index = i;
2861                 sbq_desc->addr = bq;
2862                 bq++;
2863         }
2864 }
2865
2866 static void ql_free_rx_resources(struct ql_adapter *qdev,
2867                                  struct rx_ring *rx_ring)
2868 {
2869         /* Free the small buffer queue. */
2870         if (rx_ring->sbq_base) {
2871                 pci_free_consistent(qdev->pdev,
2872                                     rx_ring->sbq_size,
2873                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2874                 rx_ring->sbq_base = NULL;
2875         }
2876
2877         /* Free the small buffer queue control blocks. */
2878         kfree(rx_ring->sbq);
2879         rx_ring->sbq = NULL;
2880
2881         /* Free the large buffer queue. */
2882         if (rx_ring->lbq_base) {
2883                 pci_free_consistent(qdev->pdev,
2884                                     rx_ring->lbq_size,
2885                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2886                 rx_ring->lbq_base = NULL;
2887         }
2888
2889         /* Free the large buffer queue control blocks. */
2890         kfree(rx_ring->lbq);
2891         rx_ring->lbq = NULL;
2892
2893         /* Free the rx queue. */
2894         if (rx_ring->cq_base) {
2895                 pci_free_consistent(qdev->pdev,
2896                                     rx_ring->cq_size,
2897                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2898                 rx_ring->cq_base = NULL;
2899         }
2900 }
2901
2902 /* Allocate queues and buffers for this completion queue based
2903  * on the values in the parameter structure. */
2904 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2905                                  struct rx_ring *rx_ring)
2906 {
2907
2908         /*
2909          * Allocate the completion queue for this rx_ring.
2910          */
2911         rx_ring->cq_base =
2912             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2913                                  &rx_ring->cq_base_dma);
2914
2915         if (rx_ring->cq_base == NULL) {
2916                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2917                 return -ENOMEM;
2918         }
2919
2920         if (rx_ring->sbq_len) {
2921                 /*
2922                  * Allocate small buffer queue.
2923                  */
2924                 rx_ring->sbq_base =
2925                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2926                                          &rx_ring->sbq_base_dma);
2927
2928                 if (rx_ring->sbq_base == NULL) {
2929                         netif_err(qdev, ifup, qdev->ndev,
2930                                   "Small buffer queue allocation failed.\n");
2931                         goto err_mem;
2932                 }
2933
2934                 /*
2935                  * Allocate small buffer queue control blocks.
2936                  */
2937                 rx_ring->sbq =
2938                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2939                             GFP_KERNEL);
2940                 if (rx_ring->sbq == NULL) {
2941                         netif_err(qdev, ifup, qdev->ndev,
2942                                   "Small buffer queue control block allocation failed.\n");
2943                         goto err_mem;
2944                 }
2945
2946                 ql_init_sbq_ring(qdev, rx_ring);
2947         }
2948
2949         if (rx_ring->lbq_len) {
2950                 /*
2951                  * Allocate large buffer queue.
2952                  */
2953                 rx_ring->lbq_base =
2954                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2955                                          &rx_ring->lbq_base_dma);
2956
2957                 if (rx_ring->lbq_base == NULL) {
2958                         netif_err(qdev, ifup, qdev->ndev,
2959                                   "Large buffer queue allocation failed.\n");
2960                         goto err_mem;
2961                 }
2962                 /*
2963                  * Allocate large buffer queue control blocks.
2964                  */
2965                 rx_ring->lbq =
2966                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2967                             GFP_KERNEL);
2968                 if (rx_ring->lbq == NULL) {
2969                         netif_err(qdev, ifup, qdev->ndev,
2970                                   "Large buffer queue control block allocation failed.\n");
2971                         goto err_mem;
2972                 }
2973
2974                 ql_init_lbq_ring(qdev, rx_ring);
2975         }
2976
2977         return 0;
2978
2979 err_mem:
2980         ql_free_rx_resources(qdev, rx_ring);
2981         return -ENOMEM;
2982 }
2983
2984 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2985 {
2986         struct tx_ring *tx_ring;
2987         struct tx_ring_desc *tx_ring_desc;
2988         int i, j;
2989
2990         /*
2991          * Loop through all queues and free
2992          * any resources.
2993          */
2994         for (j = 0; j < qdev->tx_ring_count; j++) {
2995                 tx_ring = &qdev->tx_ring[j];
2996                 for (i = 0; i < tx_ring->wq_len; i++) {
2997                         tx_ring_desc = &tx_ring->q[i];
2998                         if (tx_ring_desc && tx_ring_desc->skb) {
2999                                 netif_err(qdev, ifdown, qdev->ndev,
3000                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
3001                                           tx_ring_desc->skb, j,
3002                                           tx_ring_desc->index);
3003                                 ql_unmap_send(qdev, tx_ring_desc,
3004                                               tx_ring_desc->map_cnt);
3005                                 dev_kfree_skb(tx_ring_desc->skb);
3006                                 tx_ring_desc->skb = NULL;
3007                         }
3008                 }
3009         }
3010 }
3011
3012 static void ql_free_mem_resources(struct ql_adapter *qdev)
3013 {
3014         int i;
3015
3016         for (i = 0; i < qdev->tx_ring_count; i++)
3017                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3018         for (i = 0; i < qdev->rx_ring_count; i++)
3019                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3020         ql_free_shadow_space(qdev);
3021 }
3022
3023 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3024 {
3025         int i;
3026
3027         /* Allocate space for our shadow registers and such. */
3028         if (ql_alloc_shadow_space(qdev))
3029                 return -ENOMEM;
3030
3031         for (i = 0; i < qdev->rx_ring_count; i++) {
3032                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3033                         netif_err(qdev, ifup, qdev->ndev,
3034                                   "RX resource allocation failed.\n");
3035                         goto err_mem;
3036                 }
3037         }
3038         /* Allocate tx queue resources */
3039         for (i = 0; i < qdev->tx_ring_count; i++) {
3040                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3041                         netif_err(qdev, ifup, qdev->ndev,
3042                                   "TX resource allocation failed.\n");
3043                         goto err_mem;
3044                 }
3045         }
3046         return 0;
3047
3048 err_mem:
3049         ql_free_mem_resources(qdev);
3050         return -ENOMEM;
3051 }
3052
3053 /* Set up the rx ring control block and pass it to the chip.
3054  * The control block is defined as
3055  * "Completion Queue Initialization Control Block", or cqicb.
3056  */
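/* Sketch of the per-ring layout set up below (derived from the code, not
 * an authoritative hardware description): each completion queue gets
 * RX_RING_SHADOW_SPACE bytes of the shadow page, laid out as one u64 for
 * the chip-written producer index, then the lbq base-address indirection
 * entries, then the sbq indirection entries.  Its doorbell registers live
 * in the page at doorbell_area + DB_PAGE_SIZE * (128 + cq_id).
 */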
3057 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3058 {
3059         struct cqicb *cqicb = &rx_ring->cqicb;
3060         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3061                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3062         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3063                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3064         void __iomem *doorbell_area =
3065             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3066         int err = 0;
3067         u16 bq_len;
3068         u64 tmp;
3069         __le64 *base_indirect_ptr;
3070         int page_entries;
3071
3072         /* Set up the shadow registers for this ring. */
3073         rx_ring->prod_idx_sh_reg = shadow_reg;
3074         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3075         *rx_ring->prod_idx_sh_reg = 0;
3076         shadow_reg += sizeof(u64);
3077         shadow_reg_dma += sizeof(u64);
3078         rx_ring->lbq_base_indirect = shadow_reg;
3079         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3080         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3081         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3082         rx_ring->sbq_base_indirect = shadow_reg;
3083         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3084
3085         /* PCI doorbell mem area + 0x00 for consumer index register */
3086         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3087         rx_ring->cnsmr_idx = 0;
3088         rx_ring->curr_entry = rx_ring->cq_base;
3089
3090         /* PCI doorbell mem area + 0x04 for valid register */
3091         rx_ring->valid_db_reg = doorbell_area + 0x04;
3092
3093         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3094         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3095
3096         /* PCI doorbell mem area + 0x1c */
3097         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3098
3099         memset((void *)cqicb, 0, sizeof(struct cqicb));
3100         cqicb->msix_vect = rx_ring->irq;
3101
3102         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3103         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3104
3105         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3106
3107         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3108
3109         /*
3110          * Set up the control block load flags.
3111          */
3112         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3113             FLAGS_LV |          /* Load MSI-X vector */
3114             FLAGS_LI;           /* Load irq delay values */
3115         if (rx_ring->lbq_len) {
3116                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3117                 tmp = (u64)rx_ring->lbq_base_dma;
3118                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3119                 page_entries = 0;
3120                 do {
3121                         *base_indirect_ptr = cpu_to_le64(tmp);
3122                         tmp += DB_PAGE_SIZE;
3123                         base_indirect_ptr++;
3124                         page_entries++;
3125                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3126                 cqicb->lbq_addr =
3127                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3128                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3129                         (u16) rx_ring->lbq_buf_size;
3130                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3131                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3132                         (u16) rx_ring->lbq_len;
3133                 cqicb->lbq_len = cpu_to_le16(bq_len);
3134                 rx_ring->lbq_prod_idx = 0;
3135                 rx_ring->lbq_curr_idx = 0;
3136                 rx_ring->lbq_clean_idx = 0;
3137                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3138         }
3139         if (rx_ring->sbq_len) {
3140                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3141                 tmp = (u64)rx_ring->sbq_base_dma;
3142                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3143                 page_entries = 0;
3144                 do {
3145                         *base_indirect_ptr = cpu_to_le64(tmp);
3146                         tmp += DB_PAGE_SIZE;
3147                         base_indirect_ptr++;
3148                         page_entries++;
3149                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3150                 cqicb->sbq_addr =
3151                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3152                 cqicb->sbq_buf_size =
3153                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3154                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3155                         (u16) rx_ring->sbq_len;
3156                 cqicb->sbq_len = cpu_to_le16(bq_len);
3157                 rx_ring->sbq_prod_idx = 0;
3158                 rx_ring->sbq_curr_idx = 0;
3159                 rx_ring->sbq_clean_idx = 0;
3160                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3161         }
3162         switch (rx_ring->type) {
3163         case TX_Q:
3164                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3165                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3166                 break;
3167         case RX_Q:
3168                 /* Inbound completion handling rx_rings run in
3169                  * separate NAPI contexts.
3170                  */
3171                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3172                                64);
3173                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3174                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3175                 break;
3176         default:
3177                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3178                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3179         }
3180         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181                      "Initializing rx completion queue.\n");
3182         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3183                            CFG_LCQ, rx_ring->cq_id);
3184         if (err) {
3185                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3186                 return err;
3187         }
3188         return err;
3189 }
3190
3191 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3192 {
3193         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3194         void __iomem *doorbell_area =
3195             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3196         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3197             (tx_ring->wq_id * sizeof(u64));
3198         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3199             (tx_ring->wq_id * sizeof(u64));
3200         int err = 0;
3201
3202         /*
3203          * Assign doorbell registers for this tx_ring.
3204          */
3205         /* TX PCI doorbell mem area for tx producer index */
3206         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3207         tx_ring->prod_idx = 0;
3208         /* TX PCI doorbell mem area + 0x04 */
3209         tx_ring->valid_db_reg = doorbell_area + 0x04;
3210
3211         /*
3212          * Assign shadow registers for this tx_ring.
3213          */
3214         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3215         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3216
3217         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3218         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3219                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3220         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3221         wqicb->rid = 0;
3222         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3223
3224         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3225
3226         ql_init_tx_ring(qdev, tx_ring);
3227
3228         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3229                            (u16) tx_ring->wq_id);
3230         if (err) {
3231                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3232                 return err;
3233         }
3234         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3235                      "Successfully loaded WQICB.\n");
3236         return err;
3237 }
3238
3239 static void ql_disable_msix(struct ql_adapter *qdev)
3240 {
3241         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3242                 pci_disable_msix(qdev->pdev);
3243                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3244                 kfree(qdev->msi_x_entry);
3245                 qdev->msi_x_entry = NULL;
3246         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3247                 pci_disable_msi(qdev->pdev);
3248                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3249         }
3250 }
3251
3252 /* We start by trying to get the number of vectors
3253  * stored in qdev->intr_count. If we don't get that
3254  * many then we reduce the count and try again.
3255  */
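/* Note on the retry loop below, per the legacy pci_enable_msix()
 * contract: a return of 0 means all requested vectors were allocated, a
 * positive return is the number of vectors actually available (so
 * intr_count is shrunk and the call retried), and a negative return means
 * MSI-X could not be enabled at all, in which case we fall back to MSI
 * and then to legacy interrupts.
 */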
3256 static void ql_enable_msix(struct ql_adapter *qdev)
3257 {
3258         int i, err;
3259
3260         /* Get the MSIX vectors. */
3261         if (qlge_irq_type == MSIX_IRQ) {
3262                 /* Try to alloc space for the msix struct,
3263                  * if it fails then go to MSI/legacy.
3264                  */
3265                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3266                                             sizeof(struct msix_entry),
3267                                             GFP_KERNEL);
3268                 if (!qdev->msi_x_entry) {
3269                         qlge_irq_type = MSI_IRQ;
3270                         goto msi;
3271                 }
3272
3273                 for (i = 0; i < qdev->intr_count; i++)
3274                         qdev->msi_x_entry[i].entry = i;
3275
3276                 /* Loop to get our vectors.  We start with
3277                  * what we want and settle for what we get.
3278                  */
3279                 do {
3280                         err = pci_enable_msix(qdev->pdev,
3281                                 qdev->msi_x_entry, qdev->intr_count);
3282                         if (err > 0)
3283                                 qdev->intr_count = err;
3284                 } while (err > 0);
3285
3286                 if (err < 0) {
3287                         kfree(qdev->msi_x_entry);
3288                         qdev->msi_x_entry = NULL;
3289                         netif_warn(qdev, ifup, qdev->ndev,
3290                                    "MSI-X Enable failed, trying MSI.\n");
3291                         qdev->intr_count = 1;
3292                         qlge_irq_type = MSI_IRQ;
3293                 } else if (err == 0) {
3294                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3295                         netif_info(qdev, ifup, qdev->ndev,
3296                                    "MSI-X Enabled, got %d vectors.\n",
3297                                    qdev->intr_count);
3298                         return;
3299                 }
3300         }
3301 msi:
3302         qdev->intr_count = 1;
3303         if (qlge_irq_type == MSI_IRQ) {
3304                 if (!pci_enable_msi(qdev->pdev)) {
3305                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3306                         netif_info(qdev, ifup, qdev->ndev,
3307                                    "Running with MSI interrupts.\n");
3308                         return;
3309                 }
3310         }
3311         qlge_irq_type = LEG_IRQ;
3312         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3313                      "Running with legacy interrupts.\n");
3314 }
3315
3316 /* Each vector services 1 RSS ring and 1 or more
3317  * TX completion rings.  This function loops through
3318  * the TX completion rings and assigns the vector that
3319  * will service it.  An example would be if there are
3320  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3321  * This would mean that vector 0 would service RSS ring 0
3322  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3323  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3324  */
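/* In other words (a sketch of the arithmetic below, assuming tx_ring_count
 * is a multiple of intr_count): tx_rings_per_vector =
 * tx_ring_count / intr_count, and the TX completion ring at rx_ring index
 * i (i >= rss_ring_count) gets vector
 * (i - rss_ring_count) / tx_rings_per_vector, i.e. the TX rings are handed
 * out to the vectors in contiguous blocks.
 */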
3325 static void ql_set_tx_vect(struct ql_adapter *qdev)
3326 {
3327         int i, j, vect;
3328         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3329
3330         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3331                 /* Assign irq vectors to the TX completion rx_rings. */
3332                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3333                                          i < qdev->rx_ring_count; i++) {
3334                         if (j == tx_rings_per_vector) {
3335                                 vect++;
3336                                 j = 0;
3337                         }
3338                         qdev->rx_ring[i].irq = vect;
3339                         j++;
3340                 }
3341         } else {
3342                 /* For single vector all rings have an irq
3343                  * of zero.
3344                  */
3345                 for (i = 0; i < qdev->rx_ring_count; i++)
3346                         qdev->rx_ring[i].irq = 0;
3347         }
3348 }
3349
3350 /* Set the interrupt mask for this vector.  Each vector
3351  * will service 1 RSS ring and 1 or more TX completion
3352  * rings.  This function sets up a bit mask per vector
3353  * that indicates which rings it services.
3354  */
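/* Example (values assumed for illustration): with 2 vectors and 8 TX
 * completion rings, vector 1 services RSS ring 1 plus TX completion rings
 * 4-7, so its irq_mask is the OR of (1 << cq_id) for those five rings.
 * With a single shared vector the mask covers every cq_id.
 */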
3355 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3356 {
3357         int j, vect = ctx->intr;
3358         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3359
3360         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3361                 /* Add the RSS ring serviced by this vector
3362                  * to the mask.
3363                  */
3364                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3365                 /* Add the TX ring(s) serviced by this vector
3366                  * to the mask. */
3367                 for (j = 0; j < tx_rings_per_vector; j++) {
3368                         ctx->irq_mask |=
3369                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3370                         (vect * tx_rings_per_vector) + j].cq_id);
3371                 }
3372         } else {
3373                 /* For single vector we just shift each queue's
3374                  * ID into the mask.
3375                  */
3376                 for (j = 0; j < qdev->rx_ring_count; j++)
3377                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3378         }
3379 }
3380
3381 /*
3382  * Here we build the intr_context structures based on
3383  * our rx_ring count and intr vector count.
3384  * The intr_context structure is used to hook each vector
3385  * to possibly different handlers.
3386  */
3387 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3388 {
3389         int i = 0;
3390         struct intr_context *intr_context = &qdev->intr_context[0];
3391
3392         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3393                 /* Each rx_ring has its
3394                  * own intr_context since we have separate
3395                  * vectors for each queue.
3396                  */
3397                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3398                         qdev->rx_ring[i].irq = i;
3399                         intr_context->intr = i;
3400                         intr_context->qdev = qdev;
3401                         /* Set up this vector's bit-mask that indicates
3402                          * which queues it services.
3403                          */
3404                         ql_set_irq_mask(qdev, intr_context);
3405                         /*
3406                          * We set up each vector's enable/disable/read bits so
3407                          * there are no bit/mask calculations in the critical path.
3408                          */
3409                         intr_context->intr_en_mask =
3410                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3412                             | i;
3413                         intr_context->intr_dis_mask =
3414                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3416                             INTR_EN_IHD | i;
3417                         intr_context->intr_read_mask =
3418                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3419                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3420                             i;
3421                         if (i == 0) {
3422                                 /* The first vector/queue handles
3423                                  * broadcast/multicast, fatal errors,
3424                                  * and firmware events.  This in addition
3425                                  * to normal inbound NAPI processing.
3426                                  */
3427                                 intr_context->handler = qlge_isr;
3428                                 sprintf(intr_context->name, "%s-rx-%d",
3429                                         qdev->ndev->name, i);
3430                         } else {
3431                                 /*
3432                                  * Inbound queues handle unicast frames only.
3433                                  */
3434                                 intr_context->handler = qlge_msix_rx_isr;
3435                                 sprintf(intr_context->name, "%s-rx-%d",
3436                                         qdev->ndev->name, i);
3437                         }
3438                 }
3439         } else {
3440                 /*
3441                  * All rx_rings use the same intr_context since
3442                  * there is only one vector.
3443                  */
3444                 intr_context->intr = 0;
3445                 intr_context->qdev = qdev;
3446                 /*
3447                  * We set up each vector's enable/disable/read bits so
3448                  * there are no bit/mask calculations in the critical path.
3449                  */
3450                 intr_context->intr_en_mask =
3451                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3452                 intr_context->intr_dis_mask =
3453                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3454                     INTR_EN_TYPE_DISABLE;
3455                 intr_context->intr_read_mask =
3456                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3457                 /*
3458                  * Single interrupt means one handler for all rings.
3459                  */
3460                 intr_context->handler = qlge_isr;
3461                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3462                 /* Set up this vector's bit-mask that indicates
3463                  * which queues it services. In this case there is
3464                  * a single vector so it will service all RSS and
3465                  * TX completion rings.
3466                  */
3467                 ql_set_irq_mask(qdev, intr_context);
3468         }
3469         /* Tell the TX completion rings which MSIx vector
3470          * they will be using.
3471          */
3472         ql_set_tx_vect(qdev);
3473 }
3474
3475 static void ql_free_irq(struct ql_adapter *qdev)
3476 {
3477         int i;
3478         struct intr_context *intr_context = &qdev->intr_context[0];
3479
3480         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3481                 if (intr_context->hooked) {
3482                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3483                                 free_irq(qdev->msi_x_entry[i].vector,
3484                                          &qdev->rx_ring[i]);
3485                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486                                              "freeing msix interrupt %d.\n", i);
3487                         } else {
3488                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3489                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3490                                              "freeing msi interrupt %d.\n", i);
3491                         }
3492                 }
3493         }
3494         ql_disable_msix(qdev);
3495 }
3496
3497 static int ql_request_irq(struct ql_adapter *qdev)
3498 {
3499         int i;
3500         int status = 0;
3501         struct pci_dev *pdev = qdev->pdev;
3502         struct intr_context *intr_context = &qdev->intr_context[0];
3503
3504         ql_resolve_queues_to_irqs(qdev);
3505
3506         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3507                 atomic_set(&intr_context->irq_cnt, 0);
3508                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3509                         status = request_irq(qdev->msi_x_entry[i].vector,
3510                                              intr_context->handler,
3511                                              0,
3512                                              intr_context->name,
3513                                              &qdev->rx_ring[i]);
3514                         if (status) {
3515                                 netif_err(qdev, ifup, qdev->ndev,
3516                                           "Failed request for MSIX interrupt %d.\n",
3517                                           i);
3518                                 goto err_irq;
3519                         } else {
3520                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521                                              "Hooked intr %d, queue type %s, with name %s.\n",
3522                                              i,
3523                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3524                                              "DEFAULT_Q" :
3525                                              qdev->rx_ring[i].type == TX_Q ?
3526                                              "TX_Q" :
3527                                              qdev->rx_ring[i].type == RX_Q ?
3528                                              "RX_Q" : "",
3529                                              intr_context->name);
3530                         }
3531                 } else {
3532                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533                                      "trying msi or legacy interrupts.\n");
3534                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3535                                      "%s: irq = %d.\n", __func__, pdev->irq);
3536                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3537                                      "%s: context->name = %s.\n", __func__,
3538                                      intr_context->name);
3539                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540                                      "%s: dev_id = 0x%p.\n", __func__,
3541                                      &qdev->rx_ring[0]);
3542                         status =
3543                             request_irq(pdev->irq, qlge_isr,
3544                                         test_bit(QL_MSI_ENABLED,
3545                                                  &qdev->
3546                                                  flags) ? 0 : IRQF_SHARED,
3547                                         intr_context->name, &qdev->rx_ring[0]);
3548                         if (status)
3549                                 goto err_irq;
3550
3551                         netif_err(qdev, ifup, qdev->ndev,
3552                                   "Hooked intr %d, queue type %s, with name %s.\n",
3553                                   i,
3554                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3555                                   "DEFAULT_Q" :
3556                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3557                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3558                                   intr_context->name);
3559                 }
3560                 intr_context->hooked = 1;
3561         }
3562         return status;
3563 err_irq:
3564         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3565         ql_free_irq(qdev);
3566         return status;
3567 }
3568
3569 static int ql_start_rss(struct ql_adapter *qdev)
3570 {
3571         static const u8 init_hash_seed[] = {
3572                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3573                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3574                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3575                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3576                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3577         };
3578         struct ricb *ricb = &qdev->ricb;
3579         int status = 0;
3580         int i;
3581         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3582
3583         memset((void *)ricb, 0, sizeof(*ricb));
3584
3585         ricb->base_cq = RSS_L4K;
3586         ricb->flags =
3587                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3588         ricb->mask = cpu_to_le16((u16)(0x3ff));
3589
3590         /*
3591          * Fill out the Indirection Table.
3592          */
3593         for (i = 0; i < 1024; i++)
3594                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
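        /* The AND above acts as i % rss_ring_count when rss_ring_count is
         * a power of two (which appears to be assumed here), e.g. the 1024
         * entries cycle 0,1,2,3,0,1,... for four RSS rings.
         */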
3595
3596         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3597         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3598
3599         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3600
3601         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3602         if (status) {
3603                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3604                 return status;
3605         }
3606         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3607                      "Successfully loaded RICB.\n");
3608         return status;
3609 }
3610
3611 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3612 {
3613         int i, status = 0;
3614
3615         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3616         if (status)
3617                 return status;
3618         /* Clear all the entries in the routing table. */
3619         for (i = 0; i < 16; i++) {
3620                 status = ql_set_routing_reg(qdev, i, 0, 0);
3621                 if (status) {
3622                         netif_err(qdev, ifup, qdev->ndev,
3623                                   "Failed to init routing register for CAM packets.\n");
3624                         break;
3625                 }
3626         }
3627         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3628         return status;
3629 }
3630
3631 /* Initialize the frame-to-queue routing. */
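/* Summary of the order used below (derived from the code): the table is
 * cleared, then slots are loaded for IP checksum errors, TCP/UDP checksum
 * errors, broadcast frames, an optional RSS-match entry when more than one
 * inbound queue exists, and finally the CAM-hit (unicast MAC) entry, all
 * under the RT_IDX hardware semaphore.
 */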
3632 static int ql_route_initialize(struct ql_adapter *qdev)
3633 {
3634         int status = 0;
3635
3636         /* Clear all the entries in the routing table. */
3637         status = ql_clear_routing_entries(qdev);
3638         if (status)
3639                 return status;
3640
3641         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3642         if (status)
3643                 return status;
3644
3645         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3646                                                 RT_IDX_IP_CSUM_ERR, 1);
3647         if (status) {
3648                 netif_err(qdev, ifup, qdev->ndev,
3649                         "Failed to init routing register "
3650                         "for IP CSUM error packets.\n");
3651                 goto exit;
3652         }
3653         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3654                                                 RT_IDX_TU_CSUM_ERR, 1);
3655         if (status) {
3656                 netif_err(qdev, ifup, qdev->ndev,
3657                         "Failed to init routing register "
3658                         "for TCP/UDP CSUM error packets.\n");
3659                 goto exit;
3660         }
3661         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3662         if (status) {
3663                 netif_err(qdev, ifup, qdev->ndev,
3664                           "Failed to init routing register for broadcast packets.\n");
3665                 goto exit;
3666         }
3667         /* If we have more than one inbound queue, then turn on RSS in the
3668          * routing block.
3669          */
3670         if (qdev->rss_ring_count > 1) {
3671                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3672                                         RT_IDX_RSS_MATCH, 1);
3673                 if (status) {
3674                         netif_err(qdev, ifup, qdev->ndev,
3675                                   "Failed to init routing register for MATCH RSS packets.\n");
3676                         goto exit;
3677                 }
3678         }
3679
3680         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3681                                     RT_IDX_CAM_HIT, 1);
3682         if (status)
3683                 netif_err(qdev, ifup, qdev->ndev,
3684                           "Failed to init routing register for CAM packets.\n");
3685 exit:
3686         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3687         return status;
3688 }
3689
3690 int ql_cam_route_initialize(struct ql_adapter *qdev)
3691 {
3692         int status, set;
3693
3694         /* Check whether the link is up and use that to
3695          * determine if we are setting or clearing
3696          * the MAC address in the CAM.
3697          */
3698         set = ql_read32(qdev, STS);
3699         set &= qdev->port_link_up;
3700         status = ql_set_mac_addr(qdev, set);
3701         if (status) {
3702                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3703                 return status;
3704         }
3705
3706         status = ql_route_initialize(qdev);
3707         if (status)
3708                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3709
3710         return status;
3711 }
3712
3713 static int ql_adapter_initialize(struct ql_adapter *qdev)
3714 {
3715         u32 value, mask;
3716         int i;
3717         int status = 0;
3718
3719         /*
3720          * Set up the System register to halt on errors.
3721          */
3722         value = SYS_EFE | SYS_FAE;
3723         mask = value << 16;
3724         ql_write32(qdev, SYS, mask | value);
3725
3726         /* Set the default queue, and VLAN behavior. */
3727         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3728         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3729         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3730
3731         /* Set the MPI interrupt to enabled. */
3732         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3733
3734         /* Enable the function, set pagesize, enable error checking. */
3735         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3736             FSC_EC | FSC_VM_PAGE_4K;
3737         value |= SPLT_SETTING;
3738
3739         /* Set/clear header splitting. */
3740         mask = FSC_VM_PAGESIZE_MASK |
3741             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3742         ql_write32(qdev, FSC, mask | value);
3743
3744         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3745
3746         /* Set RX packet routing to use port/pci function on which the
3747          * packet arrived, in addition to the usual frame routing.
3748          * This is helpful on bonding where both interfaces can have
3749          * the same MAC address.
3750          */
3751         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3752         /* Reroute all packets to our interface.
3753          * They may have been routed to MPI firmware
3754          * due to WOL.
3755          */
3756         value = ql_read32(qdev, MGMT_RCV_CFG);
3757         value &= ~MGMT_RCV_CFG_RM;
3758         mask = 0xffff0000;
3759
3760         /* Sticky reg needs clearing due to WOL. */
3761         ql_write32(qdev, MGMT_RCV_CFG, mask);
3762         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3763
3764         /* Default WOL is enabled on Mezz cards */
3765         if (qdev->pdev->subsystem_device == 0x0068 ||
3766                         qdev->pdev->subsystem_device == 0x0180)
3767                 qdev->wol = WAKE_MAGIC;
3768
3769         /* Start up the rx queues. */
3770         for (i = 0; i < qdev->rx_ring_count; i++) {
3771                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3772                 if (status) {
3773                         netif_err(qdev, ifup, qdev->ndev,
3774                                   "Failed to start rx ring[%d].\n", i);
3775                         return status;
3776                 }
3777         }
3778
3779         /* If there is more than one inbound completion queue
3780          * then download a RICB to configure RSS.
3781          */
3782         if (qdev->rss_ring_count > 1) {
3783                 status = ql_start_rss(qdev);
3784                 if (status) {
3785                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3786                         return status;
3787                 }
3788         }
3789
3790         /* Start up the tx queues. */
3791         for (i = 0; i < qdev->tx_ring_count; i++) {
3792                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3793                 if (status) {
3794                         netif_err(qdev, ifup, qdev->ndev,
3795                                   "Failed to start tx ring[%d].\n", i);
3796                         return status;
3797                 }
3798         }
3799
3800         /* Initialize the port and set the max framesize. */
3801         status = qdev->nic_ops->port_initialize(qdev);
3802         if (status)
3803                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3804
3805         /* Set up the MAC address and frame routing filter. */
3806         status = ql_cam_route_initialize(qdev);
3807         if (status) {
3808                 netif_err(qdev, ifup, qdev->ndev,
3809                           "Failed to init CAM/Routing tables.\n");
3810                 return status;
3811         }
3812
3813         /* Start NAPI for the RSS queues. */
3814         for (i = 0; i < qdev->rss_ring_count; i++) {
3815                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3816                              "Enabling NAPI for rx_ring[%d].\n", i);
3817                 napi_enable(&qdev->rx_ring[i].napi);
3818         }
3819
3820         return status;
3821 }
3822
3823 /* Issue soft reset to chip. */
3824 static int ql_adapter_reset(struct ql_adapter *qdev)
3825 {
3826         u32 value;
3827         int status = 0;
3828         unsigned long end_jiffies;
3829
3830         /* Clear all the entries in the routing table. */
3831         status = ql_clear_routing_entries(qdev);
3832         if (status) {
3833                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3834                 return status;
3835         }
3836
3837         end_jiffies = jiffies +
3838                 max((unsigned long)1, usecs_to_jiffies(30));
3839
3840         /* If the recovery bit is set, skip the mailbox commands and
3841          * clear the bit; otherwise this is the normal reset process.
3842          */
3843         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3844                 /* Stop management traffic. */
3845                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3846
3847                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3848                 ql_wait_fifo_empty(qdev);
3849         } else
3850                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3851
3852         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3853
3854         do {
3855                 value = ql_read32(qdev, RST_FO);
3856                 if ((value & RST_FO_FR) == 0)
3857                         break;
3858                 cpu_relax();
3859         } while (time_before(jiffies, end_jiffies));
3860
3861         if (value & RST_FO_FR) {
3862                 netif_err(qdev, ifdown, qdev->ndev,
3863                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3864                 status = -ETIMEDOUT;
3865         }
3866
3867         /* Resume management traffic. */
3868         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3869         return status;
3870 }
3871
3872 static void ql_display_dev_info(struct net_device *ndev)
3873 {
3874         struct ql_adapter *qdev = netdev_priv(ndev);
3875
3876         netif_info(qdev, probe, qdev->ndev,
3877                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3878                    "XG Roll = %d, XG Rev = %d.\n",
3879                    qdev->func,
3880                    qdev->port,
3881                    qdev->chip_rev_id & 0x0000000f,
3882                    qdev->chip_rev_id >> 4 & 0x0000000f,
3883                    qdev->chip_rev_id >> 8 & 0x0000000f,
3884                    qdev->chip_rev_id >> 12 & 0x0000000f);
3885         netif_info(qdev, probe, qdev->ndev,
3886                    "MAC address %pM\n", ndev->dev_addr);
3887 }
3888
3889 static int ql_wol(struct ql_adapter *qdev)
3890 {
3891         int status = 0;
3892         u32 wol = MB_WOL_DISABLE;
3893
3894         /* The CAM is still intact after a reset, but if we
3895          * are doing WOL, then we may need to program the
3896          * routing regs. We would also need to issue the mailbox
3897          * commands to instruct the MPI what to do per the ethtool
3898          * settings.
3899          */
3900
3901         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3902                         WAKE_MCAST | WAKE_BCAST)) {
3903                 netif_err(qdev, ifdown, qdev->ndev,
3904                           "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3905                           qdev->wol);
3906                 return -EINVAL;
3907         }
3908
3909         if (qdev->wol & WAKE_MAGIC) {
3910                 status = ql_mb_wol_set_magic(qdev, 1);
3911                 if (status) {
3912                         netif_err(qdev, ifdown, qdev->ndev,
3913                                   "Failed to set magic packet on %s.\n",
3914                                   qdev->ndev->name);
3915                         return status;
3916                 } else
3917                         netif_info(qdev, drv, qdev->ndev,
3918                                    "Enabled magic packet successfully on %s.\n",
3919                                    qdev->ndev->name);
3920
3921                 wol |= MB_WOL_MAGIC_PKT;
3922         }
3923
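        /* For example, when only WAKE_MAGIC is requested, the mode word
         * handed to ql_mb_wol_mode() below ends up with MB_WOL_MAGIC_PKT
         * and MB_WOL_MODE_ON set.
         */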
3924         if (qdev->wol) {
3925                 wol |= MB_WOL_MODE_ON;
3926                 status = ql_mb_wol_mode(qdev, wol);
3927                 netif_err(qdev, drv, qdev->ndev,
3928                           "WOL %s (wol code 0x%x) on %s\n",
3929                           (status == 0) ? "Successfully set" : "Failed",
3930                           wol, qdev->ndev->name);
3931         }
3932
3933         return status;
3934 }
3935
3936 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3937 {
3938
3939         /* Don't kill the reset worker thread if we
3940          * are in the process of recovery.
3941          */
3942         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3943                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3944         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3945         cancel_delayed_work_sync(&qdev->mpi_work);
3946         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3947         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3948         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3949 }
3950
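/* Quiesce the adapter: turn the link off, cancel deferred work, stop
 * NAPI and interrupts, drain the tx rings, soft-reset the chip and
 * free the posted rx buffers.
 */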
3951 static int ql_adapter_down(struct ql_adapter *qdev)
3952 {
3953         int i, status = 0;
3954
3955         ql_link_off(qdev);
3956
3957         ql_cancel_all_work_sync(qdev);
3958
3959         for (i = 0; i < qdev->rss_ring_count; i++)
3960                 napi_disable(&qdev->rx_ring[i].napi);
3961
3962         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3963
3964         ql_disable_interrupts(qdev);
3965
3966         ql_tx_ring_clean(qdev);
3967
3968         /* Call netif_napi_del() from common point.
3969          */
3970         for (i = 0; i < qdev->rss_ring_count; i++)
3971                 netif_napi_del(&qdev->rx_ring[i].napi);
3972
3973         status = ql_adapter_reset(qdev);
3974         if (status)
3975                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3976                           qdev->func);
3977         ql_free_rx_buffers(qdev);
3978
3979         return status;
3980 }
3981
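/* Bring the adapter up: program the hardware, post rx buffers, turn
 * the carrier on if the port reports link, restore the rx mode and
 * VLAN settings, then enable interrupts and the tx queues.
 */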
3982 static int ql_adapter_up(struct ql_adapter *qdev)
3983 {
3984         int err = 0;
3985
3986         err = ql_adapter_initialize(qdev);
3987         if (err) {
3988                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3989                 goto err_init;
3990         }
3991         set_bit(QL_ADAPTER_UP, &qdev->flags);
3992         ql_alloc_rx_buffers(qdev);
3993         /* If the port is initialized and the
3994          * link is up then turn on the carrier.
3995          */
3996         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3997                         (ql_read32(qdev, STS) & qdev->port_link_up))
3998                 ql_link_on(qdev);
3999         /* Restore rx mode. */
4000         clear_bit(QL_ALLMULTI, &qdev->flags);
4001         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4002         qlge_set_multicast_list(qdev->ndev);
4003
4004         /* Restore vlan setting. */
4005         qlge_restore_vlan(qdev);
4006
4007         ql_enable_interrupts(qdev);
4008         ql_enable_all_completion_interrupts(qdev);
4009         netif_tx_start_all_queues(qdev->ndev);
4010
4011         return 0;
4012 err_init:
4013         ql_adapter_reset(qdev);
4014         return err;
4015 }
4016
4017 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4018 {
4019         ql_free_mem_resources(qdev);
4020         ql_free_irq(qdev);
4021 }
4022
4023 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4024 {
4025         int status = 0;
4026
4027         if (ql_alloc_mem_resources(qdev)) {
4028                 netif_err(qdev, ifup, qdev->ndev, "Unable to  allocate memory.\n");
4029                 return -ENOMEM;
4030         }
4031         status = ql_request_irq(qdev);
4032         return status;
4033 }
4034
4035 static int qlge_close(struct net_device *ndev)
4036 {
4037         struct ql_adapter *qdev = netdev_priv(ndev);
4038
4039         /* If we hit the pci_channel_io_perm_failure
4040          * condition, then we have already
4041          * brought the adapter down.
4042          */
4043         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4044                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4045                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4046                 return 0;
4047         }
4048
4049         /*
4050          * Wait for device to recover from a reset.
4051          * (Rarely happens, but possible.)
4052          */
4053         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4054                 msleep(1);
4055         ql_adapter_down(qdev);
4056         ql_release_adapter_resources(qdev);
4057         return 0;
4058 }
4059
4060 static int ql_configure_rings(struct ql_adapter *qdev)
4061 {
4062         int i;
4063         struct rx_ring *rx_ring;
4064         struct tx_ring *tx_ring;
4065         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4066         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4067                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4068
4069         qdev->lbq_buf_order = get_order(lbq_buf_len);
4070
4071         /* In a perfect world we have one RSS ring for each CPU
4072          * and each has its own vector.  To do that we ask for
4073          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4074          * vector count to what we actually get.  We then
4075          * allocate an RSS ring for each.
4076          * Essentially, we are doing min(cpu_count, msix_vector_count).
4077          */
4078         qdev->intr_count = cpu_cnt;
4079         ql_enable_msix(qdev);
4080         /* Adjust the RSS ring count to the actual vector count. */
4081         qdev->rss_ring_count = qdev->intr_count;
4082         qdev->tx_ring_count = cpu_cnt;
4083         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
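        /* For example, an 8-CPU system that is granted all 8 requested
         * MSI-X vectors ends up with rss_ring_count = 8, tx_ring_count = 8
         * and rx_ring_count = 16: eight inbound RSS completion queues
         * followed by eight outbound-completion-only queues.
         */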
4084
4085         for (i = 0; i < qdev->tx_ring_count; i++) {
4086                 tx_ring = &qdev->tx_ring[i];
4087                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4088                 tx_ring->qdev = qdev;
4089                 tx_ring->wq_id = i;
4090                 tx_ring->wq_len = qdev->tx_ring_size;
4091                 tx_ring->wq_size =
4092                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4093
4094                 /*
4095                  * The completion queue IDs for the tx rings start
4096                  * immediately after the rss rings.
4097                  */
4098                 tx_ring->cq_id = qdev->rss_ring_count + i;
4099         }
4100
4101         for (i = 0; i < qdev->rx_ring_count; i++) {
4102                 rx_ring = &qdev->rx_ring[i];
4103                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4104                 rx_ring->qdev = qdev;
4105                 rx_ring->cq_id = i;
4106                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4107                 if (i < qdev->rss_ring_count) {
4108                         /*
4109                          * Inbound (RSS) queues.
4110                          */
4111                         rx_ring->cq_len = qdev->rx_ring_size;
4112                         rx_ring->cq_size =
4113                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4114                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4115                         rx_ring->lbq_size =
4116                             rx_ring->lbq_len * sizeof(__le64);
4117                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4118                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4119                                      "lbq_buf_size %d, order = %d\n",
4120                                      rx_ring->lbq_buf_size,
4121                                      qdev->lbq_buf_order);
4122                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4123                         rx_ring->sbq_size =
4124                             rx_ring->sbq_len * sizeof(__le64);
4125                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4126                         rx_ring->type = RX_Q;
4127                 } else {
4128                         /*
4129                          * Outbound queue handles outbound completions only.
4130                          */
4131                         /* outbound cq is same size as tx_ring it services. */
4132                         rx_ring->cq_len = qdev->tx_ring_size;
4133                         rx_ring->cq_size =
4134                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4135                         rx_ring->lbq_len = 0;
4136                         rx_ring->lbq_size = 0;
4137                         rx_ring->lbq_buf_size = 0;
4138                         rx_ring->sbq_len = 0;
4139                         rx_ring->sbq_size = 0;
4140                         rx_ring->sbq_buf_size = 0;
4141                         rx_ring->type = TX_Q;
4142                 }
4143         }
4144         return 0;
4145 }
4146
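/* ndo_open handler: soft-reset the chip, size the rings for the current
 * MTU and CPU count, allocate memory and IRQ resources, then bring the
 * adapter up.  Resources are released if any step fails.
 */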
4147 static int qlge_open(struct net_device *ndev)
4148 {
4149         int err = 0;
4150         struct ql_adapter *qdev = netdev_priv(ndev);
4151
4152         err = ql_adapter_reset(qdev);
4153         if (err)
4154                 return err;
4155
4156         err = ql_configure_rings(qdev);
4157         if (err)
4158                 return err;
4159
4160         err = ql_get_adapter_resources(qdev);
4161         if (err)
4162                 goto error_up;
4163
4164         err = ql_adapter_up(qdev);
4165         if (err)
4166                 goto error_up;
4167
4168         return err;
4169
4170 error_up:
4171         ql_release_adapter_resources(qdev);
4172         return err;
4173 }
4174
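/* Re-size the large receive buffers after an MTU change: wait for any
 * reset in progress, bring the adapter down, set the new lbq_buf_size
 * on every RSS ring, then bring the adapter back up.
 */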
4175 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4176 {
4177         struct rx_ring *rx_ring;
4178         int i, status;
4179         u32 lbq_buf_len;
4180
4181         /* Wait for an outstanding reset to complete. */
4182         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4183                 int i = 3;
4184                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4185                         netif_err(qdev, ifup, qdev->ndev,
4186                                   "Waiting for adapter UP...\n");
4187                         ssleep(1);
4188                 }
4189
4190                 if (!i) {
4191                         netif_err(qdev, ifup, qdev->ndev,
4192                                   "Timed out waiting for adapter UP\n");
4193                         return -ETIMEDOUT;
4194                 }
4195         }
4196
4197         status = ql_adapter_down(qdev);
4198         if (status)
4199                 goto error;
4200
4201         /* Get the new rx buffer size. */
4202         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4203                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4204         qdev->lbq_buf_order = get_order(lbq_buf_len);
4205
4206         for (i = 0; i < qdev->rss_ring_count; i++) {
4207                 rx_ring = &qdev->rx_ring[i];
4208                 /* Set the new size. */
4209                 rx_ring->lbq_buf_size = lbq_buf_len;
4210         }
4211
4212         status = ql_adapter_up(qdev);
4213         if (status)
4214                 goto error;
4215
4216         return status;
4217 error:
4218         netif_alert(qdev, ifup, qdev->ndev,
4219                     "Driver up/down cycle failed, closing device.\n");
4220         set_bit(QL_ADAPTER_UP, &qdev->flags);
4221         dev_close(qdev->ndev);
4222         return status;
4223 }
4224
4225 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4226 {
4227         struct ql_adapter *qdev = netdev_priv(ndev);
4228         int status;
4229
4230         if (ndev->mtu == 1500 && new_mtu == 9000) {
4231                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4232         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4233                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4234         } else
4235                 return -EINVAL;
4236
4237         queue_delayed_work(qdev->workqueue,
4238                         &qdev->mpi_port_cfg_work, 3*HZ);
4239
4240         ndev->mtu = new_mtu;
4241
4242         if (!netif_running(qdev->ndev)) {
4243                 return 0;
4244         }
4245
4246         status = ql_change_rx_buffers(qdev);
4247         if (status) {
4248                 netif_err(qdev, ifup, qdev->ndev,
4249                           "Changing MTU failed.\n");
4250         }
4251
4252         return status;
4253 }
4254
4255 static struct net_device_stats *qlge_get_stats(struct net_device
4256                                                *ndev)
4257 {
4258         struct ql_adapter *qdev = netdev_priv(ndev);
4259         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4260         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4261         unsigned long pkts, mcast, dropped, errors, bytes;
4262         int i;
4263
4264         /* Get RX stats. */
4265         pkts = mcast = dropped = errors = bytes = 0;
4266         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4267                 pkts += rx_ring->rx_packets;
4268                 bytes += rx_ring->rx_bytes;
4269                 dropped += rx_ring->rx_dropped;
4270                 errors += rx_ring->rx_errors;
4271                 mcast += rx_ring->rx_multicast;
4272         }
4273         ndev->stats.rx_packets = pkts;
4274         ndev->stats.rx_bytes = bytes;
4275         ndev->stats.rx_dropped = dropped;
4276         ndev->stats.rx_errors = errors;
4277         ndev->stats.multicast = mcast;
4278
4279         /* Get TX stats. */
4280         pkts = errors = bytes = 0;
4281         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4282                 pkts += tx_ring->tx_packets;
4283                 bytes += tx_ring->tx_bytes;
4284                 errors += tx_ring->tx_errors;
4285         }
4286         ndev->stats.tx_packets = pkts;
4287         ndev->stats.tx_bytes = bytes;
4288         ndev->stats.tx_errors = errors;
4289         return &ndev->stats;
4290 }
4291
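/* ndo_set_multicast_list handler: under the routing-index semaphore,
 * set or clear the promiscuous and all-multi routing slots on
 * transitions, and program each multicast address into the MAC
 * address registers.
 */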
4292 static void qlge_set_multicast_list(struct net_device *ndev)
4293 {
4294         struct ql_adapter *qdev = netdev_priv(ndev);
4295         struct netdev_hw_addr *ha;
4296         int i, status;
4297
4298         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4299         if (status)
4300                 return;
4301         /*
4302          * Set or clear promiscuous mode if a
4303          * transition is taking place.
4304          */
4305         if (ndev->flags & IFF_PROMISC) {
4306                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4307                         if (ql_set_routing_reg
4308                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4309                                 netif_err(qdev, hw, qdev->ndev,
4310                                           "Failed to set promiscuous mode.\n");
4311                         } else {
4312                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4313                         }
4314                 }
4315         } else {
4316                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4317                         if (ql_set_routing_reg
4318                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4319                                 netif_err(qdev, hw, qdev->ndev,
4320                                           "Failed to clear promiscuous mode.\n");
4321                         } else {
4322                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4323                         }
4324                 }
4325         }
4326
4327         /*
4328          * Set or clear all multicast mode if a
4329          * transition is taking place.
4330          */
4331         if ((ndev->flags & IFF_ALLMULTI) ||
4332             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4333                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4334                         if (ql_set_routing_reg
4335                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4336                                 netif_err(qdev, hw, qdev->ndev,
4337                                           "Failed to set all-multi mode.\n");
4338                         } else {
4339                                 set_bit(QL_ALLMULTI, &qdev->flags);
4340                         }
4341                 }
4342         } else {
4343                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4344                         if (ql_set_routing_reg
4345                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4346                                 netif_err(qdev, hw, qdev->ndev,
4347                                           "Failed to clear all-multi mode.\n");
4348                         } else {
4349                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4350                         }
4351                 }
4352         }
4353
4354         if (!netdev_mc_empty(ndev)) {
4355                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4356                 if (status)
4357                         goto exit;
4358                 i = 0;
4359                 netdev_for_each_mc_addr(ha, ndev) {
4360                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4361                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4362                                 netif_err(qdev, hw, qdev->ndev,
4363                                           "Failed to loadmulticast address.\n");
4364                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4365                                 goto exit;
4366                         }
4367                         i++;
4368                 }
4369                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4370                 if (ql_set_routing_reg
4371                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4372                         netif_err(qdev, hw, qdev->ndev,
4373                                   "Failed to set multicast match mode.\n");
4374                 } else {
4375                         set_bit(QL_ALLMULTI, &qdev->flags);
4376                 }
4377         }
4378 exit:
4379         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4380 }
4381
4382 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4383 {
4384         struct ql_adapter *qdev = netdev_priv(ndev);
4385         struct sockaddr *addr = p;
4386         int status;
4387
4388         if (!is_valid_ether_addr(addr->sa_data))
4389                 return -EADDRNOTAVAIL;
4390         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4391         /* Update local copy of current mac address. */
4392         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4393
4394         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4395         if (status)
4396                 return status;
4397         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4398                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4399         if (status)
4400                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4401         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4402         return status;
4403 }
4404
4405 static void qlge_tx_timeout(struct net_device *ndev)
4406 {
4407         struct ql_adapter *qdev = netdev_priv(ndev);
4408         ql_queue_asic_error(qdev);
4409 }
4410
4411 static void ql_asic_reset_work(struct work_struct *work)
4412 {
4413         struct ql_adapter *qdev =
4414             container_of(work, struct ql_adapter, asic_reset_work.work);
4415         int status;
4416         rtnl_lock();
4417         status = ql_adapter_down(qdev);
4418         if (status)
4419                 goto error;
4420
4421         status = ql_adapter_up(qdev);
4422         if (status)
4423                 goto error;
4424
4425         /* Restore rx mode. */
4426         clear_bit(QL_ALLMULTI, &qdev->flags);
4427         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4428         qlge_set_multicast_list(qdev->ndev);
4429
4430         rtnl_unlock();
4431         return;
4432 error:
4433         netif_alert(qdev, ifup, qdev->ndev,
4434                     "Driver up/down cycle failed, closing device\n");
4435
4436         set_bit(QL_ADAPTER_UP, &qdev->flags);
4437         dev_close(qdev->ndev);
4438         rtnl_unlock();
4439 }
4440
4441 static const struct nic_operations qla8012_nic_ops = {
4442         .get_flash              = ql_get_8012_flash_params,
4443         .port_initialize        = ql_8012_port_initialize,
4444 };
4445
4446 static const struct nic_operations qla8000_nic_ops = {
4447         .get_flash              = ql_get_8000_flash_params,
4448         .port_initialize        = ql_8000_port_initialize,
4449 };
4450
4451 /* Find the pcie function number for the other NIC
4452  * on this chip.  Since both NIC functions share a
4453  * common firmware we have the lowest enabled function
4454  * do any common work.  Examples would be resetting
4455  * after a fatal firmware error, or doing a firmware
4456  * coredump.
4457  */
4458 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4459 {
4460         int status = 0;
4461         u32 temp;
4462         u32 nic_func1, nic_func2;
4463
4464         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4465                         &temp);
4466         if (status)
4467                 return status;
4468
4469         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4470                         MPI_TEST_NIC_FUNC_MASK);
4471         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4472                         MPI_TEST_NIC_FUNC_MASK);
4473
4474         if (qdev->func == nic_func1)
4475                 qdev->alt_func = nic_func2;
4476         else if (qdev->func == nic_func2)
4477                 qdev->alt_func = nic_func1;
4478         else
4479                 status = -EIO;
4480
4481         return status;
4482 }
4483
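/* Derive per-function parameters from the STS register.  For example,
 * if the MPI test register reports NIC functions 0 and 2 and we are
 * function 0, then alt_func is 2 and, since func < alt_func, this
 * instance drives port 0 (XGMAC0 semaphore, function 0 mailboxes).
 */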
4484 static int ql_get_board_info(struct ql_adapter *qdev)
4485 {
4486         int status;
4487         qdev->func =
4488             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4489         if (qdev->func > 3)
4490                 return -EIO;
4491
4492         status = ql_get_alt_pcie_func(qdev);
4493         if (status)
4494                 return status;
4495
4496         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4497         if (qdev->port) {
4498                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4499                 qdev->port_link_up = STS_PL1;
4500                 qdev->port_init = STS_PI1;
4501                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4502                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4503         } else {
4504                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4505                 qdev->port_link_up = STS_PL0;
4506                 qdev->port_init = STS_PI0;
4507                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4508                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4509         }
4510         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4511         qdev->device_id = qdev->pdev->device;
4512         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4513                 qdev->nic_ops = &qla8012_nic_ops;
4514         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4515                 qdev->nic_ops = &qla8000_nic_ops;
4516         return status;
4517 }
4518
4519 static void ql_release_all(struct pci_dev *pdev)
4520 {
4521         struct net_device *ndev = pci_get_drvdata(pdev);
4522         struct ql_adapter *qdev = netdev_priv(ndev);
4523
4524         if (qdev->workqueue) {
4525                 destroy_workqueue(qdev->workqueue);
4526                 qdev->workqueue = NULL;
4527         }
4528
4529         if (qdev->reg_base)
4530                 iounmap(qdev->reg_base);
4531         if (qdev->doorbell_area)
4532                 iounmap(qdev->doorbell_area);
4533         vfree(qdev->mpi_coredump);
4534         pci_release_regions(pdev);
4535         pci_set_drvdata(pdev, NULL);
4536 }
4537
4538 static int __devinit ql_init_device(struct pci_dev *pdev,
4539                                     struct net_device *ndev, int cards_found)
4540 {
4541         struct ql_adapter *qdev = netdev_priv(ndev);
4542         int err = 0;
4543
4544         memset((void *)qdev, 0, sizeof(*qdev));
4545         err = pci_enable_device(pdev);
4546         if (err) {
4547                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4548                 return err;
4549         }
4550
4551         qdev->ndev = ndev;
4552         qdev->pdev = pdev;
4553         pci_set_drvdata(pdev, ndev);
4554
4555         /* Set PCIe read request size */
4556         err = pcie_set_readrq(pdev, 4096);
4557         if (err) {
4558                 dev_err(&pdev->dev, "Set readrq failed.\n");
4559                 goto err_out1;
4560         }
4561
4562         err = pci_request_regions(pdev, DRV_NAME);
4563         if (err) {
4564                 dev_err(&pdev->dev, "PCI region request failed.\n");
4565                 goto err_out1;
4566         }
4567
4568         pci_set_master(pdev);
4569         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4570                 set_bit(QL_DMA64, &qdev->flags);
4571                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4572         } else {
4573                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4574                 if (!err)
4575                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4576         }
4577
4578         if (err) {
4579                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4580                 goto err_out2;
4581         }
4582
4583         /* Set PCIe reset type for EEH to fundamental. */
4584         pdev->needs_freset = 1;
4585         pci_save_state(pdev);
4586         qdev->reg_base =
4587             ioremap_nocache(pci_resource_start(pdev, 1),
4588                             pci_resource_len(pdev, 1));
4589         if (!qdev->reg_base) {
4590                 dev_err(&pdev->dev, "Register mapping failed.\n");
4591                 err = -ENOMEM;
4592                 goto err_out2;
4593         }
4594
4595         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4596         qdev->doorbell_area =
4597             ioremap_nocache(pci_resource_start(pdev, 3),
4598                             pci_resource_len(pdev, 3));
4599         if (!qdev->doorbell_area) {
4600                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4601                 err = -ENOMEM;
4602                 goto err_out2;
4603         }
4604
4605         err = ql_get_board_info(qdev);
4606         if (err) {
4607                 dev_err(&pdev->dev, "Register access failed.\n");
4608                 err = -EIO;
4609                 goto err_out2;
4610         }
4611         qdev->msg_enable = netif_msg_init(debug, default_msg);
4612         spin_lock_init(&qdev->hw_lock);
4613         spin_lock_init(&qdev->stats_lock);
4614
4615         if (qlge_mpi_coredump) {
4616                 qdev->mpi_coredump =
4617                         vmalloc(sizeof(struct ql_mpi_coredump));
4618                 if (qdev->mpi_coredump == NULL) {
4619                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4620                         err = -ENOMEM;
4621                         goto err_out2;
4622                 }
4623                 if (qlge_force_coredump)
4624                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4625         }
4626         /* make sure the EEPROM is good */
4627         err = qdev->nic_ops->get_flash(qdev);
4628         if (err) {
4629                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4630                 goto err_out2;
4631         }
4632
4633         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4634         /* Keep local copy of current mac address. */
4635         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4636
4637         /* Set up the default ring sizes. */
4638         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4639         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4640
4641         /* Set up the coalescing parameters. */
4642         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4643         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4644         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4645         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4646
4647         /*
4648          * Set up the operating parameters.
4649          */
4650         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4651         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4652         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4653         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4654         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4655         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4656         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4657         init_completion(&qdev->ide_completion);
4658         mutex_init(&qdev->mpi_mutex);
4659
4660         if (!cards_found) {
4661                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4662                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4663                          DRV_NAME, DRV_VERSION);
4664         }
4665         return 0;
4666 err_out2:
4667         ql_release_all(pdev);
4668 err_out1:
4669         pci_disable_device(pdev);
4670         return err;
4671 }
4672
4673 static const struct net_device_ops qlge_netdev_ops = {
4674         .ndo_open               = qlge_open,
4675         .ndo_stop               = qlge_close,
4676         .ndo_start_xmit         = qlge_send,
4677         .ndo_change_mtu         = qlge_change_mtu,
4678         .ndo_get_stats          = qlge_get_stats,
4679         .ndo_set_multicast_list = qlge_set_multicast_list,
4680         .ndo_set_mac_address    = qlge_set_mac_address,
4681         .ndo_validate_addr      = eth_validate_addr,
4682         .ndo_tx_timeout         = qlge_tx_timeout,
4683         .ndo_fix_features       = qlge_fix_features,
4684         .ndo_set_features       = qlge_set_features,
4685         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4686         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4687 };
4688
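/* 5-second watchdog: read a status register so a dead PCI bus is
 * noticed (and reported via EEH); reschedule unless the channel is
 * already offline.
 */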
4689 static void ql_timer(unsigned long data)
4690 {
4691         struct ql_adapter *qdev = (struct ql_adapter *)data;
4692         u32 var = 0;
4693
4694         var = ql_read32(qdev, STS);
4695         if (pci_channel_offline(qdev->pdev)) {
4696                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4697                 return;
4698         }
4699
4700         mod_timer(&qdev->timer, jiffies + (5*HZ));
4701 }
4702
4703 static int __devinit qlge_probe(struct pci_dev *pdev,
4704                                 const struct pci_device_id *pci_entry)
4705 {
4706         struct net_device *ndev = NULL;
4707         struct ql_adapter *qdev = NULL;
4708         static int cards_found = 0;
4709         int err = 0;
4710
4711         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4712                         min(MAX_CPUS, (int)num_online_cpus()));
4713         if (!ndev)
4714                 return -ENOMEM;
4715
4716         err = ql_init_device(pdev, ndev, cards_found);
4717         if (err < 0) {
4718                 free_netdev(ndev);
4719                 return err;
4720         }
4721
4722         qdev = netdev_priv(ndev);
4723         SET_NETDEV_DEV(ndev, &pdev->dev);
4724         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4725                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4726                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4727         ndev->features = ndev->hw_features |
4728                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4729
4730         if (test_bit(QL_DMA64, &qdev->flags))
4731                 ndev->features |= NETIF_F_HIGHDMA;
4732
4733         /*
4734          * Set up net_device structure.
4735          */
4736         ndev->tx_queue_len = qdev->tx_ring_size;
4737         ndev->irq = pdev->irq;
4738
4739         ndev->netdev_ops = &qlge_netdev_ops;
4740         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4741         ndev->watchdog_timeo = 10 * HZ;
4742
4743         err = register_netdev(ndev);
4744         if (err) {
4745                 dev_err(&pdev->dev, "net device registration failed.\n");
4746                 ql_release_all(pdev);
4747                 pci_disable_device(pdev);
4748                 return err;
4749         }
4750         /* Start up the timer to trigger EEH if
4751          * the bus goes dead
4752          */
4753         init_timer_deferrable(&qdev->timer);
4754         qdev->timer.data = (unsigned long)qdev;
4755         qdev->timer.function = ql_timer;
4756         qdev->timer.expires = jiffies + (5*HZ);
4757         add_timer(&qdev->timer);
4758         ql_link_off(qdev);
4759         ql_display_dev_info(ndev);
4760         atomic_set(&qdev->lb_count, 0);
4761         cards_found++;
4762         return 0;
4763 }
4764
4765 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4766 {
4767         return qlge_send(skb, ndev);
4768 }
4769
4770 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4771 {
4772         return ql_clean_inbound_rx_ring(rx_ring, budget);
4773 }
4774
4775 static void __devexit qlge_remove(struct pci_dev *pdev)
4776 {
4777         struct net_device *ndev = pci_get_drvdata(pdev);
4778         struct ql_adapter *qdev = netdev_priv(ndev);
4779         del_timer_sync(&qdev->timer);
4780         ql_cancel_all_work_sync(qdev);
4781         unregister_netdev(ndev);
4782         ql_release_all(pdev);
4783         pci_disable_device(pdev);
4784         free_netdev(ndev);
4785 }
4786
4787 /* Clean up resources without touching hardware. */
4788 static void ql_eeh_close(struct net_device *ndev)
4789 {
4790         int i;
4791         struct ql_adapter *qdev = netdev_priv(ndev);
4792
4793         if (netif_carrier_ok(ndev)) {
4794                 netif_carrier_off(ndev);
4795                 netif_stop_queue(ndev);
4796         }
4797
4798         /* Disabling the timer */
4799         del_timer_sync(&qdev->timer);
4800         ql_cancel_all_work_sync(qdev);
4801
4802         for (i = 0; i < qdev->rss_ring_count; i++)
4803                 netif_napi_del(&qdev->rx_ring[i].napi);
4804
4805         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4806         ql_tx_ring_clean(qdev);
4807         ql_free_rx_buffers(qdev);
4808         ql_release_adapter_resources(qdev);
4809 }
4810
4811 /*
4812  * This callback is called by the PCI subsystem whenever
4813  * a PCI bus error is detected.
4814  */
4815 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4816                                                enum pci_channel_state state)
4817 {
4818         struct net_device *ndev = pci_get_drvdata(pdev);
4819         struct ql_adapter *qdev = netdev_priv(ndev);
4820
4821         switch (state) {
4822         case pci_channel_io_normal:
4823                 return PCI_ERS_RESULT_CAN_RECOVER;
4824         case pci_channel_io_frozen:
4825                 netif_device_detach(ndev);
4826                 if (netif_running(ndev))
4827                         ql_eeh_close(ndev);
4828                 pci_disable_device(pdev);
4829                 return PCI_ERS_RESULT_NEED_RESET;
4830         case pci_channel_io_perm_failure:
4831                 dev_err(&pdev->dev,
4832                         "%s: pci_channel_io_perm_failure.\n", __func__);
4833                 ql_eeh_close(ndev);
4834                 set_bit(QL_EEH_FATAL, &qdev->flags);
4835                 return PCI_ERS_RESULT_DISCONNECT;
4836         }
4837
4838         /* Request a slot reset. */
4839         return PCI_ERS_RESULT_NEED_RESET;
4840 }
4841
4842 /*
4843  * This callback is called after the PCI bus has been reset.
4844  * Basically, this tries to restart the card from scratch.
4845  * This is a shortened version of the device probe/discovery code;
4846  * it resembles the first half of the probe routine.
4847  */
4848 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4849 {
4850         struct net_device *ndev = pci_get_drvdata(pdev);
4851         struct ql_adapter *qdev = netdev_priv(ndev);
4852
4853         pdev->error_state = pci_channel_io_normal;
4854
4855         pci_restore_state(pdev);
4856         if (pci_enable_device(pdev)) {
4857                 netif_err(qdev, ifup, qdev->ndev,
4858                           "Cannot re-enable PCI device after reset.\n");
4859                 return PCI_ERS_RESULT_DISCONNECT;
4860         }
4861         pci_set_master(pdev);
4862
4863         if (ql_adapter_reset(qdev)) {
4864                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4865                 set_bit(QL_EEH_FATAL, &qdev->flags);
4866                 return PCI_ERS_RESULT_DISCONNECT;
4867         }
4868
4869         return PCI_ERS_RESULT_RECOVERED;
4870 }
4871
4872 static void qlge_io_resume(struct pci_dev *pdev)
4873 {
4874         struct net_device *ndev = pci_get_drvdata(pdev);
4875         struct ql_adapter *qdev = netdev_priv(ndev);
4876         int err = 0;
4877
4878         if (netif_running(ndev)) {
4879                 err = qlge_open(ndev);
4880                 if (err) {
4881                         netif_err(qdev, ifup, qdev->ndev,
4882                                   "Device initialization failed after reset.\n");
4883                         return;
4884                 }
4885         } else {
4886                 netif_err(qdev, ifup, qdev->ndev,
4887                           "Device was not running prior to EEH.\n");
4888         }
4889         mod_timer(&qdev->timer, jiffies + (5*HZ));
4890         netif_device_attach(ndev);
4891 }
4892
4893 static struct pci_error_handlers qlge_err_handler = {
4894         .error_detected = qlge_io_error_detected,
4895         .slot_reset = qlge_io_slot_reset,
4896         .resume = qlge_io_resume,
4897 };
4898
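/* PM suspend handler (also used by qlge_shutdown): detach the netdev,
 * stop the watchdog timer, bring a running adapter down, program WOL
 * through the MPI mailbox, then save PCI state and power the device
 * down.
 */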
4899 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4900 {
4901         struct net_device *ndev = pci_get_drvdata(pdev);
4902         struct ql_adapter *qdev = netdev_priv(ndev);
4903         int err;
4904
4905         netif_device_detach(ndev);
4906         del_timer_sync(&qdev->timer);
4907
4908         if (netif_running(ndev)) {
4909                 err = ql_adapter_down(qdev);
4910                 if (err)
4911                         return err;
4912         }
4913
4914         ql_wol(qdev);
4915         err = pci_save_state(pdev);
4916         if (err)
4917                 return err;
4918
4919         pci_disable_device(pdev);
4920
4921         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4922
4923         return 0;
4924 }
4925
4926 #ifdef CONFIG_PM
4927 static int qlge_resume(struct pci_dev *pdev)
4928 {
4929         struct net_device *ndev = pci_get_drvdata(pdev);
4930         struct ql_adapter *qdev = netdev_priv(ndev);
4931         int err;
4932
4933         pci_set_power_state(pdev, PCI_D0);
4934         pci_restore_state(pdev);
4935         err = pci_enable_device(pdev);
4936         if (err) {
4937                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4938                 return err;
4939         }
4940         pci_set_master(pdev);
4941
4942         pci_enable_wake(pdev, PCI_D3hot, 0);
4943         pci_enable_wake(pdev, PCI_D3cold, 0);
4944
4945         if (netif_running(ndev)) {
4946                 err = ql_adapter_up(qdev);
4947                 if (err)
4948                         return err;
4949         }
4950
4951         mod_timer(&qdev->timer, jiffies + (5*HZ));
4952         netif_device_attach(ndev);
4953
4954         return 0;
4955 }
4956 #endif /* CONFIG_PM */
4957
4958 static void qlge_shutdown(struct pci_dev *pdev)
4959 {
4960         qlge_suspend(pdev, PMSG_SUSPEND);
4961 }
4962
4963 static struct pci_driver qlge_driver = {
4964         .name = DRV_NAME,
4965         .id_table = qlge_pci_tbl,
4966         .probe = qlge_probe,
4967         .remove = __devexit_p(qlge_remove),
4968 #ifdef CONFIG_PM
4969         .suspend = qlge_suspend,
4970         .resume = qlge_resume,
4971 #endif
4972         .shutdown = qlge_shutdown,
4973         .err_handler = &qlge_err_handler
4974 };
4975
4976 static int __init qlge_init_module(void)
4977 {
4978         return pci_register_driver(&qlge_driver);
4979 }
4980
4981 static void __exit qlge_exit(void)
4982 {
4983         pci_unregister_driver(&qlge_driver);
4984 }
4985
4986 module_init(qlge_init_module);
4987 module_exit(qlge_exit);