[pandora-kernel.git] drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43
44 #include "qlge.h"
45
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53
54 static const u32 default_msg =
55     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER |    */
57     NETIF_MSG_IFDOWN |
58     NETIF_MSG_IFUP |
59     NETIF_MSG_RX_ERR |
60     NETIF_MSG_TX_ERR |
61 /*  NETIF_MSG_TX_QUEUED | */
62 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66 static int debug = -1;  /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80                 "Option to enable MPI firmware dump. "
81                 "Default is OFF - Do not allocate memory.");
82
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86                 "Option to allow forcing a firmware core dump. "
87                 "Default is OFF - Do not allow.");
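/* All of the module parameters above are set at load time, e.g.
 * "modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1" (illustrative values
 * only).  A debug value of -1 keeps the default_msg mask built above.
 */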
88
89 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92         /* required last entry */
93         {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97
98 static int ql_wol(struct ql_adapter *qdev);
99 static void qlge_set_multicast_list(struct net_device *ndev);
100
101 /* This hardware semaphore provides exclusive access to
102  * resources shared between the NIC driver, MPI firmware,
103  * FCOE firmware and the FC driver.
104  */
105 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
106 {
107         u32 sem_bits = 0;
108
109         switch (sem_mask) {
110         case SEM_XGMAC0_MASK:
111                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
112                 break;
113         case SEM_XGMAC1_MASK:
114                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
115                 break;
116         case SEM_ICB_MASK:
117                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
118                 break;
119         case SEM_MAC_ADDR_MASK:
120                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
121                 break;
122         case SEM_FLASH_MASK:
123                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
124                 break;
125         case SEM_PROBE_MASK:
126                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
127                 break;
128         case SEM_RT_IDX_MASK:
129                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
130                 break;
131         case SEM_PROC_REG_MASK:
132                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
133                 break;
134         default:
135                 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
136                 return -EINVAL;
137         }
138
139         ql_write32(qdev, SEM, sem_bits | sem_mask);
140         return !(ql_read32(qdev, SEM) & sem_bits);
141 }
142
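/* ql_sem_trylock() returns 0 when the semaphore was acquired and non-zero
 * when another owner already holds it.  ql_sem_spinlock() below retries up
 * to 30 times with a 100 usec delay between attempts (roughly 3 ms) before
 * giving up with -ETIMEDOUT.
 */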
143 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
144 {
145         unsigned int wait_count = 30;
146         do {
147                 if (!ql_sem_trylock(qdev, sem_mask))
148                         return 0;
149                 udelay(100);
150         } while (--wait_count);
151         return -ETIMEDOUT;
152 }
153
154 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
155 {
156         ql_write32(qdev, SEM, sem_mask);
157         ql_read32(qdev, SEM);   /* flush */
158 }
159
160 /* This function waits for a specific bit to come ready
161  * in a given register.  It is used mostly by the initialization
162  * process, but is also used by kernel thread APIs such as
163  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
164  */
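/* The wait is bounded by UDELAY_COUNT polls of UDELAY_DELAY microseconds
 * each; callers that have no error bit to watch pass 0 for err_bit.
 */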
165 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
166 {
167         u32 temp;
168         int count = UDELAY_COUNT;
169
170         while (count) {
171                 temp = ql_read32(qdev, reg);
172
173                 /* check for errors */
174                 if (temp & err_bit) {
175                         netif_alert(qdev, probe, qdev->ndev,
176                                     "register 0x%.08x access error, value = 0x%.08x!\n",
177                                     reg, temp);
178                         return -EIO;
179                 } else if (temp & bit)
180                         return 0;
181                 udelay(UDELAY_DELAY);
182                 count--;
183         }
184         netif_alert(qdev, probe, qdev->ndev,
185                     "Timed out waiting for reg %x to come ready.\n", reg);
186         return -ETIMEDOUT;
187 }
188
189 /* The CFG register is used to download TX and RX control blocks
190  * to the chip. This function waits for an operation to complete.
191  */
192 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
193 {
194         int count = UDELAY_COUNT;
195         u32 temp;
196
197         while (count) {
198                 temp = ql_read32(qdev, CFG);
199                 if (temp & CFG_LE)
200                         return -EIO;
201                 if (!(temp & bit))
202                         return 0;
203                 udelay(UDELAY_DELAY);
204                 count--;
205         }
206         return -ETIMEDOUT;
207 }
208
209
210 /* Used to issue init control blocks to hw. Maps control block,
211  * sets address, triggers download, waits for completion.
212  */
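/* The sequence below is: DMA-map the control block, write its bus address
 * to ICB_L/ICB_H, then write the queue id plus the requested load bit to
 * CFG.  The hardware clears that bit again once the download completes,
 * which is what the second ql_wait_cfg() call waits for.
 */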
213 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
214                  u16 q_id)
215 {
216         u64 map;
217         int status = 0;
218         int direction;
219         u32 mask;
220         u32 value;
221
222         direction =
223             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
224             PCI_DMA_FROMDEVICE;
225
226         map = pci_map_single(qdev->pdev, ptr, size, direction);
227         if (pci_dma_mapping_error(qdev->pdev, map)) {
228                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
229                 return -ENOMEM;
230         }
231
232         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
233         if (status)
234                 goto lock_failed;
235
236         status = ql_wait_cfg(qdev, bit);
237         if (status) {
238                 netif_err(qdev, ifup, qdev->ndev,
239                           "Timed out waiting for CFG to come ready.\n");
240                 goto exit;
241         }
242
243         ql_write32(qdev, ICB_L, (u32) map);
244         ql_write32(qdev, ICB_H, (u32) (map >> 32));
245
246         mask = CFG_Q_MASK | (bit << 16);
247         value = bit | (q_id << CFG_Q_SHIFT);
248         ql_write32(qdev, CFG, (mask | value));
249
250         /*
251          * Wait for the bit to clear after signaling hw.
252          */
253         status = ql_wait_cfg(qdev, bit);
254 exit:
255         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
lock_failed:
256         pci_unmap_single(qdev->pdev, map, size, direction);
257         return status;
258 }
259
260 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
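/* Access is indirect through the MAC_ADDR_IDX/MAC_ADDR_DATA register pair:
 * wait for MAC_ADDR_MW, write the index with MAC_ADDR_ADR | MAC_ADDR_RS to
 * request a read, wait for MAC_ADDR_MR, then read MAC_ADDR_DATA.  A CAM
 * entry is returned as two 32-bit words (lower/upper MAC) plus a third
 * output word for MAC_ADDR_TYPE_CAM_MAC entries.
 */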
261 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
262                         u32 *value)
263 {
264         u32 offset = 0;
265         int status;
266
267         switch (type) {
268         case MAC_ADDR_TYPE_MULTI_MAC:
269         case MAC_ADDR_TYPE_CAM_MAC:
270                 {
271                         status =
272                             ql_wait_reg_rdy(qdev,
273                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
274                         if (status)
275                                 goto exit;
276                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
277                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
278                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
279                         status =
280                             ql_wait_reg_rdy(qdev,
281                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
282                         if (status)
283                                 goto exit;
284                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
285                         status =
286                             ql_wait_reg_rdy(qdev,
287                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
288                         if (status)
289                                 goto exit;
290                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
291                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
292                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
293                         status =
294                             ql_wait_reg_rdy(qdev,
295                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
296                         if (status)
297                                 goto exit;
298                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
299                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
300                                 status =
301                                     ql_wait_reg_rdy(qdev,
302                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
303                                 if (status)
304                                         goto exit;
305                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
306                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
307                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
308                                 status =
309                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
310                                                     MAC_ADDR_MR, 0);
311                                 if (status)
312                                         goto exit;
313                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
314                         }
315                         break;
316                 }
317         case MAC_ADDR_TYPE_VLAN:
318         case MAC_ADDR_TYPE_MULTI_FLTR:
319         default:
320                 netif_crit(qdev, ifup, qdev->ndev,
321                            "Address type %d not yet supported.\n", type);
322                 status = -EPERM;
323         }
324 exit:
325         return status;
326 }
327
328 /* Set up a MAC, multicast or VLAN address for the
329  * inbound frame matching.
330  */
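/* Writes use the same MAC_ADDR_IDX/MAC_ADDR_DATA indirection as the read
 * path above: wait for MAC_ADDR_MW, write the index word, then write the
 * data word.  CAM entries take a third word (cam_output) that selects the
 * routing destination and owning function and, when a vlan group is
 * registered, sets the CAM_OUT_RV bit.
 */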
331 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
332                                u16 index)
333 {
334         u32 offset = 0;
335         int status = 0;
336
337         switch (type) {
338         case MAC_ADDR_TYPE_MULTI_MAC:
339                 {
340                         u32 upper = (addr[0] << 8) | addr[1];
341                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
342                                         (addr[4] << 8) | (addr[5]);
343
344                         status =
345                                 ql_wait_reg_rdy(qdev,
346                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
347                         if (status)
348                                 goto exit;
349                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
350                                 (index << MAC_ADDR_IDX_SHIFT) |
351                                 type | MAC_ADDR_E);
352                         ql_write32(qdev, MAC_ADDR_DATA, lower);
353                         status =
354                                 ql_wait_reg_rdy(qdev,
355                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
356                         if (status)
357                                 goto exit;
358                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
359                                 (index << MAC_ADDR_IDX_SHIFT) |
360                                 type | MAC_ADDR_E);
361
362                         ql_write32(qdev, MAC_ADDR_DATA, upper);
363                         status =
364                                 ql_wait_reg_rdy(qdev,
365                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
366                         if (status)
367                                 goto exit;
368                         break;
369                 }
370         case MAC_ADDR_TYPE_CAM_MAC:
371                 {
372                         u32 cam_output;
373                         u32 upper = (addr[0] << 8) | addr[1];
374                         u32 lower =
375                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
376                             (addr[5]);
377
378                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
379                                      "Adding %s address %pM at index %d in the CAM.\n",
380                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
381                                      "MULTICAST" : "UNICAST",
382                                      addr, index);
383
384                         status =
385                             ql_wait_reg_rdy(qdev,
386                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
387                         if (status)
388                                 goto exit;
389                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
390                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
391                                    type);       /* type */
392                         ql_write32(qdev, MAC_ADDR_DATA, lower);
393                         status =
394                             ql_wait_reg_rdy(qdev,
395                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
396                         if (status)
397                                 goto exit;
398                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
399                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
400                                    type);       /* type */
401                         ql_write32(qdev, MAC_ADDR_DATA, upper);
402                         status =
403                             ql_wait_reg_rdy(qdev,
404                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
405                         if (status)
406                                 goto exit;
407                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
408                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
409                                    type);       /* type */
410                         /* This field should also include the queue id
411                            and possibly the function id.  Right now we hardcode
412                            the route field to NIC core.
413                          */
414                         cam_output = (CAM_OUT_ROUTE_NIC |
415                                       (qdev->
416                                        func << CAM_OUT_FUNC_SHIFT) |
417                                         (0 << CAM_OUT_CQ_ID_SHIFT));
418                         if (qdev->vlgrp)
419                                 cam_output |= CAM_OUT_RV;
420                         /* route to NIC core */
421                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
422                         break;
423                 }
424         case MAC_ADDR_TYPE_VLAN:
425                 {
426                         u32 enable_bit = *((u32 *) &addr[0]);
427                         /* For VLAN, the addr actually holds a bit that
428                          * either enables or disables the vlan id we are
429                          * addressing. It's either MAC_ADDR_E on or off.
430                          * That's bit-27 we're talking about.
431                          */
432                         netif_info(qdev, ifup, qdev->ndev,
433                                    "%s VLAN ID %d %s the CAM.\n",
434                                    enable_bit ? "Adding" : "Removing",
435                                    index,
436                                    enable_bit ? "to" : "from");
437
438                         status =
439                             ql_wait_reg_rdy(qdev,
440                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
441                         if (status)
442                                 goto exit;
443                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
444                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
445                                    type |       /* type */
446                                    enable_bit); /* enable/disable */
447                         break;
448                 }
449         case MAC_ADDR_TYPE_MULTI_FLTR:
450         default:
451                 netif_crit(qdev, ifup, qdev->ndev,
452                            "Address type %d not yet supported.\n", type);
453                 status = -EPERM;
454         }
455 exit:
456         return status;
457 }
458
459 /* Set or clear MAC address in hardware. We sometimes
460  * have to clear it to prevent wrong frame routing
461  * especially in a bonding environment.
462  */
463 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
464 {
465         int status;
466         char zero_mac_addr[ETH_ALEN];
467         char *addr;
468
469         if (set) {
470                 addr = &qdev->current_mac_addr[0];
471                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
472                              "Set Mac addr %pM\n", addr);
473         } else {
474                 memset(zero_mac_addr, 0, ETH_ALEN);
475                 addr = &zero_mac_addr[0];
476                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
477                              "Clearing MAC address\n");
478         }
479         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
480         if (status)
481                 return status;
482         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
483                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
484         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
485         if (status)
486                 netif_err(qdev, ifup, qdev->ndev,
487                           "Failed to init mac address.\n");
488         return status;
489 }
490
491 void ql_link_on(struct ql_adapter *qdev)
492 {
493         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
494         netif_carrier_on(qdev->ndev);
495         ql_set_mac_addr(qdev, 1);
496 }
497
498 void ql_link_off(struct ql_adapter *qdev)
499 {
500         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
501         netif_carrier_off(qdev->ndev);
502         ql_set_mac_addr(qdev, 0);
503 }
504
505 /* Get a specific frame routing value from the CAM.
506  * Used for debug and reg dump.
507  */
508 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
509 {
510         int status = 0;
511
512         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
513         if (status)
514                 goto exit;
515
516         ql_write32(qdev, RT_IDX,
517                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
518         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
519         if (status)
520                 goto exit;
521         *value = ql_read32(qdev, RT_DATA);
522 exit:
523         return status;
524 }
525
526 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
527  * to route different frame types to various inbound queues.  We send broadcast/
528  * multicast/error frames to the default queue for slow handling,
529  * and CAM hit/RSS frames to the fast handling queues.
530  */
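/* Each slot is programmed through the RT_IDX/RT_DATA register pair: the
 * slot number and destination are written to RT_IDX (with RT_IDX_E to
 * enable the entry) and the frame-type mask goes into RT_DATA.  Calling
 * this with enable == 0 clears the entry by writing 0 to RT_DATA.
 */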
531 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
532                               int enable)
533 {
534         int status = -EINVAL; /* Return error if no mask match. */
535         u32 value = 0;
536
537         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
538                      "%s %s mask %s the routing reg.\n",
539                      enable ? "Adding" : "Removing",
540                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
541                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
542                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
543                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
544                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
545                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
546                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
547                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
548                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
549                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
550                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
551                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
552                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
553                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
554                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
555                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
556                      "(Bad index != RT_IDX)",
557                      enable ? "to" : "from");
558
559         switch (mask) {
560         case RT_IDX_CAM_HIT:
561                 {
562                         value = RT_IDX_DST_CAM_Q |      /* dest */
563                             RT_IDX_TYPE_NICQ |  /* type */
564                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
565                         break;
566                 }
567         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
568                 {
569                         value = RT_IDX_DST_DFLT_Q |     /* dest */
570                             RT_IDX_TYPE_NICQ |  /* type */
571                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
572                         break;
573                 }
574         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
575                 {
576                         value = RT_IDX_DST_DFLT_Q |     /* dest */
577                             RT_IDX_TYPE_NICQ |  /* type */
578                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
579                         break;
580                 }
581         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
582                 {
583                         value = RT_IDX_DST_DFLT_Q | /* dest */
584                                 RT_IDX_TYPE_NICQ | /* type */
585                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
586                                 RT_IDX_IDX_SHIFT); /* index */
587                         break;
588                 }
589         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
590                 {
591                         value = RT_IDX_DST_DFLT_Q | /* dest */
592                                 RT_IDX_TYPE_NICQ | /* type */
593                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
594                                 RT_IDX_IDX_SHIFT); /* index */
595                         break;
596                 }
597         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
598                 {
599                         value = RT_IDX_DST_DFLT_Q |     /* dest */
600                             RT_IDX_TYPE_NICQ |  /* type */
601                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
602                         break;
603                 }
604         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
605                 {
606                         value = RT_IDX_DST_DFLT_Q |     /* dest */
607                             RT_IDX_TYPE_NICQ |  /* type */
608                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
609                         break;
610                 }
611         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
612                 {
613                         value = RT_IDX_DST_DFLT_Q |     /* dest */
614                             RT_IDX_TYPE_NICQ |  /* type */
615                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
616                         break;
617                 }
618         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
619                 {
620                         value = RT_IDX_DST_RSS |        /* dest */
621                             RT_IDX_TYPE_NICQ |  /* type */
622                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
623                         break;
624                 }
625         case 0:         /* Clear the E-bit on an entry. */
626                 {
627                         value = RT_IDX_DST_DFLT_Q |     /* dest */
628                             RT_IDX_TYPE_NICQ |  /* type */
629                             (index << RT_IDX_IDX_SHIFT);/* index */
630                         break;
631                 }
632         default:
633                 netif_err(qdev, ifup, qdev->ndev,
634                           "Mask type %d not yet supported.\n", mask);
635                 status = -EPERM;
636                 goto exit;
637         }
638
639         if (value) {
640                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
641                 if (status)
642                         goto exit;
643                 value |= (enable ? RT_IDX_E : 0);
644                 ql_write32(qdev, RT_IDX, value);
645                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
646         }
647 exit:
648         return status;
649 }
650
651 static void ql_enable_interrupts(struct ql_adapter *qdev)
652 {
653         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
654 }
655
656 static void ql_disable_interrupts(struct ql_adapter *qdev)
657 {
658         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
659 }
660
661 /* If we're running with multiple MSI-X vectors then we enable on the fly.
662  * Otherwise, we may have multiple outstanding workers and don't want to
663  * enable until the last one finishes. In this case, the irq_cnt gets
664  * incremented every time we queue a worker and decremented every time
665  * a worker finishes.  Once it hits zero we enable the interrupt.
666  */
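/* See ql_enable_all_completion_interrupts() below: for the legacy/MSI case
 * (and for vector zero) it pre-charges irq_cnt to 1 so that the
 * atomic_dec_and_test() here actually reaches zero and re-enables the
 * interrupt.
 */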
667 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
668 {
669         u32 var = 0;
670         unsigned long hw_flags = 0;
671         struct intr_context *ctx = qdev->intr_context + intr;
672
673         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
674                 /* Always enable if we're MSIX multi interrupts and
675                  * it's not the default (zeroth) interrupt.
676                  */
677                 ql_write32(qdev, INTR_EN,
678                            ctx->intr_en_mask);
679                 var = ql_read32(qdev, STS);
680                 return var;
681         }
682
683         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
684         if (atomic_dec_and_test(&ctx->irq_cnt)) {
685                 ql_write32(qdev, INTR_EN,
686                            ctx->intr_en_mask);
687                 var = ql_read32(qdev, STS);
688         }
689         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
690         return var;
691 }
692
693 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
694 {
695         u32 var = 0;
696         struct intr_context *ctx;
697
698         /* HW disables for us if we're MSIX multi interrupts and
699          * it's not the default (zeroth) interrupt.
700          */
701         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
702                 return 0;
703
704         ctx = qdev->intr_context + intr;
705         spin_lock(&qdev->hw_lock);
706         if (!atomic_read(&ctx->irq_cnt)) {
707                 ql_write32(qdev, INTR_EN,
708                 ctx->intr_dis_mask);
709                 var = ql_read32(qdev, STS);
710         }
711         atomic_inc(&ctx->irq_cnt);
712         spin_unlock(&qdev->hw_lock);
713         return var;
714 }
715
716 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
717 {
718         int i;
719         for (i = 0; i < qdev->intr_count; i++) {
720                 /* The enable call does an atomic_dec_and_test
721                  * and enables only if the result is zero.
722                  * So we precharge it here.
723                  */
724                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
725                         i == 0))
726                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
727                 ql_enable_completion_interrupt(qdev, i);
728         }
729
730 }
731
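/* Sanity check the flash image that was just read into qdev->flash: the
 * first four bytes must match the expected signature string ("8000" or
 * "8012") and the 16-bit words of the image must sum to zero.
 */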
732 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
733 {
734         int status, i;
735         u16 csum = 0;
736         __le16 *flash = (__le16 *)&qdev->flash;
737
738         status = strncmp((char *)&qdev->flash, str, 4);
739         if (status) {
740                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
741                 return  status;
742         }
743
744         for (i = 0; i < size; i++)
745                 csum += le16_to_cpu(*flash++);
746
747         if (csum)
748                 netif_err(qdev, ifup, qdev->ndev,
749                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
750
751         return csum;
752 }
753
754 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
755 {
756         int status = 0;
757         /* wait for reg to come ready */
758         status = ql_wait_reg_rdy(qdev,
759                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
760         if (status)
761                 goto exit;
762         /* set up for reg read */
763         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
764         /* wait for reg to come ready */
765         status = ql_wait_reg_rdy(qdev,
766                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
767         if (status)
768                 goto exit;
769          /* This data is stored on flash as an array of
770          * __le32.  Since ql_read32() returns cpu endian
771          * we need to swap it back.
772          */
773         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
774 exit:
775         return status;
776 }
777
778 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
779 {
780         u32 i, size;
781         int status;
782         __le32 *p = (__le32 *)&qdev->flash;
783         u32 offset;
784         u8 mac_addr[6];
785
786         /* Get flash offset for function and adjust
787          * for dword access.
788          */
789         if (!qdev->port)
790                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
791         else
792                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
793
794         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
795                 return -ETIMEDOUT;
796
797         size = sizeof(struct flash_params_8000) / sizeof(u32);
798         for (i = 0; i < size; i++, p++) {
799                 status = ql_read_flash_word(qdev, i+offset, p);
800                 if (status) {
801                         netif_err(qdev, ifup, qdev->ndev,
802                                   "Error reading flash.\n");
803                         goto exit;
804                 }
805         }
806
807         status = ql_validate_flash(qdev,
808                         sizeof(struct flash_params_8000) / sizeof(u16),
809                         "8000");
810         if (status) {
811                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
812                 status = -EINVAL;
813                 goto exit;
814         }
815
816         /* Extract either manufacturer or BOFM modified
817          * MAC address.
818          */
819         if (qdev->flash.flash_params_8000.data_type1 == 2)
820                 memcpy(mac_addr,
821                         qdev->flash.flash_params_8000.mac_addr1,
822                         qdev->ndev->addr_len);
823         else
824                 memcpy(mac_addr,
825                         qdev->flash.flash_params_8000.mac_addr,
826                         qdev->ndev->addr_len);
827
828         if (!is_valid_ether_addr(mac_addr)) {
829                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
830                 status = -EINVAL;
831                 goto exit;
832         }
833
834         memcpy(qdev->ndev->dev_addr,
835                 mac_addr,
836                 qdev->ndev->addr_len);
837
838 exit:
839         ql_sem_unlock(qdev, SEM_FLASH_MASK);
840         return status;
841 }
842
843 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
844 {
845         int i;
846         int status;
847         __le32 *p = (__le32 *)&qdev->flash;
848         u32 offset = 0;
849         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
850
851         /* Second function's parameters follow the first
852          * function's.
853          */
854         if (qdev->port)
855                 offset = size;
856
857         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
858                 return -ETIMEDOUT;
859
860         for (i = 0; i < size; i++, p++) {
861                 status = ql_read_flash_word(qdev, i+offset, p);
862                 if (status) {
863                         netif_err(qdev, ifup, qdev->ndev,
864                                   "Error reading flash.\n");
865                         goto exit;
866                 }
867
868         }
869
870         status = ql_validate_flash(qdev,
871                         sizeof(struct flash_params_8012) / sizeof(u16),
872                         "8012");
873         if (status) {
874                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
875                 status = -EINVAL;
876                 goto exit;
877         }
878
879         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
880                 status = -EINVAL;
881                 goto exit;
882         }
883
884         memcpy(qdev->ndev->dev_addr,
885                 qdev->flash.flash_params_8012.mac_addr,
886                 qdev->ndev->addr_len);
887
888 exit:
889         ql_sem_unlock(qdev, SEM_FLASH_MASK);
890         return status;
891 }
892
893 /* xgmac registers are located behind the xgmac_addr and xgmac_data
894  * register pair.  Each read/write requires us to wait for the ready
895  * bit before reading/writing the data.
896  */
897 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
898 {
899         int status;
900         /* wait for reg to come ready */
901         status = ql_wait_reg_rdy(qdev,
902                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
903         if (status)
904                 return status;
905         /* write the data to the data reg */
906         ql_write32(qdev, XGMAC_DATA, data);
907         /* trigger the write */
908         ql_write32(qdev, XGMAC_ADDR, reg);
909         return status;
910 }
911
912 /* xgmac registers are located behind the xgmac_addr and xgmac_data
913  * register pair.  Each read/write requires us to wait for the ready
914  * bit before reading/writing the data.
915  */
916 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
917 {
918         int status = 0;
919         /* wait for reg to come ready */
920         status = ql_wait_reg_rdy(qdev,
921                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
922         if (status)
923                 goto exit;
924         /* set up for reg read */
925         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
926         /* wait for reg to come ready */
927         status = ql_wait_reg_rdy(qdev,
928                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
929         if (status)
930                 goto exit;
931         /* get the data */
932         *data = ql_read32(qdev, XGMAC_DATA);
933 exit:
934         return status;
935 }
936
937 /* This is used for reading the 64-bit statistics regs. */
938 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
939 {
940         int status = 0;
941         u32 hi = 0;
942         u32 lo = 0;
943
944         status = ql_read_xgmac_reg(qdev, reg, &lo);
945         if (status)
946                 goto exit;
947
948         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
949         if (status)
950                 goto exit;
951
952         *data = (u64) lo | ((u64) hi << 32);
953
954 exit:
955         return status;
956 }
957
958 static int ql_8000_port_initialize(struct ql_adapter *qdev)
959 {
960         int status;
961         /*
962          * Get MPI firmware version for driver banner
963          * and ethtool info.
964          */
965         status = ql_mb_about_fw(qdev);
966         if (status)
967                 goto exit;
968         status = ql_mb_get_fw_state(qdev);
969         if (status)
970                 goto exit;
971         /* Wake up a worker to get/set the TX/RX frame sizes. */
972         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
973 exit:
974         return status;
975 }
976
977 /* Take the MAC Core out of reset.
978  * Enable statistics counting.
979  * Take the transmitter/receiver out of reset.
980  * This functionality may be done in the MPI firmware at a
981  * later date.
982  */
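/* Only one function may run this sequence, so it is guarded by the XGMAC
 * hardware semaphore; the loser simply waits for the winner to set the
 * port_init bit in STS.  The 0x2580 value written to MAC_TX_PARAMS and
 * MAC_RX_PARAMS below is the 9600-byte jumbo frame size.
 */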
983 static int ql_8012_port_initialize(struct ql_adapter *qdev)
984 {
985         int status = 0;
986         u32 data;
987
988         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
989                 /* Another function has the semaphore, so
990                  * wait for the port init bit to come ready.
991                  */
992                 netif_info(qdev, link, qdev->ndev,
993                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
994                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
995                 if (status) {
996                         netif_crit(qdev, link, qdev->ndev,
997                                    "Port initialize timed out.\n");
998                 }
999                 return status;
1000         }
1001
1002         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1003         /* Set the core reset. */
1004         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1005         if (status)
1006                 goto end;
1007         data |= GLOBAL_CFG_RESET;
1008         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1009         if (status)
1010                 goto end;
1011
1012         /* Clear the core reset and turn on jumbo for receiver. */
1013         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1014         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1015         data |= GLOBAL_CFG_TX_STAT_EN;
1016         data |= GLOBAL_CFG_RX_STAT_EN;
1017         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1018         if (status)
1019                 goto end;
1020
1021         /* Enable the transmitter and clear its reset. */
1022         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1023         if (status)
1024                 goto end;
1025         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1026         data |= TX_CFG_EN;      /* Enable the transmitter. */
1027         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1028         if (status)
1029                 goto end;
1030
1031         /* Enable the receiver and clear its reset. */
1032         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1033         if (status)
1034                 goto end;
1035         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1036         data |= RX_CFG_EN;      /* Enable the receiver. */
1037         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1038         if (status)
1039                 goto end;
1040
1041         /* Turn on jumbo. */
1042         status =
1043             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1044         if (status)
1045                 goto end;
1046         status =
1047             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1048         if (status)
1049                 goto end;
1050
1051         /* Signal to the world that the port is enabled.        */
1052         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1053 end:
1054         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1055         return status;
1056 }
1057
1058 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1059 {
1060         return PAGE_SIZE << qdev->lbq_buf_order;
1061 }
1062
1063 /* Get the next large buffer. */
1064 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1065 {
1066         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1067         rx_ring->lbq_curr_idx++;
1068         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1069                 rx_ring->lbq_curr_idx = 0;
1070         rx_ring->lbq_free_cnt++;
1071         return lbq_desc;
1072 }
1073
1074 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1075                 struct rx_ring *rx_ring)
1076 {
1077         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1078
1079         pci_dma_sync_single_for_cpu(qdev->pdev,
1080                                         dma_unmap_addr(lbq_desc, mapaddr),
1081                                     rx_ring->lbq_buf_size,
1082                                         PCI_DMA_FROMDEVICE);
1083
1084         /* If it's the last chunk of our master page then
1085          * we unmap it.
1086          */
1087         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1088                                         == ql_lbq_block_size(qdev))
1089                 pci_unmap_page(qdev->pdev,
1090                                 lbq_desc->p.pg_chunk.map,
1091                                 ql_lbq_block_size(qdev),
1092                                 PCI_DMA_FROMDEVICE);
1093         return lbq_desc;
1094 }
1095
1096 /* Get the next small buffer. */
1097 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1098 {
1099         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1100         rx_ring->sbq_curr_idx++;
1101         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1102                 rx_ring->sbq_curr_idx = 0;
1103         rx_ring->sbq_free_cnt++;
1104         return sbq_desc;
1105 }
1106
1107 /* Update an rx ring index. */
1108 static void ql_update_cq(struct rx_ring *rx_ring)
1109 {
1110         rx_ring->cnsmr_idx++;
1111         rx_ring->curr_entry++;
1112         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1113                 rx_ring->cnsmr_idx = 0;
1114                 rx_ring->curr_entry = rx_ring->cq_base;
1115         }
1116 }
1117
1118 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1119 {
1120         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1121 }
1122
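/* Large receive buffers are carved out of one "master" page of order
 * qdev->lbq_buf_order.  Each call hands out the next lbq_buf_size chunk of
 * that page; get_page() takes an extra reference per chunk, and the last
 * chunk is flagged so the page can be unmapped once it has been consumed.
 */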
1123 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1124                                                 struct bq_desc *lbq_desc)
1125 {
1126         if (!rx_ring->pg_chunk.page) {
1127                 u64 map;
1128                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1129                                                 GFP_ATOMIC,
1130                                                 qdev->lbq_buf_order);
1131                 if (unlikely(!rx_ring->pg_chunk.page)) {
1132                         netif_err(qdev, drv, qdev->ndev,
1133                                   "page allocation failed.\n");
1134                         return -ENOMEM;
1135                 }
1136                 rx_ring->pg_chunk.offset = 0;
1137                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1138                                         0, ql_lbq_block_size(qdev),
1139                                         PCI_DMA_FROMDEVICE);
1140                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1141                         __free_pages(rx_ring->pg_chunk.page,
1142                                         qdev->lbq_buf_order);
1143                         netif_err(qdev, drv, qdev->ndev,
1144                                   "PCI mapping failed.\n");
1145                         return -ENOMEM;
1146                 }
1147                 rx_ring->pg_chunk.map = map;
1148                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1149         }
1150
1151         /* Copy the current master pg_chunk info
1152          * to the current descriptor.
1153          */
1154         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1155
1156         /* Adjust the master page chunk for next
1157          * buffer get.
1158          */
1159         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1160         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1161                 rx_ring->pg_chunk.page = NULL;
1162                 lbq_desc->p.pg_chunk.last_flag = 1;
1163         } else {
1164                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1165                 get_page(rx_ring->pg_chunk.page);
1166                 lbq_desc->p.pg_chunk.last_flag = 0;
1167         }
1168         return 0;
1169 }
}
1170 /* Process (refill) a large buffer queue. */
1171 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1172 {
1173         u32 clean_idx = rx_ring->lbq_clean_idx;
1174         u32 start_idx = clean_idx;
1175         struct bq_desc *lbq_desc;
1176         u64 map;
1177         int i;
1178
1179         while (rx_ring->lbq_free_cnt > 32) {
1180                 for (i = 0; i < 16; i++) {
1181                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1182                                      "lbq: try cleaning clean_idx = %d.\n",
1183                                      clean_idx);
1184                         lbq_desc = &rx_ring->lbq[clean_idx];
1185                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1186                                 netif_err(qdev, ifup, qdev->ndev,
1187                                           "Could not get a page chunk.\n");
1188                                 return;
1189                         }
1190
1191                         map = lbq_desc->p.pg_chunk.map +
1192                                 lbq_desc->p.pg_chunk.offset;
1193                                 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1194                         dma_unmap_len_set(lbq_desc, maplen,
1195                                         rx_ring->lbq_buf_size);
1196                                 *lbq_desc->addr = cpu_to_le64(map);
1197
1198                         pci_dma_sync_single_for_device(qdev->pdev, map,
1199                                                 rx_ring->lbq_buf_size,
1200                                                 PCI_DMA_FROMDEVICE);
1201                         clean_idx++;
1202                         if (clean_idx == rx_ring->lbq_len)
1203                                 clean_idx = 0;
1204                 }
1205
1206                 rx_ring->lbq_clean_idx = clean_idx;
1207                 rx_ring->lbq_prod_idx += 16;
1208                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1209                         rx_ring->lbq_prod_idx = 0;
1210                 rx_ring->lbq_free_cnt -= 16;
1211         }
1212
1213         if (start_idx != clean_idx) {
1214                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1215                              "lbq: updating prod idx = %d.\n",
1216                              rx_ring->lbq_prod_idx);
1217                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1218                                 rx_ring->lbq_prod_idx_db_reg);
1219         }
1220 }
1221
1222 /* Process (refill) a small buffer queue. */
1223 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1224 {
1225         u32 clean_idx = rx_ring->sbq_clean_idx;
1226         u32 start_idx = clean_idx;
1227         struct bq_desc *sbq_desc;
1228         u64 map;
1229         int i;
1230
1231         while (rx_ring->sbq_free_cnt > 16) {
1232                 for (i = 0; i < 16; i++) {
1233                         sbq_desc = &rx_ring->sbq[clean_idx];
1234                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1235                                      "sbq: try cleaning clean_idx = %d.\n",
1236                                      clean_idx);
1237                         if (sbq_desc->p.skb == NULL) {
1238                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1239                                              qdev->ndev,
1240                                              "sbq: getting new skb for index %d.\n",
1241                                              sbq_desc->index);
1242                                 sbq_desc->p.skb =
1243                                     netdev_alloc_skb(qdev->ndev,
1244                                                      SMALL_BUFFER_SIZE);
1245                                 if (sbq_desc->p.skb == NULL) {
1246                                         netif_err(qdev, probe, qdev->ndev,
1247                                                   "Couldn't get an skb.\n");
1248                                         rx_ring->sbq_clean_idx = clean_idx;
1249                                         return;
1250                                 }
1251                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1252                                 map = pci_map_single(qdev->pdev,
1253                                                      sbq_desc->p.skb->data,
1254                                                      rx_ring->sbq_buf_size,
1255                                                      PCI_DMA_FROMDEVICE);
1256                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1257                                         netif_err(qdev, ifup, qdev->ndev,
1258                                                   "PCI mapping failed.\n");
1259                                         rx_ring->sbq_clean_idx = clean_idx;
1260                                         dev_kfree_skb_any(sbq_desc->p.skb);
1261                                         sbq_desc->p.skb = NULL;
1262                                         return;
1263                                 }
1264                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1265                                 dma_unmap_len_set(sbq_desc, maplen,
1266                                                   rx_ring->sbq_buf_size);
1267                                 *sbq_desc->addr = cpu_to_le64(map);
1268                         }
1269
1270                         clean_idx++;
1271                         if (clean_idx == rx_ring->sbq_len)
1272                                 clean_idx = 0;
1273                 }
1274                 rx_ring->sbq_clean_idx = clean_idx;
1275                 rx_ring->sbq_prod_idx += 16;
1276                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1277                         rx_ring->sbq_prod_idx = 0;
1278                 rx_ring->sbq_free_cnt -= 16;
1279         }
1280
1281         if (start_idx != clean_idx) {
1282                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1283                              "sbq: updating prod idx = %d.\n",
1284                              rx_ring->sbq_prod_idx);
1285                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1286                                 rx_ring->sbq_prod_idx_db_reg);
1287         }
1288 }
1289
1290 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1291                                     struct rx_ring *rx_ring)
1292 {
1293         ql_update_sbq(qdev, rx_ring);
1294         ql_update_lbq(qdev, rx_ring);
1295 }
1296
1297 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1298  * fails at some stage, or from the interrupt when a tx completes.
1299  */
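/* Element 0 (skb->data) and, when more than seven fragments were used,
 * element 7 (the OAL) were mapped with pci_map_single(), so they are
 * unmapped the same way; every other element is a page fragment and is
 * released with pci_unmap_page().
 */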
1300 static void ql_unmap_send(struct ql_adapter *qdev,
1301                           struct tx_ring_desc *tx_ring_desc, int mapped)
1302 {
1303         int i;
1304         for (i = 0; i < mapped; i++) {
1305                 if (i == 0 || (i == 7 && mapped > 7)) {
1306                         /*
1307                          * Unmap the skb->data area, or the
1308                          * external sglist (AKA the Outbound
1309                          * Address List (OAL)).
1310                          * If it's the zeroth element, then it's
1311                          * the skb->data area.  If it's the 7th
1312                          * element and there are more than 6 frags,
1313                          * then it's an OAL.
1314                          */
1315                         if (i == 7) {
1316                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1317                                              qdev->ndev,
1318                                              "unmapping OAL area.\n");
1319                         }
1320                         pci_unmap_single(qdev->pdev,
1321                                          dma_unmap_addr(&tx_ring_desc->map[i],
1322                                                         mapaddr),
1323                                          dma_unmap_len(&tx_ring_desc->map[i],
1324                                                        maplen),
1325                                          PCI_DMA_TODEVICE);
1326                 } else {
1327                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1328                                      "unmapping frag %d.\n", i);
1329                         pci_unmap_page(qdev->pdev,
1330                                        dma_unmap_addr(&tx_ring_desc->map[i],
1331                                                       mapaddr),
1332                                        dma_unmap_len(&tx_ring_desc->map[i],
1333                                                      maplen), PCI_DMA_TODEVICE);
1334                 }
1335         }
1336
1337 }
1338
1339 /* Map the buffers for this transmit.  This will return
1340  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1341  */
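/* The IOCB carries eight tx_buf_desc slots: slot 0 maps skb->data and the
 * remaining slots map page fragments.  Once there are more than seven
 * fragments, the eighth slot instead points at the external OAL kept in
 * tx_ring_desc->oal and the rest of the fragments continue there; the
 * comment inside the loop below spells out the exact layout.
 */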
1342 static int ql_map_send(struct ql_adapter *qdev,
1343                        struct ob_mac_iocb_req *mac_iocb_ptr,
1344                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1345 {
1346         int len = skb_headlen(skb);
1347         dma_addr_t map;
1348         int frag_idx, err, map_idx = 0;
1349         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1350         int frag_cnt = skb_shinfo(skb)->nr_frags;
1351
1352         if (frag_cnt) {
1353                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1354                              "frag_cnt = %d.\n", frag_cnt);
1355         }
1356         /*
1357          * Map the skb buffer first.
1358          */
1359         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1360
1361         err = pci_dma_mapping_error(qdev->pdev, map);
1362         if (err) {
1363                 netif_err(qdev, tx_queued, qdev->ndev,
1364                           "PCI mapping failed with error: %d\n", err);
1365
1366                 return NETDEV_TX_BUSY;
1367         }
1368
1369         tbd->len = cpu_to_le32(len);
1370         tbd->addr = cpu_to_le64(map);
1371         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1372         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1373         map_idx++;
1374
1375         /*
1376          * This loop fills the remainder of the 8 address descriptors
1377          * in the IOCB.  If there are more than 7 fragments, then the
1378          * eighth address desc will point to an external list (OAL).
1379          * When this happens, the remainder of the frags will be stored
1380          * in this list.
1381          */
1382         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1383                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1384                 tbd++;
1385                 if (frag_idx == 6 && frag_cnt > 7) {
1386                         /* Let's tack on an sglist.
1387                          * Our control block will now
1388                          * look like this:
1389                          * iocb->seg[0] = skb->data
1390                          * iocb->seg[1] = frag[0]
1391                          * iocb->seg[2] = frag[1]
1392                          * iocb->seg[3] = frag[2]
1393                          * iocb->seg[4] = frag[3]
1394                          * iocb->seg[5] = frag[4]
1395                          * iocb->seg[6] = frag[5]
1396                          * iocb->seg[7] = ptr to OAL (external sglist)
1397                          * oal->seg[0] = frag[6]
1398                          * oal->seg[1] = frag[7]
1399                          * oal->seg[2] = frag[8]
1400                          * oal->seg[3] = frag[9]
1401                          * oal->seg[4] = frag[10]
1402                          *      etc...
1403                          */
1404                         /* Tack on the OAL in the eighth segment of IOCB. */
1405                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1406                                              sizeof(struct oal),
1407                                              PCI_DMA_TODEVICE);
1408                         err = pci_dma_mapping_error(qdev->pdev, map);
1409                         if (err) {
1410                                 netif_err(qdev, tx_queued, qdev->ndev,
1411                                           "PCI mapping outbound address list failed with error: %d\n",
1412                                           err);
1413                                 goto map_error;
1414                         }
1415
1416                         tbd->addr = cpu_to_le64(map);
1417                         /*
1418                          * The length is the number of fragments
1419                          * that remain to be mapped times the size
1420                          * of each entry in our sglist (OAL).
1421                          */
1422                         tbd->len =
1423                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1424                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1425                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1426                                            map);
1427                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1428                                           sizeof(struct oal));
1429                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1430                         map_idx++;
1431                 }
1432
1433                 map =
1434                     pci_map_page(qdev->pdev, frag->page,
1435                                  frag->page_offset, frag->size,
1436                                  PCI_DMA_TODEVICE);
1437
1438                 err = pci_dma_mapping_error(qdev->pdev, map);
1439                 if (err) {
1440                         netif_err(qdev, tx_queued, qdev->ndev,
1441                                   "PCI mapping frags failed with error: %d.\n",
1442                                   err);
1443                         goto map_error;
1444                 }
1445
1446                 tbd->addr = cpu_to_le64(map);
1447                 tbd->len = cpu_to_le32(frag->size);
1448                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1449                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1450                                   frag->size);
1451
1452         }
1453         /* Save the number of segments we've mapped. */
1454         tx_ring_desc->map_cnt = map_idx;
1455         /* Terminate the last segment. */
1456         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1457         return NETDEV_TX_OK;
1458
1459 map_error:
1460         /*
1461          * If the first frag mapping failed, then map_idx will be one
1462          * and only the skb->data area gets unmapped.  Otherwise
1463          * we pass in the number of segments that mapped successfully
1464          * so they can all be unmapped.
1465          */
1466         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1467         return NETDEV_TX_BUSY;
1468 }
1469
1470 /* Process an inbound completion from an rx ring. */
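/* The frame is a checksummed TCP segment that arrived in a large buffer
 * page chunk: append the chunk as a frag to the per-NAPI GRO skb and
 * hand it to napi_gro_frags()/vlan_gro_frags().
 */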
1471 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1472                                         struct rx_ring *rx_ring,
1473                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1474                                         u32 length,
1475                                         u16 vlan_id)
1476 {
1477         struct sk_buff *skb;
1478         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1479         struct skb_frag_struct *rx_frag;
1480         int nr_frags;
1481         struct napi_struct *napi = &rx_ring->napi;
1482
1483         napi->dev = qdev->ndev;
1484
1485         skb = napi_get_frags(napi);
1486         if (!skb) {
1487                 netif_err(qdev, drv, qdev->ndev,
1488                           "Couldn't get an skb, exiting.\n");
1489                 rx_ring->rx_dropped++;
1490                 put_page(lbq_desc->p.pg_chunk.page);
1491                 return;
1492         }
1493         prefetch(lbq_desc->p.pg_chunk.va);
1494         rx_frag = skb_shinfo(skb)->frags;
1495         nr_frags = skb_shinfo(skb)->nr_frags;
1496         rx_frag += nr_frags;
1497         rx_frag->page = lbq_desc->p.pg_chunk.page;
1498         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1499         rx_frag->size = length;
1500
1501         skb->len += length;
1502         skb->data_len += length;
1503         skb->truesize += length;
1504         skb_shinfo(skb)->nr_frags++;
1505
1506         rx_ring->rx_packets++;
1507         rx_ring->rx_bytes += length;
1508         skb->ip_summed = CHECKSUM_UNNECESSARY;
1509         skb_record_rx_queue(skb, rx_ring->cq_id);
1510         if (qdev->vlgrp && (vlan_id != 0xffff))
1511                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1512         else
1513                 napi_gro_frags(napi);
1514 }
1515
1516 /* Process an inbound completion from an rx ring. */
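/* The frame is a non-TCP packet that arrived in a large buffer page
 * chunk: copy the Ethernet header into a fresh skb, attach the rest of
 * the chunk as a page frag, then pass the skb up the stack.
 */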
1517 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518                                         struct rx_ring *rx_ring,
1519                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1520                                         u32 length,
1521                                         u16 vlan_id)
1522 {
1523         struct net_device *ndev = qdev->ndev;
1524         struct sk_buff *skb = NULL;
1525         void *addr;
1526         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527         struct napi_struct *napi = &rx_ring->napi;
1528
1529         skb = netdev_alloc_skb(ndev, length);
1530         if (!skb) {
1531                 netif_err(qdev, drv, qdev->ndev,
1532                           "Couldn't get an skb, need to unwind!\n");
1533                 rx_ring->rx_dropped++;
1534                 put_page(lbq_desc->p.pg_chunk.page);
1535                 return;
1536         }
1537
1538         addr = lbq_desc->p.pg_chunk.va;
1539         prefetch(addr);
1540
1541
1542         /* Frame error, so drop the packet. */
1543         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1544                 netif_info(qdev, drv, qdev->ndev,
1545                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1546                 rx_ring->rx_errors++;
1547                 goto err_out;
1548         }
1549
1550         /* The max framesize filter on this chip is set higher than
1551          * MTU since FCoE uses 2k frames.
1552          */
1553         if (skb->len > ndev->mtu + ETH_HLEN) {
1554                 netif_err(qdev, drv, qdev->ndev,
1555                           "Frame too long, dropping.\n");
1556                 rx_ring->rx_dropped++;
1557                 goto err_out;
1558         }
1559         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1560         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1561                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562                      length);
1563         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1564                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565                                 length-ETH_HLEN);
1566         skb->len += length-ETH_HLEN;
1567         skb->data_len += length-ETH_HLEN;
1568         skb->truesize += length-ETH_HLEN;
1569
1570         rx_ring->rx_packets++;
1571         rx_ring->rx_bytes += skb->len;
1572         skb->protocol = eth_type_trans(skb, ndev);
1573         skb_checksum_none_assert(skb);
1574
1575         if ((ndev->features & NETIF_F_RXCSUM) &&
1576                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577                 /* TCP frame. */
1578                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1579                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1580                                      "TCP checksum done!\n");
1581                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1582                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1583                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1584                         /* Unfragmented ipv4 UDP frame. */
1585                         struct iphdr *iph = (struct iphdr *) skb->data;
1586                         if (!(iph->frag_off &
1587                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1588                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1589                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1590                                              qdev->ndev,
1591                                              "UDP checksum done!\n");
1592                         }
1593                 }
1594         }
1595
1596         skb_record_rx_queue(skb, rx_ring->cq_id);
1597         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1598                 if (qdev->vlgrp && (vlan_id != 0xffff))
1599                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1600                 else
1601                         napi_gro_receive(napi, skb);
1602         } else {
1603                 if (qdev->vlgrp && (vlan_id != 0xffff))
1604                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1605                 else
1606                         netif_receive_skb(skb);
1607         }
1608         return;
1609 err_out:
1610         dev_kfree_skb_any(skb);
1611         put_page(lbq_desc->p.pg_chunk.page);
1612 }
1613
1614 /* Process an inbound completion from an rx ring. */
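/* The entire frame fits in a single small buffer: copy it into a newly
 * allocated skb so the small buffer can be reused, then pass it up.
 */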
1615 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1616                                         struct rx_ring *rx_ring,
1617                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1618                                         u32 length,
1619                                         u16 vlan_id)
1620 {
1621         struct net_device *ndev = qdev->ndev;
1622         struct sk_buff *skb = NULL;
1623         struct sk_buff *new_skb = NULL;
1624         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1625
1626         skb = sbq_desc->p.skb;
1627         /* Allocate new_skb and copy */
1628         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1629         if (new_skb == NULL) {
1630                 netif_err(qdev, probe, qdev->ndev,
1631                           "No skb available, drop the packet.\n");
1632                 rx_ring->rx_dropped++;
1633                 return;
1634         }
1635         skb_reserve(new_skb, NET_IP_ALIGN);
1636         memcpy(skb_put(new_skb, length), skb->data, length);
1637         skb = new_skb;
1638
1639         /* Frame error, so drop the packet. */
1640         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1641                 netif_info(qdev, drv, qdev->ndev,
1642                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1643                 dev_kfree_skb_any(skb);
1644                 rx_ring->rx_errors++;
1645                 return;
1646         }
1647
1648         /* loopback self test for ethtool */
1649         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650                 ql_check_lb_frame(qdev, skb);
1651                 dev_kfree_skb_any(skb);
1652                 return;
1653         }
1654
1655         /* The max framesize filter on this chip is set higher than
1656          * MTU since FCoE uses 2k frames.
1657          */
1658         if (skb->len > ndev->mtu + ETH_HLEN) {
1659                 dev_kfree_skb_any(skb);
1660                 rx_ring->rx_dropped++;
1661                 return;
1662         }
1663
1664         prefetch(skb->data);
1665         skb->dev = ndev;
1666         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1667                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668                              "%s Multicast.\n",
1669                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1675         }
1676         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1677                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678                              "Promiscuous Packet.\n");
1679
1680         rx_ring->rx_packets++;
1681         rx_ring->rx_bytes += skb->len;
1682         skb->protocol = eth_type_trans(skb, ndev);
1683         skb_checksum_none_assert(skb);
1684
1685         /* If rx checksum is on, and there are no
1686          * csum or frame errors.
1687          */
1688         if ((ndev->features & NETIF_F_RXCSUM) &&
1689                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690                 /* TCP frame. */
1691                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1692                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693                                      "TCP checksum done!\n");
1694                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1695                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697                         /* Unfragmented ipv4 UDP frame. */
1698                         struct iphdr *iph = (struct iphdr *) skb->data;
1699                         if (!(iph->frag_off &
1700                                 ntohs(IP_MF|IP_OFFSET))) {
1701                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1703                                              qdev->ndev,
1704                                              "UDP checksum done!\n");
1705                         }
1706                 }
1707         }
1708
1709         skb_record_rx_queue(skb, rx_ring->cq_id);
1710         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1711                 if (qdev->vlgrp && (vlan_id != 0xffff))
1712                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1713                                                 vlan_id, skb);
1714                 else
1715                         napi_gro_receive(&rx_ring->napi, skb);
1716         } else {
1717                 if (qdev->vlgrp && (vlan_id != 0xffff))
1718                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1719                 else
1720                         netif_receive_skb(skb);
1721         }
1722 }
1723
1724 static void ql_realign_skb(struct sk_buff *skb, int len)
1725 {
1726         void *temp_addr = skb->data;
1727
1728         /* Undo the skb_reserve(skb,32) we did before
1729          * giving to hardware, and realign data on
1730          * a 2-byte boundary.
1731          */
1732         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1733         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1734         skb_copy_to_linear_data(skb, temp_addr,
1735                 (unsigned int)len);
1736 }
1737
1738 /*
1739  * This function builds an skb for the given inbound
1740  * completion.  It will be rewritten for readability in the near
1741  * future, but for now it works well.
1742  */
1743 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1744                                        struct rx_ring *rx_ring,
1745                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1746 {
1747         struct bq_desc *lbq_desc;
1748         struct bq_desc *sbq_desc;
1749         struct sk_buff *skb = NULL;
1750         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1751         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1752
1753         /*
1754          * Handle the header buffer if present.
1755          */
1756         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1757             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1758                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759                              "Header of %d bytes in small buffer.\n", hdr_len);
1760                 /*
1761                  * Headers fit nicely into a small buffer.
1762                  */
1763                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1764                 pci_unmap_single(qdev->pdev,
1765                                 dma_unmap_addr(sbq_desc, mapaddr),
1766                                 dma_unmap_len(sbq_desc, maplen),
1767                                 PCI_DMA_FROMDEVICE);
1768                 skb = sbq_desc->p.skb;
1769                 ql_realign_skb(skb, hdr_len);
1770                 skb_put(skb, hdr_len);
1771                 sbq_desc->p.skb = NULL;
1772         }
1773
1774         /*
1775          * Handle the data buffer(s).
1776          */
1777         if (unlikely(!length)) {        /* Is there data too? */
1778                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779                              "No Data buffer in this packet.\n");
1780                 return skb;
1781         }
1782
1783         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1784                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1785                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786                                      "Headers in small, data of %d bytes in small, combine them.\n",
1787                                      length);
1788                         /*
1789                          * Data is less than small buffer size so it's
1790                          * stuffed in a small buffer.
1791                          * For this case we append the data
1792                          * from the "data" small buffer to the "header" small
1793                          * buffer.
1794                          */
1795                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1796                         pci_dma_sync_single_for_cpu(qdev->pdev,
1797                                                     dma_unmap_addr
1798                                                     (sbq_desc, mapaddr),
1799                                                     dma_unmap_len
1800                                                     (sbq_desc, maplen),
1801                                                     PCI_DMA_FROMDEVICE);
1802                         memcpy(skb_put(skb, length),
1803                                sbq_desc->p.skb->data, length);
1804                         pci_dma_sync_single_for_device(qdev->pdev,
1805                                                        dma_unmap_addr
1806                                                        (sbq_desc,
1807                                                         mapaddr),
1808                                                        dma_unmap_len
1809                                                        (sbq_desc,
1810                                                         maplen),
1811                                                        PCI_DMA_FROMDEVICE);
1812                 } else {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "%d bytes in a single small buffer.\n",
1815                                      length);
1816                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1817                         skb = sbq_desc->p.skb;
1818                         ql_realign_skb(skb, length);
1819                         skb_put(skb, length);
1820                         pci_unmap_single(qdev->pdev,
1821                                          dma_unmap_addr(sbq_desc,
1822                                                         mapaddr),
1823                                          dma_unmap_len(sbq_desc,
1824                                                        maplen),
1825                                          PCI_DMA_FROMDEVICE);
1826                         sbq_desc->p.skb = NULL;
1827                 }
1828         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1829                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1830                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831                                      "Header in small, %d bytes in large. Chain large to small!\n",
1832                                      length);
1833                         /*
1834                          * The data is in a single large buffer.  We
1835                          * chain it to the header buffer's skb and let
1836                          * it rip.
1837                          */
1838                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1839                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1840                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1841                                      lbq_desc->p.pg_chunk.offset, length);
1842                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1843                                                 lbq_desc->p.pg_chunk.offset,
1844                                                 length);
1845                         skb->len += length;
1846                         skb->data_len += length;
1847                         skb->truesize += length;
1848                 } else {
1849                         /*
1850                          * The headers and data are in a single large buffer. We
1851                          * copy it to a new skb and let it go. This can happen with
1852                          * jumbo mtu on a non-TCP/UDP frame.
1853                          */
1854                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1855                         skb = netdev_alloc_skb(qdev->ndev, length);
1856                         if (skb == NULL) {
1857                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1858                                              "No skb available, drop the packet.\n");
1859                                 return NULL;
1860                         }
1861                         pci_unmap_page(qdev->pdev,
1862                                        dma_unmap_addr(lbq_desc,
1863                                                       mapaddr),
1864                                        dma_unmap_len(lbq_desc, maplen),
1865                                        PCI_DMA_FROMDEVICE);
1866                         skb_reserve(skb, NET_IP_ALIGN);
1867                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1868                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1869                                      length);
1870                         skb_fill_page_desc(skb, 0,
1871                                                 lbq_desc->p.pg_chunk.page,
1872                                                 lbq_desc->p.pg_chunk.offset,
1873                                                 length);
1874                         skb->len += length;
1875                         skb->data_len += length;
1876                         skb->truesize += length;
1877                         length -= length;
1878                         __pskb_pull_tail(skb,
1879                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1880                                 VLAN_ETH_HLEN : ETH_HLEN);
1881                 }
1882         } else {
1883                 /*
1884                  * The data is in a chain of large buffers
1885                  * pointed to by a small buffer.  We loop
1886                  * through and chain them to our small header
1887                  * buffer's skb.
1888                  * frags:  There are 18 max frags and our small
1889                  *         buffer will hold 32 of them. The thing is,
1890                  *         we'll use 3 max for our 9000 byte jumbo
1891                  *         frames.  If the MTU goes up we could
1892                  *         eventually be in trouble.
1893                  */
1894                 int size, i = 0;
1895                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1896                 pci_unmap_single(qdev->pdev,
1897                                  dma_unmap_addr(sbq_desc, mapaddr),
1898                                  dma_unmap_len(sbq_desc, maplen),
1899                                  PCI_DMA_FROMDEVICE);
1900                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1901                         /*
1902                          * This is a non-TCP/UDP IP frame, so
1903                          * the headers aren't split into a small
1904                          * buffer.  We have to use the small buffer
1905                          * that contains our sg list as our skb to
1906                          * send upstairs. Copy the sg list here to
1907                          * a local buffer and use it to find the
1908                          * pages to chain.
1909                          */
1910                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1911                                      "%d bytes of headers & data in chain of large.\n",
1912                                      length);
1913                         skb = sbq_desc->p.skb;
1914                         sbq_desc->p.skb = NULL;
1915                         skb_reserve(skb, NET_IP_ALIGN);
1916                 }
1917                 while (length > 0) {
1918                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1919                         size = (length < rx_ring->lbq_buf_size) ? length :
1920                                 rx_ring->lbq_buf_size;
1921
1922                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1923                                      "Adding page %d to skb for %d bytes.\n",
1924                                      i, size);
1925                         skb_fill_page_desc(skb, i,
1926                                                 lbq_desc->p.pg_chunk.page,
1927                                                 lbq_desc->p.pg_chunk.offset,
1928                                                 size);
1929                         skb->len += size;
1930                         skb->data_len += size;
1931                         skb->truesize += size;
1932                         length -= size;
1933                         i++;
1934                 }
1935                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1936                                 VLAN_ETH_HLEN : ETH_HLEN);
1937         }
1938         return skb;
1939 }
1940
1941 /* Process an inbound completion from an rx ring. */
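/* Build an skb from the header/data buffers via ql_build_rx_skb(), apply
 * rx checksum and VLAN handling, and hand the frame to the stack.
 */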
1942 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1943                                    struct rx_ring *rx_ring,
1944                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1945                                    u16 vlan_id)
1946 {
1947         struct net_device *ndev = qdev->ndev;
1948         struct sk_buff *skb = NULL;
1949
1950         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1951
1952         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1953         if (unlikely(!skb)) {
1954                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1955                              "No skb available, drop packet.\n");
1956                 rx_ring->rx_dropped++;
1957                 return;
1958         }
1959
1960         /* Frame error, so drop the packet. */
1961         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1962                 netif_info(qdev, drv, qdev->ndev,
1963                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1964                 dev_kfree_skb_any(skb);
1965                 rx_ring->rx_errors++;
1966                 return;
1967         }
1968
1969         /* The max framesize filter on this chip is set higher than
1970          * MTU since FCoE uses 2k frames.
1971          */
1972         if (skb->len > ndev->mtu + ETH_HLEN) {
1973                 dev_kfree_skb_any(skb);
1974                 rx_ring->rx_dropped++;
1975                 return;
1976         }
1977
1978         /* loopback self test for ethtool */
1979         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1980                 ql_check_lb_frame(qdev, skb);
1981                 dev_kfree_skb_any(skb);
1982                 return;
1983         }
1984
1985         prefetch(skb->data);
1986         skb->dev = ndev;
1987         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1988                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1989                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1990                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1991                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1992                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1993                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1994                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1995                 rx_ring->rx_multicast++;
1996         }
1997         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1998                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1999                              "Promiscuous Packet.\n");
2000         }
2001
2002         skb->protocol = eth_type_trans(skb, ndev);
2003         skb_checksum_none_assert(skb);
2004
2005         /* If rx checksum is on, and there are no
2006          * csum or frame errors.
2007          */
2008         if ((ndev->features & NETIF_F_RXCSUM) &&
2009                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2010                 /* TCP frame. */
2011                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2012                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2013                                      "TCP checksum done!\n");
2014                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2015                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2016                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2017                         /* Unfragmented ipv4 UDP frame. */
2018                         struct iphdr *iph = (struct iphdr *) skb->data;
2019                         if (!(iph->frag_off &
2020                                 ntohs(IP_MF|IP_OFFSET))) {
2021                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2022                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2023                                              "UDP checksum done!\n");
2024                         }
2025                 }
2026         }
2027
2028         rx_ring->rx_packets++;
2029         rx_ring->rx_bytes += skb->len;
2030         skb_record_rx_queue(skb, rx_ring->cq_id);
2031         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2032                 if (qdev->vlgrp &&
2033                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2034                         (vlan_id != 0))
2035                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2036                                 vlan_id, skb);
2037                 else
2038                         napi_gro_receive(&rx_ring->napi, skb);
2039         } else {
2040                 if (qdev->vlgrp &&
2041                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2042                         (vlan_id != 0))
2043                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2044                 else
2045                         netif_receive_skb(skb);
2046         }
2047 }
2048
2049 /* Process an inbound completion from an rx ring. */
2050 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2051                                         struct rx_ring *rx_ring,
2052                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2053 {
2054         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2055         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2056                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2057                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2058
2059         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2060
2061         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2062                 /* The data and headers are split into
2063                  * separate buffers.
2064                  */
2065                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066                                                 vlan_id);
2067         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2068                 /* The data fit in a single small buffer.
2069                  * Allocate a new skb, copy the data and
2070                  * return the buffer to the free pool.
2071                  */
2072                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2073                                                 length, vlan_id);
2074         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2075                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2076                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2077                 /* TCP packet in a page chunk that's been checksummed.
2078                  * Tack it on to our GRO skb and let it go.
2079                  */
2080                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2081                                                 length, vlan_id);
2082         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083                 /* Non-TCP packet in a page chunk. Allocate an
2084                  * skb, tack it on frags, and send it up.
2085                  */
2086                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2087                                                 length, vlan_id);
2088         } else {
2089                 /* Non-TCP/UDP large frames that span multiple buffers
2090                  * can be processed correctly by the split frame logic.
2091                  */
2092                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2093                                                 vlan_id);
2094         }
2095
2096         return (unsigned long)length;
2097 }
2098
2099 /* Process an outbound completion from an rx ring. */
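/* Unmap the DMA buffers of the completed send, free the skb, update the
 * tx statistics and warn about any error flags set in the response.
 */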
2100 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2101                                    struct ob_mac_iocb_rsp *mac_rsp)
2102 {
2103         struct tx_ring *tx_ring;
2104         struct tx_ring_desc *tx_ring_desc;
2105
2106         QL_DUMP_OB_MAC_RSP(mac_rsp);
2107         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2108         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2109         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2110         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2111         tx_ring->tx_packets++;
2112         dev_kfree_skb(tx_ring_desc->skb);
2113         tx_ring_desc->skb = NULL;
2114
2115         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2116                                         OB_MAC_IOCB_RSP_S |
2117                                         OB_MAC_IOCB_RSP_L |
2118                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2119                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2120                         netif_warn(qdev, tx_done, qdev->ndev,
2121                                    "Total descriptor length did not match transfer length.\n");
2122                 }
2123                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2124                         netif_warn(qdev, tx_done, qdev->ndev,
2125                                    "Frame too short to be valid, not sent.\n");
2126                 }
2127                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2128                         netif_warn(qdev, tx_done, qdev->ndev,
2129                                    "Frame too long, but sent anyway.\n");
2130                 }
2131                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2132                         netif_warn(qdev, tx_done, qdev->ndev,
2133                                    "PCI backplane error. Frame not sent.\n");
2134                 }
2135         }
2136         atomic_inc(&tx_ring->tx_count);
2137 }
2138
2139 /* Fire up a handler to reset the MPI processor. */
2140 void ql_queue_fw_error(struct ql_adapter *qdev)
2141 {
2142         ql_link_off(qdev);
2143         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2144 }
2145
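/* Take the link down, mask further interrupts and schedule the ASIC
 * reset worker.
 */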
2146 void ql_queue_asic_error(struct ql_adapter *qdev)
2147 {
2148         ql_link_off(qdev);
2149         ql_disable_interrupts(qdev);
2150         /* Clear adapter up bit to signal the recovery
2151          * process that it shouldn't kill the reset worker
2152          * thread
2153          */
2154         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2155         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2156 }
2157
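/* Handle an asynchronous event completion from the chip.  A fatal MPI
 * error schedules a firmware reset; the remaining events schedule an
 * ASIC reset.
 */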
2158 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2159                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2160 {
2161         switch (ib_ae_rsp->event) {
2162         case MGMT_ERR_EVENT:
2163                 netif_err(qdev, rx_err, qdev->ndev,
2164                           "Management Processor Fatal Error.\n");
2165                 ql_queue_fw_error(qdev);
2166                 return;
2167
2168         case CAM_LOOKUP_ERR_EVENT:
2169                 netif_err(qdev, link, qdev->ndev,
2170                           "Multiple CAM hits lookup occurred.\n");
2171                 netif_err(qdev, drv, qdev->ndev,
2172                           "This event shouldn't occur.\n");
2173                 ql_queue_asic_error(qdev);
2174                 return;
2175
2176         case SOFT_ECC_ERROR_EVENT:
2177                 netif_err(qdev, rx_err, qdev->ndev,
2178                           "Soft ECC error detected.\n");
2179                 ql_queue_asic_error(qdev);
2180                 break;
2181
2182         case PCI_ERR_ANON_BUF_RD:
2183                 netif_err(qdev, rx_err, qdev->ndev,
2184                           "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2185                           ib_ae_rsp->q_id);
2186                 ql_queue_asic_error(qdev);
2187                 break;
2188
2189         default:
2190                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2191                           ib_ae_rsp->event);
2192                 ql_queue_asic_error(qdev);
2193                 break;
2194         }
2195 }
2196
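/* Service tx completions posted to this outbound completion ring and
 * wake the corresponding tx subqueue once it is at least 25% empty
 * again.  Returns the number of completions processed.
 */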
2197 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2198 {
2199         struct ql_adapter *qdev = rx_ring->qdev;
2200         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2201         struct ob_mac_iocb_rsp *net_rsp = NULL;
2202         int count = 0;
2203
2204         struct tx_ring *tx_ring;
2205         /* While there are entries in the completion queue. */
2206         while (prod != rx_ring->cnsmr_idx) {
2207
2208                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2209                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2210                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2211
2212                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2213                 rmb();
2214                 switch (net_rsp->opcode) {
2215
2216                 case OPCODE_OB_MAC_TSO_IOCB:
2217                 case OPCODE_OB_MAC_IOCB:
2218                         ql_process_mac_tx_intr(qdev, net_rsp);
2219                         break;
2220                 default:
2221                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2222                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2223                                      net_rsp->opcode);
2224                 }
2225                 count++;
2226                 ql_update_cq(rx_ring);
2227                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2228         }
2229         if (!net_rsp)
2230                 return 0;
2231         ql_write_cq_idx(rx_ring);
2232         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2233         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2234                 if (atomic_read(&tx_ring->queue_stopped) &&
2235                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2236                         /*
2237                          * The queue got stopped because the tx_ring was full.
2238                          * Wake it up, because it's now at least 25% empty.
2239                          */
2240                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2241         }
2242
2243         return count;
2244 }
2245
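/* Process up to 'budget' inbound completions, then replenish the rx
 * buffer queues and update the completion queue consumer index.
 * Returns the number of completions processed.
 */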
2246 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2247 {
2248         struct ql_adapter *qdev = rx_ring->qdev;
2249         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2250         struct ql_net_rsp_iocb *net_rsp;
2251         int count = 0;
2252
2253         /* While there are entries in the completion queue. */
2254         while (prod != rx_ring->cnsmr_idx) {
2255
2256                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2257                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2258                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2259
2260                 net_rsp = rx_ring->curr_entry;
2261                 rmb();
2262                 switch (net_rsp->opcode) {
2263                 case OPCODE_IB_MAC_IOCB:
2264                         ql_process_mac_rx_intr(qdev, rx_ring,
2265                                                (struct ib_mac_iocb_rsp *)
2266                                                net_rsp);
2267                         break;
2268
2269                 case OPCODE_IB_AE_IOCB:
2270                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2271                                                 net_rsp);
2272                         break;
2273                 default:
2274                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2275                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2276                                      net_rsp->opcode);
2277                         break;
2278                 }
2279                 count++;
2280                 ql_update_cq(rx_ring);
2281                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2282                 if (count == budget)
2283                         break;
2284         }
2285         ql_update_buffer_queues(qdev, rx_ring);
2286         ql_write_cq_idx(rx_ring);
2287         return count;
2288 }
2289
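/* NAPI poll routine for one MSI-X vector: service any tx completion
 * rings that share this vector first, then the RSS rx ring itself, and
 * re-enable the interrupt once the work done is under budget.
 */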
2290 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2291 {
2292         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2293         struct ql_adapter *qdev = rx_ring->qdev;
2294         struct rx_ring *trx_ring;
2295         int i, work_done = 0;
2296         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2297
2298         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2299                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2300
2301         /* Service the TX rings first.  They start
2302          * right after the RSS rings. */
2303         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2304                 trx_ring = &qdev->rx_ring[i];
2305                 /* If this TX completion ring belongs to this vector and
2306                  * it's not empty then service it.
2307                  */
2308                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2309                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2310                                         trx_ring->cnsmr_idx)) {
2311                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2312                                      "%s: Servicing TX completion ring %d.\n",
2313                                      __func__, trx_ring->cq_id);
2314                         ql_clean_outbound_rx_ring(trx_ring);
2315                 }
2316         }
2317
2318         /*
2319          * Now service the RSS ring if it's active.
2320          */
2321         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2322                                         rx_ring->cnsmr_idx) {
2323                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2324                              "%s: Servicing RX completion ring %d.\n",
2325                              __func__, rx_ring->cq_id);
2326                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2327         }
2328
2329         if (work_done < budget) {
2330                 napi_complete(napi);
2331                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2332         }
2333         return work_done;
2334 }
2335
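/* Cache the vlan group pointer and turn VLAN handling in NIC_RCV_CFG
 * on or off accordingly.
 */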
2336 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2337 {
2338         struct ql_adapter *qdev = netdev_priv(ndev);
2339
2340         qdev->vlgrp = grp;
2341         if (grp) {
2342                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2343                              "Turning on VLAN in NIC_RCV_CFG.\n");
2344                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2345                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2346         } else {
2347                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2348                              "Turning off VLAN in NIC_RCV_CFG.\n");
2349                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2350         }
2351 }
2352
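/* Enable the given VLAN id in the hardware's VLAN address registers. */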
2353 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2354 {
2355         struct ql_adapter *qdev = netdev_priv(ndev);
2356         u32 enable_bit = MAC_ADDR_E;
2357         int status;
2358
2359         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2360         if (status)
2361                 return;
2362         if (ql_set_mac_addr_reg
2363             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2364                 netif_err(qdev, ifup, qdev->ndev,
2365                           "Failed to init vlan address.\n");
2366         }
2367         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2368 }
2369
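/* Clear the given VLAN id from the hardware's VLAN address registers. */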
2370 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2371 {
2372         struct ql_adapter *qdev = netdev_priv(ndev);
2373         u32 enable_bit = 0;
2374         int status;
2375
2376         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2377         if (status)
2378                 return;
2379
2380         if (ql_set_mac_addr_reg
2381             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2382                 netif_err(qdev, ifup, qdev->ndev,
2383                           "Failed to clear vlan address.\n");
2384         }
2385         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2386
2387 }
2388
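/* Re-apply the cached vlan group setting and re-add every VLAN id that
 * is still present in the group.
 */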
2389 static void qlge_restore_vlan(struct ql_adapter *qdev)
2390 {
2391         qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2392
2393         if (qdev->vlgrp) {
2394                 u16 vid;
2395                 for (vid = 0; vid < VLAN_N_VID; vid++) {
2396                         if (!vlan_group_get_device(qdev->vlgrp, vid))
2397                                 continue;
2398                         qlge_vlan_rx_add_vid(qdev->ndev, vid);
2399                 }
2400         }
2401 }
2402
2403 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2404 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2405 {
2406         struct rx_ring *rx_ring = dev_id;
2407         napi_schedule(&rx_ring->napi);
2408         return IRQ_HANDLED;
2409 }
2410
2411 /* This handles a fatal error, MPI activity, and the default
2412  * rx_ring in an MSI-X multiple vector environment.
2413  * In an MSI/Legacy environment it also processes the rest of
2414  * the rx_rings.
2415  */
2416 static irqreturn_t qlge_isr(int irq, void *dev_id)
2417 {
2418         struct rx_ring *rx_ring = dev_id;
2419         struct ql_adapter *qdev = rx_ring->qdev;
2420         struct intr_context *intr_context = &qdev->intr_context[0];
2421         u32 var;
2422         int work_done = 0;
2423
2424         spin_lock(&qdev->hw_lock);
2425         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2426                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2427                              "Shared Interrupt, Not ours!\n");
2428                 spin_unlock(&qdev->hw_lock);
2429                 return IRQ_NONE;
2430         }
2431         spin_unlock(&qdev->hw_lock);
2432
2433         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2434
2435         /*
2436          * Check for fatal error.
2437          */
2438         if (var & STS_FE) {
2439                 ql_queue_asic_error(qdev);
2440                 netif_err(qdev, intr, qdev->ndev,
2441                           "Got fatal error, STS = %x.\n", var);
2442                 var = ql_read32(qdev, ERR_STS);
2443                 netif_err(qdev, intr, qdev->ndev,
2444                           "Resetting chip. Error Status Register = 0x%x\n", var);
2445                 return IRQ_HANDLED;
2446         }
2447
2448         /*
2449          * Check MPI processor activity.
2450          */
2451         if ((var & STS_PI) &&
2452                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2453                 /*
2454                  * We've got an async event or mailbox completion.
2455                  * Handle it and clear the source of the interrupt.
2456                  */
2457                 netif_err(qdev, intr, qdev->ndev,
2458                           "Got MPI processor interrupt.\n");
2459                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2460                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2461                 queue_delayed_work_on(smp_processor_id(),
2462                                 qdev->workqueue, &qdev->mpi_work, 0);
2463                 work_done++;
2464         }
2465
2466         /*
2467          * Get the bit-mask that shows the active queues for this
2468          * pass.  Compare it to the queues that this irq services
2469          * and call napi if there's a match.
2470          */
2471         var = ql_read32(qdev, ISR1);
2472         if (var & intr_context->irq_mask) {
2473                 netif_info(qdev, intr, qdev->ndev,
2474                            "Waking handler for rx_ring[0].\n");
2475                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2476                 napi_schedule(&rx_ring->napi);
2477                 work_done++;
2478         }
2479         ql_enable_completion_interrupt(qdev, intr_context->intr);
2480         return work_done ? IRQ_HANDLED : IRQ_NONE;
2481 }
2482
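/* Fill in the TSO fields of the outbound IOCB for a GSO skb.  Returns 1
 * if TSO was set up, 0 if the skb is not GSO, or a negative errno if
 * expanding a cloned header fails.
 */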
2483 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2484 {
2485
2486         if (skb_is_gso(skb)) {
2487                 int err;
2488                 if (skb_header_cloned(skb)) {
2489                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2490                         if (err)
2491                                 return err;
2492                 }
2493
2494                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2495                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2496                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2497                 mac_iocb_ptr->total_hdrs_len =
2498                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2499                 mac_iocb_ptr->net_trans_offset =
2500                     cpu_to_le16(skb_network_offset(skb) |
2501                                 skb_transport_offset(skb)
2502                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2503                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2504                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2505                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2506                         struct iphdr *iph = ip_hdr(skb);
2507                         iph->check = 0;
2508                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2509                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2510                                                                  iph->daddr, 0,
2511                                                                  IPPROTO_TCP,
2512                                                                  0);
2513                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2514                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2515                         tcp_hdr(skb)->check =
2516                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2517                                              &ipv6_hdr(skb)->daddr,
2518                                              0, IPPROTO_TCP, 0);
2519                 }
2520                 return 1;
2521         }
2522         return 0;
2523 }
2524
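/* Set up TCP/UDP checksum offload for a non-TSO IPv4 frame.  The
 * pseudo-header checksum is seeded into the checksum field for the
 * hardware to complete.
 */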
2525 static void ql_hw_csum_setup(struct sk_buff *skb,
2526                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2527 {
2528         int len;
2529         struct iphdr *iph = ip_hdr(skb);
2530         __sum16 *check;
2531         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2532         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2533         mac_iocb_ptr->net_trans_offset =
2534                 cpu_to_le16(skb_network_offset(skb) |
2535                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2536
2537         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2538         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2539         if (likely(iph->protocol == IPPROTO_TCP)) {
2540                 check = &(tcp_hdr(skb)->check);
2541                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2542                 mac_iocb_ptr->total_hdrs_len =
2543                     cpu_to_le16(skb_transport_offset(skb) +
2544                                 (tcp_hdr(skb)->doff << 2));
2545         } else {
2546                 check = &(udp_hdr(skb)->check);
2547                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2548                 mac_iocb_ptr->total_hdrs_len =
2549                     cpu_to_le16(skb_transport_offset(skb) +
2550                                 sizeof(struct udphdr));
2551         }
2552         *check = ~csum_tcpudp_magic(iph->saddr,
2553                                     iph->daddr, len, iph->protocol, 0);
2554 }
2555
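/* Main transmit routine.  Builds an outbound MAC IOCB for the skb,
 * applies TSO or checksum offload as needed, maps the buffers and
 * rings the doorbell for the selected tx ring.
 */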
2556 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2557 {
2558         struct tx_ring_desc *tx_ring_desc;
2559         struct ob_mac_iocb_req *mac_iocb_ptr;
2560         struct ql_adapter *qdev = netdev_priv(ndev);
2561         int tso;
2562         struct tx_ring *tx_ring;
2563         u32 tx_ring_idx = (u32) skb->queue_mapping;
2564
2565         tx_ring = &qdev->tx_ring[tx_ring_idx];
2566
2567         if (skb_padto(skb, ETH_ZLEN))
2568                 return NETDEV_TX_OK;
2569
2570         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2571                 netif_info(qdev, tx_queued, qdev->ndev,
2572                            "%s: shutting down tx queue %d due to lack of resources.\n",
2573                            __func__, tx_ring_idx);
2574                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2575                 atomic_inc(&tx_ring->queue_stopped);
2576                 tx_ring->tx_errors++;
2577                 return NETDEV_TX_BUSY;
2578         }
2579         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2580         mac_iocb_ptr = tx_ring_desc->queue_entry;
2581         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2582
2583         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2584         mac_iocb_ptr->tid = tx_ring_desc->index;
2585         /* We use the upper 32-bits to store the tx queue for this IO.
2586          * When we get the completion we can use it to establish the context.
2587          */
2588         mac_iocb_ptr->txq_idx = tx_ring_idx;
2589         tx_ring_desc->skb = skb;
2590
2591         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2592
2593         if (vlan_tx_tag_present(skb)) {
2594                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2595                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2596                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2597                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2598         }
2599         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600         if (tso < 0) {
2601                 dev_kfree_skb_any(skb);
2602                 return NETDEV_TX_OK;
2603         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2604                 ql_hw_csum_setup(skb,
2605                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2606         }
2607         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2608                         NETDEV_TX_OK) {
2609                 netif_err(qdev, tx_queued, qdev->ndev,
2610                           "Could not map the segments.\n");
2611                 tx_ring->tx_errors++;
2612                 return NETDEV_TX_BUSY;
2613         }
2614         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2615         tx_ring->prod_idx++;
2616         if (tx_ring->prod_idx == tx_ring->wq_len)
2617                 tx_ring->prod_idx = 0;
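        /* Make sure the IOCB writes are visible to the chip before the
         * doorbell write below.
         */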
2618         wmb();
2619
2620         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2621         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2622                      "tx queued, slot %d, len %d\n",
2623                      tx_ring->prod_idx, skb->len);
2624
2625         atomic_dec(&tx_ring->tx_count);
2626         return NETDEV_TX_OK;
2627 }
2628
2629
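/* Free the DMA-coherent pages used for the rx and tx shadow registers. */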
2630 static void ql_free_shadow_space(struct ql_adapter *qdev)
2631 {
2632         if (qdev->rx_ring_shadow_reg_area) {
2633                 pci_free_consistent(qdev->pdev,
2634                                     PAGE_SIZE,
2635                                     qdev->rx_ring_shadow_reg_area,
2636                                     qdev->rx_ring_shadow_reg_dma);
2637                 qdev->rx_ring_shadow_reg_area = NULL;
2638         }
2639         if (qdev->tx_ring_shadow_reg_area) {
2640                 pci_free_consistent(qdev->pdev,
2641                                     PAGE_SIZE,
2642                                     qdev->tx_ring_shadow_reg_area,
2643                                     qdev->tx_ring_shadow_reg_dma);
2644                 qdev->tx_ring_shadow_reg_area = NULL;
2645         }
2646 }
2647
2648 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2649 {
2650         qdev->rx_ring_shadow_reg_area =
2651             pci_alloc_consistent(qdev->pdev,
2652                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2653         if (qdev->rx_ring_shadow_reg_area == NULL) {
2654                 netif_err(qdev, ifup, qdev->ndev,
2655                           "Allocation of RX shadow space failed.\n");
2656                 return -ENOMEM;
2657         }
2658         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2659         qdev->tx_ring_shadow_reg_area =
2660             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2661                                  &qdev->tx_ring_shadow_reg_dma);
2662         if (qdev->tx_ring_shadow_reg_area == NULL) {
2663                 netif_err(qdev, ifup, qdev->ndev,
2664                           "Allocation of TX shadow space failed.\n");
2665                 goto err_wqp_sh_area;
2666         }
2667         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2668         return 0;
2669
2670 err_wqp_sh_area:
2671         pci_free_consistent(qdev->pdev,
2672                             PAGE_SIZE,
2673                             qdev->rx_ring_shadow_reg_area,
2674                             qdev->rx_ring_shadow_reg_dma);
2675         return -ENOMEM;
2676 }
2677
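/* Link each tx descriptor to its IOCB slot in the work queue and reset
 * the ring's free-entry count.
 */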
2678 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2679 {
2680         struct tx_ring_desc *tx_ring_desc;
2681         int i;
2682         struct ob_mac_iocb_req *mac_iocb_ptr;
2683
2684         mac_iocb_ptr = tx_ring->wq_base;
2685         tx_ring_desc = tx_ring->q;
2686         for (i = 0; i < tx_ring->wq_len; i++) {
2687                 tx_ring_desc->index = i;
2688                 tx_ring_desc->skb = NULL;
2689                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2690                 mac_iocb_ptr++;
2691                 tx_ring_desc++;
2692         }
2693         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2694         atomic_set(&tx_ring->queue_stopped, 0);
2695 }
2696
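/* Free the work queue DMA area and the descriptor array for a tx ring. */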
2697 static void ql_free_tx_resources(struct ql_adapter *qdev,
2698                                  struct tx_ring *tx_ring)
2699 {
2700         if (tx_ring->wq_base) {
2701                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2702                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2703                 tx_ring->wq_base = NULL;
2704         }
2705         kfree(tx_ring->q);
2706         tx_ring->q = NULL;
2707 }
2708
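/* Allocate the DMA-coherent work queue and the driver-side descriptor
 * array for a tx ring.
 */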
2709 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2710                                  struct tx_ring *tx_ring)
2711 {
2712         tx_ring->wq_base =
2713             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2714                                  &tx_ring->wq_base_dma);
2715
2716         if ((tx_ring->wq_base == NULL) ||
2717             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2718                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2719                 return -ENOMEM;
2720         }
2721         tx_ring->q =
2722             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2723         if (tx_ring->q == NULL)
2724                 goto err;
2725
2726         return 0;
2727 err:
2728         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2729                             tx_ring->wq_base, tx_ring->wq_base_dma);
2730         return -ENOMEM;
2731 }
2732
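/* Return any pages still held on the large buffer queue, unmapping the
 * DMA block when the last chunk of a page is released.
 */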
2733 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2734 {
2735         struct bq_desc *lbq_desc;
2736
2737         uint32_t  curr_idx, clean_idx;
2738
2739         curr_idx = rx_ring->lbq_curr_idx;
2740         clean_idx = rx_ring->lbq_clean_idx;
2741         while (curr_idx != clean_idx) {
2742                 lbq_desc = &rx_ring->lbq[curr_idx];
2743
2744                 if (lbq_desc->p.pg_chunk.last_flag) {
2745                         pci_unmap_page(qdev->pdev,
2746                                 lbq_desc->p.pg_chunk.map,
2747                                 ql_lbq_block_size(qdev),
2748                                        PCI_DMA_FROMDEVICE);
2749                         lbq_desc->p.pg_chunk.last_flag = 0;
2750                 }
2751
2752                 put_page(lbq_desc->p.pg_chunk.page);
2753                 lbq_desc->p.pg_chunk.page = NULL;
2754
2755                 if (++curr_idx == rx_ring->lbq_len)
2756                         curr_idx = 0;
2757
2758         }
2759 }
2760
2761 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2762 {
2763         int i;
2764         struct bq_desc *sbq_desc;
2765
2766         for (i = 0; i < rx_ring->sbq_len; i++) {
2767                 sbq_desc = &rx_ring->sbq[i];
2768                 if (sbq_desc == NULL) {
2769                         netif_err(qdev, ifup, qdev->ndev,
2770                                   "sbq_desc %d is NULL.\n", i);
2771                         return;
2772                 }
2773                 if (sbq_desc->p.skb) {
2774                         pci_unmap_single(qdev->pdev,
2775                                          dma_unmap_addr(sbq_desc, mapaddr),
2776                                          dma_unmap_len(sbq_desc, maplen),
2777                                          PCI_DMA_FROMDEVICE);
2778                         dev_kfree_skb(sbq_desc->p.skb);
2779                         sbq_desc->p.skb = NULL;
2780                 }
2781         }
2782 }
2783
2784 /* Free all large and small rx buffers associated
2785  * with the completion queues for this device.
2786  */
2787 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2788 {
2789         int i;
2790         struct rx_ring *rx_ring;
2791
2792         for (i = 0; i < qdev->rx_ring_count; i++) {
2793                 rx_ring = &qdev->rx_ring[i];
2794                 if (rx_ring->lbq)
2795                         ql_free_lbq_buffers(qdev, rx_ring);
2796                 if (rx_ring->sbq)
2797                         ql_free_sbq_buffers(qdev, rx_ring);
2798         }
2799 }
2800
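/* Populate the small and large buffer queues for every non-TX
 * completion queue.
 */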
2801 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2802 {
2803         struct rx_ring *rx_ring;
2804         int i;
2805
2806         for (i = 0; i < qdev->rx_ring_count; i++) {
2807                 rx_ring = &qdev->rx_ring[i];
2808                 if (rx_ring->type != TX_Q)
2809                         ql_update_buffer_queues(qdev, rx_ring);
2810         }
2811 }
2812
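/* Point each large buffer descriptor at its slot in the address block
 * that is handed to the chip.
 */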
2813 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2814                                 struct rx_ring *rx_ring)
2815 {
2816         int i;
2817         struct bq_desc *lbq_desc;
2818         __le64 *bq = rx_ring->lbq_base;
2819
2820         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2821         for (i = 0; i < rx_ring->lbq_len; i++) {
2822                 lbq_desc = &rx_ring->lbq[i];
2823                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2824                 lbq_desc->index = i;
2825                 lbq_desc->addr = bq;
2826                 bq++;
2827         }
2828 }
2829
2830 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2831                                 struct rx_ring *rx_ring)
2832 {
2833         int i;
2834         struct bq_desc *sbq_desc;
2835         __le64 *bq = rx_ring->sbq_base;
2836
2837         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2838         for (i = 0; i < rx_ring->sbq_len; i++) {
2839                 sbq_desc = &rx_ring->sbq[i];
2840                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2841                 sbq_desc->index = i;
2842                 sbq_desc->addr = bq;
2843                 bq++;
2844         }
2845 }
2846
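/* Free the completion queue and the small/large buffer queues for one
 * rx ring.
 */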
2847 static void ql_free_rx_resources(struct ql_adapter *qdev,
2848                                  struct rx_ring *rx_ring)
2849 {
2850         /* Free the small buffer queue. */
2851         if (rx_ring->sbq_base) {
2852                 pci_free_consistent(qdev->pdev,
2853                                     rx_ring->sbq_size,
2854                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2855                 rx_ring->sbq_base = NULL;
2856         }
2857
2858         /* Free the small buffer queue control blocks. */
2859         kfree(rx_ring->sbq);
2860         rx_ring->sbq = NULL;
2861
2862         /* Free the large buffer queue. */
2863         if (rx_ring->lbq_base) {
2864                 pci_free_consistent(qdev->pdev,
2865                                     rx_ring->lbq_size,
2866                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2867                 rx_ring->lbq_base = NULL;
2868         }
2869
2870         /* Free the large buffer queue control blocks. */
2871         kfree(rx_ring->lbq);
2872         rx_ring->lbq = NULL;
2873
2874         /* Free the rx queue. */
2875         if (rx_ring->cq_base) {
2876                 pci_free_consistent(qdev->pdev,
2877                                     rx_ring->cq_size,
2878                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2879                 rx_ring->cq_base = NULL;
2880         }
2881 }
2882
2883 /* Allocate queues and buffers for this completion queue based
2884  * on the values in the parameter structure. */
2885 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2886                                  struct rx_ring *rx_ring)
2887 {
2888
2889         /*
2890          * Allocate the completion queue for this rx_ring.
2891          */
2892         rx_ring->cq_base =
2893             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2894                                  &rx_ring->cq_base_dma);
2895
2896         if (rx_ring->cq_base == NULL) {
2897                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2898                 return -ENOMEM;
2899         }
2900
2901         if (rx_ring->sbq_len) {
2902                 /*
2903                  * Allocate small buffer queue.
2904                  */
2905                 rx_ring->sbq_base =
2906                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2907                                          &rx_ring->sbq_base_dma);
2908
2909                 if (rx_ring->sbq_base == NULL) {
2910                         netif_err(qdev, ifup, qdev->ndev,
2911                                   "Small buffer queue allocation failed.\n");
2912                         goto err_mem;
2913                 }
2914
2915                 /*
2916                  * Allocate small buffer queue control blocks.
2917                  */
2918                 rx_ring->sbq =
2919                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2920                             GFP_KERNEL);
2921                 if (rx_ring->sbq == NULL) {
2922                         netif_err(qdev, ifup, qdev->ndev,
2923                                   "Small buffer queue control block allocation failed.\n");
2924                         goto err_mem;
2925                 }
2926
2927                 ql_init_sbq_ring(qdev, rx_ring);
2928         }
2929
2930         if (rx_ring->lbq_len) {
2931                 /*
2932                  * Allocate large buffer queue.
2933                  */
2934                 rx_ring->lbq_base =
2935                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2936                                          &rx_ring->lbq_base_dma);
2937
2938                 if (rx_ring->lbq_base == NULL) {
2939                         netif_err(qdev, ifup, qdev->ndev,
2940                                   "Large buffer queue allocation failed.\n");
2941                         goto err_mem;
2942                 }
2943                 /*
2944                  * Allocate large buffer queue control blocks.
2945                  */
2946                 rx_ring->lbq =
2947                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2948                             GFP_KERNEL);
2949                 if (rx_ring->lbq == NULL) {
2950                         netif_err(qdev, ifup, qdev->ndev,
2951                                   "Large buffer queue control block allocation failed.\n");
2952                         goto err_mem;
2953                 }
2954
2955                 ql_init_lbq_ring(qdev, rx_ring);
2956         }
2957
2958         return 0;
2959
2960 err_mem:
2961         ql_free_rx_resources(qdev, rx_ring);
2962         return -ENOMEM;
2963 }
2964
2965 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2966 {
2967         struct tx_ring *tx_ring;
2968         struct tx_ring_desc *tx_ring_desc;
2969         int i, j;
2970
2971         /*
2972          * Loop through all queues and free
2973          * any resources.
2974          */
2975         for (j = 0; j < qdev->tx_ring_count; j++) {
2976                 tx_ring = &qdev->tx_ring[j];
2977                 for (i = 0; i < tx_ring->wq_len; i++) {
2978                         tx_ring_desc = &tx_ring->q[i];
2979                         if (tx_ring_desc && tx_ring_desc->skb) {
2980                                 netif_err(qdev, ifdown, qdev->ndev,
2981                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2982                                           tx_ring_desc->skb, j,
2983                                           tx_ring_desc->index);
2984                                 ql_unmap_send(qdev, tx_ring_desc,
2985                                               tx_ring_desc->map_cnt);
2986                                 dev_kfree_skb(tx_ring_desc->skb);
2987                                 tx_ring_desc->skb = NULL;
2988                         }
2989                 }
2990         }
2991 }
2992
2993 static void ql_free_mem_resources(struct ql_adapter *qdev)
2994 {
2995         int i;
2996
2997         for (i = 0; i < qdev->tx_ring_count; i++)
2998                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2999         for (i = 0; i < qdev->rx_ring_count; i++)
3000                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3001         ql_free_shadow_space(qdev);
3002 }
3003
3004 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3005 {
3006         int i;
3007
3008         /* Allocate space for our shadow registers and such. */
3009         if (ql_alloc_shadow_space(qdev))
3010                 return -ENOMEM;
3011
3012         for (i = 0; i < qdev->rx_ring_count; i++) {
3013                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3014                         netif_err(qdev, ifup, qdev->ndev,
3015                                   "RX resource allocation failed.\n");
3016                         goto err_mem;
3017                 }
3018         }
3019         /* Allocate tx queue resources */
3020         for (i = 0; i < qdev->tx_ring_count; i++) {
3021                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3022                         netif_err(qdev, ifup, qdev->ndev,
3023                                   "TX resource allocation failed.\n");
3024                         goto err_mem;
3025                 }
3026         }
3027         return 0;
3028
3029 err_mem:
3030         ql_free_mem_resources(qdev);
3031         return -ENOMEM;
3032 }
3033
3034 /* Set up the rx ring control block and pass it to the chip.
3035  * The control block is defined as
3036  * "Completion Queue Initialization Control Block", or cqicb.
3037  */
3038 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3039 {
3040         struct cqicb *cqicb = &rx_ring->cqicb;
3041         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3042                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3043         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3044                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3045         void __iomem *doorbell_area =
3046             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3047         int err = 0;
3048         u16 bq_len;
3049         u64 tmp;
3050         __le64 *base_indirect_ptr;
3051         int page_entries;
3052
3053         /* Set up the shadow registers for this ring. */
3054         rx_ring->prod_idx_sh_reg = shadow_reg;
3055         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3056         *rx_ring->prod_idx_sh_reg = 0;
3057         shadow_reg += sizeof(u64);
3058         shadow_reg_dma += sizeof(u64);
3059         rx_ring->lbq_base_indirect = shadow_reg;
3060         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3061         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3063         rx_ring->sbq_base_indirect = shadow_reg;
3064         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3065
3066         /* PCI doorbell mem area + 0x00 for consumer index register */
3067         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3068         rx_ring->cnsmr_idx = 0;
3069         rx_ring->curr_entry = rx_ring->cq_base;
3070
3071         /* PCI doorbell mem area + 0x04 for valid register */
3072         rx_ring->valid_db_reg = doorbell_area + 0x04;
3073
3074         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3075         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3076
3077         /* PCI doorbell mem area + 0x1c */
3078         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3079
3080         memset((void *)cqicb, 0, sizeof(struct cqicb));
3081         cqicb->msix_vect = rx_ring->irq;
3082
3083         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3084         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3085
3086         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3087
3088         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3089
3090         /*
3091          * Set up the control block load flags.
3092          */
3093         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3094             FLAGS_LV |          /* Load MSI-X vector */
3095             FLAGS_LI;           /* Load irq delay values */
3096         if (rx_ring->lbq_len) {
3097                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3098                 tmp = (u64)rx_ring->lbq_base_dma;
3099                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3100                 page_entries = 0;
3101                 do {
3102                         *base_indirect_ptr = cpu_to_le64(tmp);
3103                         tmp += DB_PAGE_SIZE;
3104                         base_indirect_ptr++;
3105                         page_entries++;
3106                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3107                 cqicb->lbq_addr =
3108                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3109                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3110                         (u16) rx_ring->lbq_buf_size;
3111                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3112                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3113                         (u16) rx_ring->lbq_len;
3114                 cqicb->lbq_len = cpu_to_le16(bq_len);
3115                 rx_ring->lbq_prod_idx = 0;
3116                 rx_ring->lbq_curr_idx = 0;
3117                 rx_ring->lbq_clean_idx = 0;
3118                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3119         }
3120         if (rx_ring->sbq_len) {
3121                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3122                 tmp = (u64)rx_ring->sbq_base_dma;
3123                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3124                 page_entries = 0;
3125                 do {
3126                         *base_indirect_ptr = cpu_to_le64(tmp);
3127                         tmp += DB_PAGE_SIZE;
3128                         base_indirect_ptr++;
3129                         page_entries++;
3130                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3131                 cqicb->sbq_addr =
3132                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3133                 cqicb->sbq_buf_size =
3134                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3135                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3136                         (u16) rx_ring->sbq_len;
3137                 cqicb->sbq_len = cpu_to_le16(bq_len);
3138                 rx_ring->sbq_prod_idx = 0;
3139                 rx_ring->sbq_curr_idx = 0;
3140                 rx_ring->sbq_clean_idx = 0;
3141                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3142         }
3143         switch (rx_ring->type) {
3144         case TX_Q:
3145                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3146                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3147                 break;
3148         case RX_Q:
3149                 /* Inbound completion handling rx_rings run in
3150                  * separate NAPI contexts.
3151                  */
3152                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3153                                64);
3154                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3155                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3156                 break;
3157         default:
3158                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3159                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3160         }
3161         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3162                      "Initializing rx completion queue.\n");
3163         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3164                            CFG_LCQ, rx_ring->cq_id);
3165         if (err) {
3166                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3167                 return err;
3168         }
3169         return err;
3170 }
3171
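/* Set up the work queue control block (wqicb) for this tx ring and
 * load it into the chip.
 */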
3172 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3173 {
3174         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3175         void __iomem *doorbell_area =
3176             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3177         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3178             (tx_ring->wq_id * sizeof(u64));
3179         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3180             (tx_ring->wq_id * sizeof(u64));
3181         int err = 0;
3182
3183         /*
3184          * Assign doorbell registers for this tx_ring.
3185          */
3186         /* TX PCI doorbell mem area for tx producer index */
3187         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3188         tx_ring->prod_idx = 0;
3189         /* TX PCI doorbell mem area + 0x04 */
3190         tx_ring->valid_db_reg = doorbell_area + 0x04;
3191
3192         /*
3193          * Assign shadow registers for this tx_ring.
3194          */
3195         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3196         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3197
3198         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3199         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3200                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3201         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3202         wqicb->rid = 0;
3203         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3204
3205         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3206
3207         ql_init_tx_ring(qdev, tx_ring);
3208
3209         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3210                            (u16) tx_ring->wq_id);
3211         if (err) {
3212                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3213                 return err;
3214         }
3215         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3216                      "Successfully loaded WQICB.\n");
3217         return err;
3218 }
3219
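/* Release whichever of MSI-X or MSI is currently enabled. */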
3220 static void ql_disable_msix(struct ql_adapter *qdev)
3221 {
3222         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3223                 pci_disable_msix(qdev->pdev);
3224                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3225                 kfree(qdev->msi_x_entry);
3226                 qdev->msi_x_entry = NULL;
3227         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3228                 pci_disable_msi(qdev->pdev);
3229                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3230         }
3231 }
3232
3233 /* We start by trying to get the number of vectors
3234  * stored in qdev->intr_count. If we don't get that
3235  * many then we reduce the count and try again.
3236  */
3237 static void ql_enable_msix(struct ql_adapter *qdev)
3238 {
3239         int i, err;
3240
3241         /* Get the MSIX vectors. */
3242         if (qlge_irq_type == MSIX_IRQ) {
3243                 /* Try to alloc space for the msix struct,
3244                  * if it fails then go to MSI/legacy.
3245                  */
3246                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3247                                             sizeof(struct msix_entry),
3248                                             GFP_KERNEL);
3249                 if (!qdev->msi_x_entry) {
3250                         qlge_irq_type = MSI_IRQ;
3251                         goto msi;
3252                 }
3253
3254                 for (i = 0; i < qdev->intr_count; i++)
3255                         qdev->msi_x_entry[i].entry = i;
3256
3257                 /* Loop to get our vectors.  We start with
3258                  * what we want and settle for what we get.
3259                  */
3260                 do {
3261                         err = pci_enable_msix(qdev->pdev,
3262                                 qdev->msi_x_entry, qdev->intr_count);
3263                         if (err > 0)
3264                                 qdev->intr_count = err;
3265                 } while (err > 0);
3266
3267                 if (err < 0) {
3268                         kfree(qdev->msi_x_entry);
3269                         qdev->msi_x_entry = NULL;
3270                         netif_warn(qdev, ifup, qdev->ndev,
3271                                    "MSI-X Enable failed, trying MSI.\n");
3272                         qdev->intr_count = 1;
3273                         qlge_irq_type = MSI_IRQ;
3274                 } else if (err == 0) {
3275                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3276                         netif_info(qdev, ifup, qdev->ndev,
3277                                    "MSI-X Enabled, got %d vectors.\n",
3278                                    qdev->intr_count);
3279                         return;
3280                 }
3281         }
3282 msi:
3283         qdev->intr_count = 1;
3284         if (qlge_irq_type == MSI_IRQ) {
3285                 if (!pci_enable_msi(qdev->pdev)) {
3286                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3287                         netif_info(qdev, ifup, qdev->ndev,
3288                                    "Running with MSI interrupts.\n");
3289                         return;
3290                 }
3291         }
3292         qlge_irq_type = LEG_IRQ;
3293         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3294                      "Running with legacy interrupts.\n");
3295 }
3296
3297 /* Each vector services 1 RSS ring and 1 or more
3298  * TX completion rings.  This function loops through
3299  * the TX completion rings and assigns the vector that
3300  * will service it.  An example would be if there are
3301  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3302  * This would mean that vector 0 would service RSS ring 0
3303  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3304  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3305  */
3306 static void ql_set_tx_vect(struct ql_adapter *qdev)
3307 {
3308         int i, j, vect;
3309         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3310
3311         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3312                 /* Assign irq vectors to the TX completion rx_rings. */
3313                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3314                                          i < qdev->rx_ring_count; i++) {
3315                         if (j == tx_rings_per_vector) {
3316                                 vect++;
3317                                 j = 0;
3318                         }
3319                         qdev->rx_ring[i].irq = vect;
3320                         j++;
3321                 }
3322         } else {
3323                 /* For single vector all rings have an irq
3324                  * of zero.
3325                  */
3326                 for (i = 0; i < qdev->rx_ring_count; i++)
3327                         qdev->rx_ring[i].irq = 0;
3328         }
3329 }
3330
3331 /* Set the interrupt mask for this vector.  Each vector
3332  * will service 1 RSS ring and 1 or more TX completion
3333  * rings.  This function sets up a bit mask per vector
3334  * that indicates which rings it services.
3335  */
3336 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3337 {
3338         int j, vect = ctx->intr;
3339         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3340
3341         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3342                 /* Add the RSS ring serviced by this vector
3343                  * to the mask.
3344                  */
3345                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3346                 /* Add the TX ring(s) serviced by this vector
3347                  * to the mask. */
3348                 for (j = 0; j < tx_rings_per_vector; j++) {
3349                         ctx->irq_mask |=
3350                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3351                         (vect * tx_rings_per_vector) + j].cq_id);
3352                 }
3353         } else {
3354                 /* For single vector we just shift each queue's
3355                  * ID into the mask.
3356                  */
3357                 for (j = 0; j < qdev->rx_ring_count; j++)
3358                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3359         }
3360 }
3361
3362 /*
3363  * Here we build the intr_context structures based on
3364  * our rx_ring count and intr vector count.
3365  * The intr_context structure is used to hook each vector
3366  * to possibly different handlers.
3367  */
3368 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3369 {
3370         int i = 0;
3371         struct intr_context *intr_context = &qdev->intr_context[0];
3372
3373         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3374                 /* Each rx_ring has its
3375                  * own intr_context since we have separate
3376                  * vectors for each queue.
3377                  */
3378                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3379                         qdev->rx_ring[i].irq = i;
3380                         intr_context->intr = i;
3381                         intr_context->qdev = qdev;
3382                         /* Set up this vector's bit-mask that indicates
3383                          * which queues it services.
3384                          */
3385                         ql_set_irq_mask(qdev, intr_context);
3386                         /*
3387                          * We set up each vector's enable/disable/read bits so
3388                          * there are no bit/mask calculations in the critical path.
3389                          */
3390                         intr_context->intr_en_mask =
3391                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3392                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3393                             | i;
3394                         intr_context->intr_dis_mask =
3395                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3396                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3397                             INTR_EN_IHD | i;
3398                         intr_context->intr_read_mask =
3399                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3400                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3401                             i;
3402                         if (i == 0) {
3403                                 /* The first vector/queue handles
3404                                  * broadcast/multicast, fatal errors,
3405                                  * and firmware events, in addition
3406                                  * to normal inbound NAPI processing.
3407                                  */
3408                                 intr_context->handler = qlge_isr;
3409                                 sprintf(intr_context->name, "%s-rx-%d",
3410                                         qdev->ndev->name, i);
3411                         } else {
3412                                 /*
3413                                  * Inbound queues handle unicast frames only.
3414                                  */
3415                                 intr_context->handler = qlge_msix_rx_isr;
3416                                 sprintf(intr_context->name, "%s-rx-%d",
3417                                         qdev->ndev->name, i);
3418                         }
3419                 }
3420         } else {
3421                 /*
3422                  * All rx_rings use the same intr_context since
3423                  * there is only one vector.
3424                  */
3425                 intr_context->intr = 0;
3426                 intr_context->qdev = qdev;
3427                 /*
3428                  * We set up each vector's enable/disable/read bits so
3429                  * there are no bit/mask calculations in the critical path.
3430                  */
3431                 intr_context->intr_en_mask =
3432                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3433                 intr_context->intr_dis_mask =
3434                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3435                     INTR_EN_TYPE_DISABLE;
3436                 intr_context->intr_read_mask =
3437                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3438                 /*
3439                  * Single interrupt means one handler for all rings.
3440                  */
3441                 intr_context->handler = qlge_isr;
3442                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3443                 /* Set up this vector's bit-mask that indicates
3444                  * which queues it services. In this case there is
3445                  * a single vector so it will service all RSS and
3446                  * TX completion rings.
3447                  */
3448                 ql_set_irq_mask(qdev, intr_context);
3449         }
3450         /* Tell the TX completion rings which MSIx vector
3451          * they will be using.
3452          */
3453         ql_set_tx_vect(qdev);
3454 }
3455
3456 static void ql_free_irq(struct ql_adapter *qdev)
3457 {
3458         int i;
3459         struct intr_context *intr_context = &qdev->intr_context[0];
3460
3461         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3462                 if (intr_context->hooked) {
3463                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3464                                 free_irq(qdev->msi_x_entry[i].vector,
3465                                          &qdev->rx_ring[i]);
3466                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3467                                              "freeing msix interrupt %d.\n", i);
3468                         } else {
3469                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3470                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3471                                              "freeing msi interrupt %d.\n", i);
3472                         }
3473                 }
3474         }
3475         ql_disable_msix(qdev);
3476 }
3477
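/* Hook an interrupt handler for each vector: one handler per rx_ring
 * when MSI-X is enabled, otherwise a single MSI or shared legacy
 * interrupt for rx_ring[0].
 */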
3478 static int ql_request_irq(struct ql_adapter *qdev)
3479 {
3480         int i;
3481         int status = 0;
3482         struct pci_dev *pdev = qdev->pdev;
3483         struct intr_context *intr_context = &qdev->intr_context[0];
3484
3485         ql_resolve_queues_to_irqs(qdev);
3486
3487         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3488                 atomic_set(&intr_context->irq_cnt, 0);
3489                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3490                         status = request_irq(qdev->msi_x_entry[i].vector,
3491                                              intr_context->handler,
3492                                              0,
3493                                              intr_context->name,
3494                                              &qdev->rx_ring[i]);
3495                         if (status) {
3496                                 netif_err(qdev, ifup, qdev->ndev,
3497                                           "Failed request for MSIX interrupt %d.\n",
3498                                           i);
3499                                 goto err_irq;
3500                         } else {
3501                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3502                                              "Hooked intr %d, queue type %s, with name %s.\n",
3503                                              i,
3504                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3505                                              "DEFAULT_Q" :
3506                                              qdev->rx_ring[i].type == TX_Q ?
3507                                              "TX_Q" :
3508                                              qdev->rx_ring[i].type == RX_Q ?
3509                                              "RX_Q" : "",
3510                                              intr_context->name);
3511                         }
3512                 } else {
3513                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3514                                      "trying msi or legacy interrupts.\n");
3515                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3516                                      "%s: irq = %d.\n", __func__, pdev->irq);
3517                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3518                                      "%s: context->name = %s.\n", __func__,
3519                                      intr_context->name);
3520                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521                                      "%s: dev_id = 0x%p.\n", __func__,
3522                                      &qdev->rx_ring[0]);
3523                         status =
3524                             request_irq(pdev->irq, qlge_isr,
3525                                         test_bit(QL_MSI_ENABLED,
3526                                                  &qdev->
3527                                                  flags) ? 0 : IRQF_SHARED,
3528                                         intr_context->name, &qdev->rx_ring[0]);
3529                         if (status)
3530                                 goto err_irq;
3531
3532                         netif_err(qdev, ifup, qdev->ndev,
3533                                   "Hooked intr %d, queue type %s, with name %s.\n",
3534                                   i,
3535                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3536                                   "DEFAULT_Q" :
3537                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3538                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3539                                   intr_context->name);
3540                 }
3541                 intr_context->hooked = 1;
3542         }
3543         return status;
3544 err_irq:
3545         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3546         ql_free_irq(qdev);
3547         return status;
3548 }
3549
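/* Build the RSS indirection table and hash keys (RICB) and load them
 * into the chip.  Each of the 1024 table entries maps a hash bucket to
 * one of the RSS completion queues.
 */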
3550 static int ql_start_rss(struct ql_adapter *qdev)
3551 {
3552         static const u8 init_hash_seed[] = {
3553                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3554                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3555                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3556                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3557                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3558         };
3559         struct ricb *ricb = &qdev->ricb;
3560         int status = 0;
3561         int i;
3562         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3563
3564         memset((void *)ricb, 0, sizeof(*ricb));
3565
3566         ricb->base_cq = RSS_L4K;
3567         ricb->flags =
3568                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3569         ricb->mask = cpu_to_le16((u16)(0x3ff));
3570
3571         /*
3572          * Fill out the Indirection Table.
3573          */
3574         for (i = 0; i < 1024; i++)
3575                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3576
3577         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3578         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3579
3580         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3581
3582         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3583         if (status) {
3584                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3585                 return status;
3586         }
3587         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3588                      "Successfully loaded RICB.\n");
3589         return status;
3590 }
3591
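/* Clear all 16 routing table entries while holding the routing index
 * semaphore.
 */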
3592 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3593 {
3594         int i, status = 0;
3595
3596         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597         if (status)
3598                 return status;
3599         /* Clear all the entries in the routing table. */
3600         for (i = 0; i < 16; i++) {
3601                 status = ql_set_routing_reg(qdev, i, 0, 0);
3602                 if (status) {
3603                         netif_err(qdev, ifup, qdev->ndev,
3604                                   "Failed to init routing register for CAM packets.\n");
3605                         break;
3606                 }
3607         }
3608         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3609         return status;
3610 }
3611
3612 /* Initialize the frame-to-queue routing. */
3613 static int ql_route_initialize(struct ql_adapter *qdev)
3614 {
3615         int status = 0;
3616
3617         /* Clear all the entries in the routing table. */
3618         status = ql_clear_routing_entries(qdev);
3619         if (status)
3620                 return status;
3621
3622         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3623         if (status)
3624                 return status;
3625
3626         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3627                                                 RT_IDX_IP_CSUM_ERR, 1);
3628         if (status) {
3629                 netif_err(qdev, ifup, qdev->ndev,
3630                         "Failed to init routing register "
3631                         "for IP CSUM error packets.\n");
3632                 goto exit;
3633         }
3634         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3635                                                 RT_IDX_TU_CSUM_ERR, 1);
3636         if (status) {
3637                 netif_err(qdev, ifup, qdev->ndev,
3638                         "Failed to init routing register "
3639                         "for TCP/UDP CSUM error packets.\n");
3640                 goto exit;
3641         }
3642         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3643         if (status) {
3644                 netif_err(qdev, ifup, qdev->ndev,
3645                           "Failed to init routing register for broadcast packets.\n");
3646                 goto exit;
3647         }
3648         /* If we have more than one inbound queue, then turn on RSS in the
3649          * routing block.
3650          */
3651         if (qdev->rss_ring_count > 1) {
3652                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3653                                         RT_IDX_RSS_MATCH, 1);
3654                 if (status) {
3655                         netif_err(qdev, ifup, qdev->ndev,
3656                                   "Failed to init routing register for MATCH RSS packets.\n");
3657                         goto exit;
3658                 }
3659         }
3660
3661         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3662                                     RT_IDX_CAM_HIT, 1);
3663         if (status)
3664                 netif_err(qdev, ifup, qdev->ndev,
3665                           "Failed to init routing register for CAM packets.\n");
3666 exit:
3667         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3668         return status;
3669 }
3670
3671 int ql_cam_route_initialize(struct ql_adapter *qdev)
3672 {
3673         int status, set;
3674
3675         /* Check if the link is up and use that to
3676          * determine whether we are setting or clearing
3677          * the MAC address in the CAM.
3678          */
3679         set = ql_read32(qdev, STS);
3680         set &= qdev->port_link_up;
3681         status = ql_set_mac_addr(qdev, set);
3682         if (status) {
3683                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3684                 return status;
3685         }
3686
3687         status = ql_route_initialize(qdev);
3688         if (status)
3689                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3690
3691         return status;
3692 }
3693
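/* Bring the hardware up: program the global control registers, start
 * the rx and tx rings, load RSS and the CAM/routing filters, then
 * enable NAPI on the RSS queues.
 */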
3694 static int ql_adapter_initialize(struct ql_adapter *qdev)
3695 {
3696         u32 value, mask;
3697         int i;
3698         int status = 0;
3699
3700         /*
3701          * Set up the System register to halt on errors.
3702          */
3703         value = SYS_EFE | SYS_FAE;
3704         mask = value << 16;
3705         ql_write32(qdev, SYS, mask | value);
3706
3707         /* Set the default queue, and VLAN behavior. */
3708         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3709         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3710         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3711
3712         /* Set the MPI interrupt to enabled. */
3713         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3714
3715         /* Enable the function, set pagesize, enable error checking. */
3716         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3717             FSC_EC | FSC_VM_PAGE_4K;
3718         value |= SPLT_SETTING;
3719
3720         /* Set/clear header splitting. */
3721         mask = FSC_VM_PAGESIZE_MASK |
3722             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3723         ql_write32(qdev, FSC, mask | value);
3724
3725         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3726
3727         /* Set RX packet routing to use the port/pci function on which the
3728          * packet arrived, in addition to the usual frame routing.
3729          * This is helpful on bonding where both interfaces can have
3730          * the same MAC address.
3731          */
3732         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3733         /* Reroute all packets to our Interface.
3734          * They may have been routed to MPI firmware
3735          * due to WOL.
3736          */
3737         value = ql_read32(qdev, MGMT_RCV_CFG);
3738         value &= ~MGMT_RCV_CFG_RM;
3739         mask = 0xffff0000;
3740
3741         /* Sticky reg needs clearing due to WOL. */
3742         ql_write32(qdev, MGMT_RCV_CFG, mask);
3743         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3744
3745         /* Default WOL is enabled on Mezz cards */
3746         if (qdev->pdev->subsystem_device == 0x0068 ||
3747                         qdev->pdev->subsystem_device == 0x0180)
3748                 qdev->wol = WAKE_MAGIC;
3749
3750         /* Start up the rx queues. */
3751         for (i = 0; i < qdev->rx_ring_count; i++) {
3752                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3753                 if (status) {
3754                         netif_err(qdev, ifup, qdev->ndev,
3755                                   "Failed to start rx ring[%d].\n", i);
3756                         return status;
3757                 }
3758         }
3759
3760         /* If there is more than one inbound completion queue
3761          * then download a RICB to configure RSS.
3762          */
3763         if (qdev->rss_ring_count > 1) {
3764                 status = ql_start_rss(qdev);
3765                 if (status) {
3766                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3767                         return status;
3768                 }
3769         }
3770
3771         /* Start up the tx queues. */
3772         for (i = 0; i < qdev->tx_ring_count; i++) {
3773                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3774                 if (status) {
3775                         netif_err(qdev, ifup, qdev->ndev,
3776                                   "Failed to start tx ring[%d].\n", i);
3777                         return status;
3778                 }
3779         }
3780
3781         /* Initialize the port and set the max framesize. */
3782         status = qdev->nic_ops->port_initialize(qdev);
3783         if (status)
3784                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3785
3786         /* Set up the MAC address and frame routing filter. */
3787         status = ql_cam_route_initialize(qdev);
3788         if (status) {
3789                 netif_err(qdev, ifup, qdev->ndev,
3790                           "Failed to init CAM/Routing tables.\n");
3791                 return status;
3792         }
3793
3794         /* Start NAPI for the RSS queues. */
3795         for (i = 0; i < qdev->rss_ring_count; i++) {
3796                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3797                              "Enabling NAPI for rx_ring[%d].\n", i);
3798                 napi_enable(&qdev->rx_ring[i].napi);
3799         }
3800
3801         return status;
3802 }
3803
3804 /* Issue soft reset to chip. */
3805 static int ql_adapter_reset(struct ql_adapter *qdev)
3806 {
3807         u32 value;
3808         int status = 0;
3809         unsigned long end_jiffies;
3810
3811         /* Clear all the entries in the routing table. */
3812         status = ql_clear_routing_entries(qdev);
3813         if (status) {
3814                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3815                 return status;
3816         }
3817
3818         end_jiffies = jiffies +
3819                 max((unsigned long)1, usecs_to_jiffies(30));
3820
3821         /* Stop management traffic. */
3822         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3823
3824         /* Wait for the NIC and MGMNT FIFOs to empty. */
3825         ql_wait_fifo_empty(qdev);
3826
3827         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3828
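        /* The loop below expects the chip to clear RST_FO_FR once the
         * function reset completes; give up when end_jiffies expires.
         */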
3829         do {
3830                 value = ql_read32(qdev, RST_FO);
3831                 if ((value & RST_FO_FR) == 0)
3832                         break;
3833                 cpu_relax();
3834         } while (time_before(jiffies, end_jiffies));
3835
3836         if (value & RST_FO_FR) {
3837                 netif_err(qdev, ifdown, qdev->ndev,
3838                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3839                 status = -ETIMEDOUT;
3840         }
3841
3842         /* Resume management traffic. */
3843         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3844         return status;
3845 }
3846
3847 static void ql_display_dev_info(struct net_device *ndev)
3848 {
3849         struct ql_adapter *qdev = netdev_priv(ndev);
3850
3851         netif_info(qdev, probe, qdev->ndev,
3852                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3853                    "XG Roll = %d, XG Rev = %d.\n",
3854                    qdev->func,
3855                    qdev->port,
3856                    qdev->chip_rev_id & 0x0000000f,
3857                    qdev->chip_rev_id >> 4 & 0x0000000f,
3858                    qdev->chip_rev_id >> 8 & 0x0000000f,
3859                    qdev->chip_rev_id >> 12 & 0x0000000f);
3860         netif_info(qdev, probe, qdev->ndev,
3861                    "MAC address %pM\n", ndev->dev_addr);
3862 }
3863
3864 static int ql_wol(struct ql_adapter *qdev)
3865 {
3866         int status = 0;
3867         u32 wol = MB_WOL_DISABLE;
3868
3869         /* The CAM is still intact after a reset, but if we
3870          * are doing WOL, then we may need to program the
3871          * routing regs. We would also need to issue the mailbox
3872          * commands to instruct the MPI what to do per the ethtool
3873          * settings.
3874          */
3875
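        /* Only WAKE_MAGIC (or WOL disabled) is handled below; every other
         * ethtool WOL option is rejected up front.
         */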
3876         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3877                         WAKE_MCAST | WAKE_BCAST)) {
3878                 netif_err(qdev, ifdown, qdev->ndev,
3879                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3880                           qdev->wol);
3881                 return -EINVAL;
3882         }
3883
3884         if (qdev->wol & WAKE_MAGIC) {
3885                 status = ql_mb_wol_set_magic(qdev, 1);
3886                 if (status) {
3887                         netif_err(qdev, ifdown, qdev->ndev,
3888                                   "Failed to set magic packet on %s.\n",
3889                                   qdev->ndev->name);
3890                         return status;
3891                 } else
3892                         netif_info(qdev, drv, qdev->ndev,
3893                                    "Enabled magic packet successfully on %s.\n",
3894                                    qdev->ndev->name);
3895
3896                 wol |= MB_WOL_MAGIC_PKT;
3897         }
3898
3899         if (qdev->wol) {
3900                 wol |= MB_WOL_MODE_ON;
3901                 status = ql_mb_wol_mode(qdev, wol);
3902                 netif_err(qdev, drv, qdev->ndev,
3903                           "WOL %s (wol code 0x%x) on %s\n",
3904                           (status == 0) ? "Successfully set" : "Failed",
3905                           wol, qdev->ndev->name);
3906         }
3907
3908         return status;
3909 }
3910
3911 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3912 {
3913
3914         /* Don't kill the reset worker thread if we
3915          * are in the process of recovery.
3916          */
3917         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3918                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3919         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3920         cancel_delayed_work_sync(&qdev->mpi_work);
3921         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3922         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3923         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3924 }
3925
3926 static int ql_adapter_down(struct ql_adapter *qdev)
3927 {
3928         int i, status = 0;
3929
3930         ql_link_off(qdev);
3931
3932         ql_cancel_all_work_sync(qdev);
3933
3934         for (i = 0; i < qdev->rss_ring_count; i++)
3935                 napi_disable(&qdev->rx_ring[i].napi);
3936
3937         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3938
3939         ql_disable_interrupts(qdev);
3940
3941         ql_tx_ring_clean(qdev);
3942
3943         /* Call netif_napi_del() from common point.
3944          */
3945         for (i = 0; i < qdev->rss_ring_count; i++)
3946                 netif_napi_del(&qdev->rx_ring[i].napi);
3947
3948         status = ql_adapter_reset(qdev);
3949         if (status)
3950                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3951                           qdev->func);
3952         ql_free_rx_buffers(qdev);
3953
3954         return status;
3955 }
3956
3957 static int ql_adapter_up(struct ql_adapter *qdev)
3958 {
3959         int err = 0;
3960
3961         err = ql_adapter_initialize(qdev);
3962         if (err) {
3963                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3964                 goto err_init;
3965         }
3966         set_bit(QL_ADAPTER_UP, &qdev->flags);
3967         ql_alloc_rx_buffers(qdev);
3968         /* If the port is initialized and the
3969          * link is up then turn on the carrier.
3970          */
3971         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3972                         (ql_read32(qdev, STS) & qdev->port_link_up))
3973                 ql_link_on(qdev);
3974         /* Restore rx mode. */
3975         clear_bit(QL_ALLMULTI, &qdev->flags);
3976         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3977         qlge_set_multicast_list(qdev->ndev);
3978
3979         /* Restore vlan setting. */
3980         qlge_restore_vlan(qdev);
3981
3982         ql_enable_interrupts(qdev);
3983         ql_enable_all_completion_interrupts(qdev);
3984         netif_tx_start_all_queues(qdev->ndev);
3985
3986         return 0;
3987 err_init:
3988         ql_adapter_reset(qdev);
3989         return err;
3990 }
3991
3992 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3993 {
3994         ql_free_mem_resources(qdev);
3995         ql_free_irq(qdev);
3996 }
3997
3998 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3999 {
4000         int status = 0;
4001
4002         if (ql_alloc_mem_resources(qdev)) {
4003                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4004                 return -ENOMEM;
4005         }
4006         status = ql_request_irq(qdev);
4007         return status;
4008 }
4009
4010 static int qlge_close(struct net_device *ndev)
4011 {
4012         struct ql_adapter *qdev = netdev_priv(ndev);
4013
4014         /* If we hit the pci_channel_io_perm_failure
4015          * condition, then we have already
4016          * brought the adapter down.
4017          */
4018         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4019                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4020                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4021                 return 0;
4022         }
4023
4024         /*
4025          * Wait for device to recover from a reset.
4026          * (Rarely happens, but possible.)
4027          */
4028         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4029                 msleep(1);
4030         ql_adapter_down(qdev);
4031         ql_release_adapter_resources(qdev);
4032         return 0;
4033 }
4034
4035 static int ql_configure_rings(struct ql_adapter *qdev)
4036 {
4037         int i;
4038         struct rx_ring *rx_ring;
4039         struct tx_ring *tx_ring;
4040         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4041         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4042                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4043
4044         qdev->lbq_buf_order = get_order(lbq_buf_len);
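        /* MTUs above 1500 select the larger receive buffer size;
         * get_order() converts that byte length into the page-allocation
         * order used when the large buffer queues are refilled.
         */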
4045
4046         /* In a perfect world we have one RSS ring for each CPU
4047          * and each has its own vector.  To do that we ask for
4048          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4049          * vector count to what we actually get.  We then
4050          * allocate an RSS ring for each.
4051          * Essentially, we are doing min(cpu_count, msix_vector_count).
4052          */
4053         qdev->intr_count = cpu_cnt;
4054         ql_enable_msix(qdev);
4055         /* Adjust the RSS ring count to the actual vector count. */
4056         qdev->rss_ring_count = qdev->intr_count;
4057         qdev->tx_ring_count = cpu_cnt;
4058         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
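        /* For example, with cpu_cnt = 8 (8 online CPUs, MAX_CPUS
         * permitting) and all 8 MSI-X vectors granted, this gives
         * rss_ring_count = 8, tx_ring_count = 8 and rx_ring_count = 16;
         * if only 4 vectors are granted, the RSS ring count drops to 4
         * and rx_ring_count to 12.
         */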
4059
4060         for (i = 0; i < qdev->tx_ring_count; i++) {
4061                 tx_ring = &qdev->tx_ring[i];
4062                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4063                 tx_ring->qdev = qdev;
4064                 tx_ring->wq_id = i;
4065                 tx_ring->wq_len = qdev->tx_ring_size;
4066                 tx_ring->wq_size =
4067                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4068
4069                 /*
4070                  * The completion queue IDs for the tx rings start
4071                  * immediately after the rss rings.
4072                  */
4073                 tx_ring->cq_id = qdev->rss_ring_count + i;
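                /* e.g. with rss_ring_count = 8, tx ring 0 completes on
                 * cq 8, tx ring 1 on cq 9, and so on; those cqs are the
                 * TX_Q type rings set up in the loop below.
                 */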
4074         }
4075
4076         for (i = 0; i < qdev->rx_ring_count; i++) {
4077                 rx_ring = &qdev->rx_ring[i];
4078                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4079                 rx_ring->qdev = qdev;
4080                 rx_ring->cq_id = i;
4081                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4082                 if (i < qdev->rss_ring_count) {
4083                         /*
4084                          * Inbound (RSS) queues.
4085                          */
4086                         rx_ring->cq_len = qdev->rx_ring_size;
4087                         rx_ring->cq_size =
4088                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4089                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4090                         rx_ring->lbq_size =
4091                             rx_ring->lbq_len * sizeof(__le64);
4092                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4093                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4094                                      "lbq_buf_size %d, order = %d\n",
4095                                      rx_ring->lbq_buf_size,
4096                                      qdev->lbq_buf_order);
4097                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4098                         rx_ring->sbq_size =
4099                             rx_ring->sbq_len * sizeof(__le64);
4100                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4101                         rx_ring->type = RX_Q;
4102                 } else {
4103                         /*
4104                          * Outbound queue handles outbound completions only.
4105                          */
4106                         /* The outbound cq is the same size as the tx_ring it services. */
4107                         rx_ring->cq_len = qdev->tx_ring_size;
4108                         rx_ring->cq_size =
4109                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4110                         rx_ring->lbq_len = 0;
4111                         rx_ring->lbq_size = 0;
4112                         rx_ring->lbq_buf_size = 0;
4113                         rx_ring->sbq_len = 0;
4114                         rx_ring->sbq_size = 0;
4115                         rx_ring->sbq_buf_size = 0;
4116                         rx_ring->type = TX_Q;
4117                 }
4118         }
4119         return 0;
4120 }
4121
4122 static int qlge_open(struct net_device *ndev)
4123 {
4124         int err = 0;
4125         struct ql_adapter *qdev = netdev_priv(ndev);
4126
4127         err = ql_adapter_reset(qdev);
4128         if (err)
4129                 return err;
4130
4131         err = ql_configure_rings(qdev);
4132         if (err)
4133                 return err;
4134
4135         err = ql_get_adapter_resources(qdev);
4136         if (err)
4137                 goto error_up;
4138
4139         err = ql_adapter_up(qdev);
4140         if (err)
4141                 goto error_up;
4142
4143         return err;
4144
4145 error_up:
4146         ql_release_adapter_resources(qdev);
4147         return err;
4148 }
4149
4150 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4151 {
4152         struct rx_ring *rx_ring;
4153         int i, status;
4154         u32 lbq_buf_len;
4155
4156         /* Wait for an outstanding reset to complete. */
4157         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4158                 int i = 3;
4159                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4160                         netif_err(qdev, ifup, qdev->ndev,
4161                                   "Waiting for adapter UP...\n");
4162                         ssleep(1);
4163                 }
4164
4165                 if (!i) {
4166                         netif_err(qdev, ifup, qdev->ndev,
4167                                   "Timed out waiting for adapter UP\n");
4168                         return -ETIMEDOUT;
4169                 }
4170         }
4171
4172         status = ql_adapter_down(qdev);
4173         if (status)
4174                 goto error;
4175
4176         /* Get the new rx buffer size. */
4177         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4178                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4179         qdev->lbq_buf_order = get_order(lbq_buf_len);
4180
4181         for (i = 0; i < qdev->rss_ring_count; i++) {
4182                 rx_ring = &qdev->rx_ring[i];
4183                 /* Set the new size. */
4184                 rx_ring->lbq_buf_size = lbq_buf_len;
4185         }
4186
4187         status = ql_adapter_up(qdev);
4188         if (status)
4189                 goto error;
4190
4191         return status;
4192 error:
4193         netif_alert(qdev, ifup, qdev->ndev,
4194                     "Driver up/down cycle failed, closing device.\n");
4195         set_bit(QL_ADAPTER_UP, &qdev->flags);
4196         dev_close(qdev->ndev);
4197         return status;
4198 }
4199
4200 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4201 {
4202         struct ql_adapter *qdev = netdev_priv(ndev);
4203         int status;
4204
4205         if (ndev->mtu == 1500 && new_mtu == 9000) {
4206                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4207         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4208                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4209         } else
4210                 return -EINVAL;
4211
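        /* The deferred port-config work presumably lets the MPI firmware
         * pick up the new max frame size a few seconds after the MTU
         * change takes effect.
         */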
4212         queue_delayed_work(qdev->workqueue,
4213                         &qdev->mpi_port_cfg_work, 3*HZ);
4214
4215         ndev->mtu = new_mtu;
4216
4217         if (!netif_running(qdev->ndev)) {
4218                 return 0;
4219         }
4220
4221         status = ql_change_rx_buffers(qdev);
4222         if (status) {
4223                 netif_err(qdev, ifup, qdev->ndev,
4224                           "Changing MTU failed.\n");
4225         }
4226
4227         return status;
4228 }
4229
4230 static struct net_device_stats *qlge_get_stats(struct net_device
4231                                                *ndev)
4232 {
4233         struct ql_adapter *qdev = netdev_priv(ndev);
4234         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4235         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4236         unsigned long pkts, mcast, dropped, errors, bytes;
4237         int i;
4238
4239         /* Get RX stats. */
4240         pkts = mcast = dropped = errors = bytes = 0;
4241         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4242                 pkts += rx_ring->rx_packets;
4243                 bytes += rx_ring->rx_bytes;
4244                 dropped += rx_ring->rx_dropped;
4245                 errors += rx_ring->rx_errors;
4246                 mcast += rx_ring->rx_multicast;
4247         }
4248         ndev->stats.rx_packets = pkts;
4249         ndev->stats.rx_bytes = bytes;
4250         ndev->stats.rx_dropped = dropped;
4251         ndev->stats.rx_errors = errors;
4252         ndev->stats.multicast = mcast;
4253
4254         /* Get TX stats. */
4255         pkts = errors = bytes = 0;
4256         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4257                 pkts += tx_ring->tx_packets;
4258                 bytes += tx_ring->tx_bytes;
4259                 errors += tx_ring->tx_errors;
4260         }
4261         ndev->stats.tx_packets = pkts;
4262         ndev->stats.tx_bytes = bytes;
4263         ndev->stats.tx_errors = errors;
4264         return &ndev->stats;
4265 }
4266
4267 static void qlge_set_multicast_list(struct net_device *ndev)
4268 {
4269         struct ql_adapter *qdev = netdev_priv(ndev);
4270         struct netdev_hw_addr *ha;
4271         int i, status;
4272
4273         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4274         if (status)
4275                 return;
4276         /*
4277          * Set or clear promiscuous mode if a
4278          * transition is taking place.
4279          */
4280         if (ndev->flags & IFF_PROMISC) {
4281                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4282                         if (ql_set_routing_reg
4283                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4284                                 netif_err(qdev, hw, qdev->ndev,
4285                                           "Failed to set promiscuous mode.\n");
4286                         } else {
4287                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4288                         }
4289                 }
4290         } else {
4291                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4292                         if (ql_set_routing_reg
4293                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4294                                 netif_err(qdev, hw, qdev->ndev,
4295                                           "Failed to clear promiscuous mode.\n");
4296                         } else {
4297                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4298                         }
4299                 }
4300         }
4301
4302         /*
4303          * Set or clear all multicast mode if a
4304          * transition is taking place.
4305          */
4306         if ((ndev->flags & IFF_ALLMULTI) ||
4307             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4308                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4309                         if (ql_set_routing_reg
4310                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4311                                 netif_err(qdev, hw, qdev->ndev,
4312                                           "Failed to set all-multi mode.\n");
4313                         } else {
4314                                 set_bit(QL_ALLMULTI, &qdev->flags);
4315                         }
4316                 }
4317         } else {
4318                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4319                         if (ql_set_routing_reg
4320                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4321                                 netif_err(qdev, hw, qdev->ndev,
4322                                           "Failed to clear all-multi mode.\n");
4323                         } else {
4324                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4325                         }
4326                 }
4327         }
4328
4329         if (!netdev_mc_empty(ndev)) {
4330                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4331                 if (status)
4332                         goto exit;
4333                 i = 0;
4334                 netdev_for_each_mc_addr(ha, ndev) {
4335                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4336                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4337                                 netif_err(qdev, hw, qdev->ndev,
4338                                           "Failed to load multicast address.\n");
4339                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4340                                 goto exit;
4341                         }
4342                         i++;
4343                 }
4344                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4345                 if (ql_set_routing_reg
4346                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4347                         netif_err(qdev, hw, qdev->ndev,
4348                                   "Failed to set multicast match mode.\n");
4349                 } else {
4350                         set_bit(QL_ALLMULTI, &qdev->flags);
4351                 }
4352         }
4353 exit:
4354         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4355 }
4356
4357 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4358 {
4359         struct ql_adapter *qdev = netdev_priv(ndev);
4360         struct sockaddr *addr = p;
4361         int status;
4362
4363         if (!is_valid_ether_addr(addr->sa_data))
4364                 return -EADDRNOTAVAIL;
4365         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4366         /* Update local copy of current mac address. */
4367         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4368
4369         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4370         if (status)
4371                 return status;
4372         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4373                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4374         if (status)
4375                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4376         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4377         return status;
4378 }
4379
4380 static void qlge_tx_timeout(struct net_device *ndev)
4381 {
4382         struct ql_adapter *qdev = netdev_priv(ndev);
4383         ql_queue_asic_error(qdev);
4384 }
4385
4386 static void ql_asic_reset_work(struct work_struct *work)
4387 {
4388         struct ql_adapter *qdev =
4389             container_of(work, struct ql_adapter, asic_reset_work.work);
4390         int status;
4391         rtnl_lock();
4392         status = ql_adapter_down(qdev);
4393         if (status)
4394                 goto error;
4395
4396         status = ql_adapter_up(qdev);
4397         if (status)
4398                 goto error;
4399
4400         /* Restore rx mode. */
4401         clear_bit(QL_ALLMULTI, &qdev->flags);
4402         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4403         qlge_set_multicast_list(qdev->ndev);
4404
4405         rtnl_unlock();
4406         return;
4407 error:
4408         netif_alert(qdev, ifup, qdev->ndev,
4409                     "Driver up/down cycle failed, closing device\n");
4410
4411         set_bit(QL_ADAPTER_UP, &qdev->flags);
4412         dev_close(qdev->ndev);
4413         rtnl_unlock();
4414 }
4415
4416 static const struct nic_operations qla8012_nic_ops = {
4417         .get_flash              = ql_get_8012_flash_params,
4418         .port_initialize        = ql_8012_port_initialize,
4419 };
4420
4421 static const struct nic_operations qla8000_nic_ops = {
4422         .get_flash              = ql_get_8000_flash_params,
4423         .port_initialize        = ql_8000_port_initialize,
4424 };
4425
4426 /* Find the pcie function number for the other NIC
4427  * on this chip.  Since both NIC functions share a
4428  * common firmware we have the lowest enabled function
4429  * do any common work.  Examples would be resetting
4430  * after a fatal firmware error, or doing a firmware
4431  * coredump.
4432  */
4433 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4434 {
4435         int status = 0;
4436         u32 temp;
4437         u32 nic_func1, nic_func2;
4438
4439         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4440                         &temp);
4441         if (status)
4442                 return status;
4443
4444         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4445                         MPI_TEST_NIC_FUNC_MASK);
4446         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4447                         MPI_TEST_NIC_FUNC_MASK);
4448
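        /* For example, if the test register reports NIC functions 0 and 1
         * and this device is function 0, alt_func becomes 1 (and vice
         * versa); any other combination means the register disagrees with
         * our own function number, which is treated as an error.
         */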
4449         if (qdev->func == nic_func1)
4450                 qdev->alt_func = nic_func2;
4451         else if (qdev->func == nic_func2)
4452                 qdev->alt_func = nic_func1;
4453         else
4454                 status = -EIO;
4455
4456         return status;
4457 }
4458
4459 static int ql_get_board_info(struct ql_adapter *qdev)
4460 {
4461         int status;
4462         qdev->func =
4463             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4464         if (qdev->func > 3)
4465                 return -EIO;
4466
4467         status = ql_get_alt_pcie_func(qdev);
4468         if (status)
4469                 return status;
4470
4471         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4472         if (qdev->port) {
4473                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4474                 qdev->port_link_up = STS_PL1;
4475                 qdev->port_init = STS_PI1;
4476                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4477                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4478         } else {
4479                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4480                 qdev->port_link_up = STS_PL0;
4481                 qdev->port_init = STS_PI0;
4482                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4483                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4484         }
4485         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4486         qdev->device_id = qdev->pdev->device;
4487         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4488                 qdev->nic_ops = &qla8012_nic_ops;
4489         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4490                 qdev->nic_ops = &qla8000_nic_ops;
4491         return status;
4492 }
4493
4494 static void ql_release_all(struct pci_dev *pdev)
4495 {
4496         struct net_device *ndev = pci_get_drvdata(pdev);
4497         struct ql_adapter *qdev = netdev_priv(ndev);
4498
4499         if (qdev->workqueue) {
4500                 destroy_workqueue(qdev->workqueue);
4501                 qdev->workqueue = NULL;
4502         }
4503
4504         if (qdev->reg_base)
4505                 iounmap(qdev->reg_base);
4506         if (qdev->doorbell_area)
4507                 iounmap(qdev->doorbell_area);
4508         vfree(qdev->mpi_coredump);
4509         pci_release_regions(pdev);
4510         pci_set_drvdata(pdev, NULL);
4511 }
4512
4513 static int __devinit ql_init_device(struct pci_dev *pdev,
4514                                     struct net_device *ndev, int cards_found)
4515 {
4516         struct ql_adapter *qdev = netdev_priv(ndev);
4517         int err = 0;
4518
4519         memset((void *)qdev, 0, sizeof(*qdev));
4520         err = pci_enable_device(pdev);
4521         if (err) {
4522                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4523                 return err;
4524         }
4525
4526         qdev->ndev = ndev;
4527         qdev->pdev = pdev;
4528         pci_set_drvdata(pdev, ndev);
4529
4530         /* Set PCIe read request size */
4531         err = pcie_set_readrq(pdev, 4096);
4532         if (err) {
4533                 dev_err(&pdev->dev, "Set readrq failed.\n");
4534                 goto err_out1;
4535         }
4536
4537         err = pci_request_regions(pdev, DRV_NAME);
4538         if (err) {
4539                 dev_err(&pdev->dev, "PCI region request failed.\n");
4540                 goto err_out1;
4541         }
4542
4543         pci_set_master(pdev);
4544         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4545                 set_bit(QL_DMA64, &qdev->flags);
4546                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4547         } else {
4548                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4549                 if (!err)
4550                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4551         }
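        /* The QL_DMA64 flag recorded above is what later enables
         * NETIF_F_HIGHDMA in qlge_probe().
         */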
4552
4553         if (err) {
4554                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4555                 goto err_out2;
4556         }
4557
4558         /* Set PCIe reset type for EEH to fundamental. */
4559         pdev->needs_freset = 1;
4560         pci_save_state(pdev);
4561         qdev->reg_base =
4562             ioremap_nocache(pci_resource_start(pdev, 1),
4563                             pci_resource_len(pdev, 1));
4564         if (!qdev->reg_base) {
4565                 dev_err(&pdev->dev, "Register mapping failed.\n");
4566                 err = -ENOMEM;
4567                 goto err_out2;
4568         }
4569
4570         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4571         qdev->doorbell_area =
4572             ioremap_nocache(pci_resource_start(pdev, 3),
4573                             pci_resource_len(pdev, 3));
4574         if (!qdev->doorbell_area) {
4575                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4576                 err = -ENOMEM;
4577                 goto err_out2;
4578         }
4579
4580         err = ql_get_board_info(qdev);
4581         if (err) {
4582                 dev_err(&pdev->dev, "Register access failed.\n");
4583                 err = -EIO;
4584                 goto err_out2;
4585         }
4586         qdev->msg_enable = netif_msg_init(debug, default_msg);
4587         spin_lock_init(&qdev->hw_lock);
4588         spin_lock_init(&qdev->stats_lock);
4589
4590         if (qlge_mpi_coredump) {
4591                 qdev->mpi_coredump =
4592                         vmalloc(sizeof(struct ql_mpi_coredump));
4593                 if (qdev->mpi_coredump == NULL) {
4594                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4595                         err = -ENOMEM;
4596                         goto err_out2;
4597                 }
4598                 if (qlge_force_coredump)
4599                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4600         }
4601         /* make sure the EEPROM is good */
4602         err = qdev->nic_ops->get_flash(qdev);
4603         if (err) {
4604                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4605                 goto err_out2;
4606         }
4607
4608         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4609         /* Keep local copy of current mac address. */
4610         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4611
4612         /* Set up the default ring sizes. */
4613         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4614         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4615
4616         /* Set up the coalescing parameters. */
4617         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4618         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4619         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4620         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4621
4622         /*
4623          * Set up the operating parameters.
4624          */
4625         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4626         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4627         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4628         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4629         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4630         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4631         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4632         init_completion(&qdev->ide_completion);
4633         mutex_init(&qdev->mpi_mutex);
4634
4635         if (!cards_found) {
4636                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4637                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4638                          DRV_NAME, DRV_VERSION);
4639         }
4640         return 0;
4641 err_out2:
4642         ql_release_all(pdev);
4643 err_out1:
4644         pci_disable_device(pdev);
4645         return err;
4646 }
4647
4648 static const struct net_device_ops qlge_netdev_ops = {
4649         .ndo_open               = qlge_open,
4650         .ndo_stop               = qlge_close,
4651         .ndo_start_xmit         = qlge_send,
4652         .ndo_change_mtu         = qlge_change_mtu,
4653         .ndo_get_stats          = qlge_get_stats,
4654         .ndo_set_multicast_list = qlge_set_multicast_list,
4655         .ndo_set_mac_address    = qlge_set_mac_address,
4656         .ndo_validate_addr      = eth_validate_addr,
4657         .ndo_tx_timeout         = qlge_tx_timeout,
4658         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4659         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4660         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4661 };
4662
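/* Runs every 5 seconds once the interface is probed.  The STS read is
 * intended to give the PCI error-recovery (EEH) machinery a chance to
 * notice a dead bus; if the channel is already offline the timer is not
 * re-armed.
 */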
4663 static void ql_timer(unsigned long data)
4664 {
4665         struct ql_adapter *qdev = (struct ql_adapter *)data;
4666         u32 var = 0;
4667
4668         var = ql_read32(qdev, STS);
4669         if (pci_channel_offline(qdev->pdev)) {
4670                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4671                 return;
4672         }
4673
4674         mod_timer(&qdev->timer, jiffies + (5*HZ));
4675 }
4676
4677 static int __devinit qlge_probe(struct pci_dev *pdev,
4678                                 const struct pci_device_id *pci_entry)
4679 {
4680         struct net_device *ndev = NULL;
4681         struct ql_adapter *qdev = NULL;
4682         static int cards_found = 0;
4683         int err = 0;
4684
4685         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4686                         min(MAX_CPUS, (int)num_online_cpus()));
4687         if (!ndev)
4688                 return -ENOMEM;
4689
4690         err = ql_init_device(pdev, ndev, cards_found);
4691         if (err < 0) {
4692                 free_netdev(ndev);
4693                 return err;
4694         }
4695
4696         qdev = netdev_priv(ndev);
4697         SET_NETDEV_DEV(ndev, &pdev->dev);
4698         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4699                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4700                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4701         ndev->features = ndev->hw_features |
4702                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4703
4704         if (test_bit(QL_DMA64, &qdev->flags))
4705                 ndev->features |= NETIF_F_HIGHDMA;
4706
4707         /*
4708          * Set up net_device structure.
4709          */
4710         ndev->tx_queue_len = qdev->tx_ring_size;
4711         ndev->irq = pdev->irq;
4712
4713         ndev->netdev_ops = &qlge_netdev_ops;
4714         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4715         ndev->watchdog_timeo = 10 * HZ;
4716
4717         err = register_netdev(ndev);
4718         if (err) {
4719                 dev_err(&pdev->dev, "net device registration failed.\n");
4720                 ql_release_all(pdev);
4721                 pci_disable_device(pdev);
4722                 return err;
4723         }
4724         /* Start up the timer to trigger EEH if
4725          * the bus goes dead
4726          */
4727         init_timer_deferrable(&qdev->timer);
4728         qdev->timer.data = (unsigned long)qdev;
4729         qdev->timer.function = ql_timer;
4730         qdev->timer.expires = jiffies + (5*HZ);
4731         add_timer(&qdev->timer);
4732         ql_link_off(qdev);
4733         ql_display_dev_info(ndev);
4734         atomic_set(&qdev->lb_count, 0);
4735         cards_found++;
4736         return 0;
4737 }
4738
4739 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4740 {
4741         return qlge_send(skb, ndev);
4742 }
4743
4744 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4745 {
4746         return ql_clean_inbound_rx_ring(rx_ring, budget);
4747 }
4748
4749 static void __devexit qlge_remove(struct pci_dev *pdev)
4750 {
4751         struct net_device *ndev = pci_get_drvdata(pdev);
4752         struct ql_adapter *qdev = netdev_priv(ndev);
4753         del_timer_sync(&qdev->timer);
4754         ql_cancel_all_work_sync(qdev);
4755         unregister_netdev(ndev);
4756         ql_release_all(pdev);
4757         pci_disable_device(pdev);
4758         free_netdev(ndev);
4759 }
4760
4761 /* Clean up resources without touching hardware. */
4762 static void ql_eeh_close(struct net_device *ndev)
4763 {
4764         int i;
4765         struct ql_adapter *qdev = netdev_priv(ndev);
4766
4767         if (netif_carrier_ok(ndev)) {
4768                 netif_carrier_off(ndev);
4769                 netif_stop_queue(ndev);
4770         }
4771
4772         /* Disabling the timer */
4773         del_timer_sync(&qdev->timer);
4774         ql_cancel_all_work_sync(qdev);
4775
4776         for (i = 0; i < qdev->rss_ring_count; i++)
4777                 netif_napi_del(&qdev->rx_ring[i].napi);
4778
4779         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4780         ql_tx_ring_clean(qdev);
4781         ql_free_rx_buffers(qdev);
4782         ql_release_adapter_resources(qdev);
4783 }
4784
4785 /*
4786  * This callback is called by the PCI subsystem whenever
4787  * a PCI bus error is detected.
4788  */
4789 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4790                                                enum pci_channel_state state)
4791 {
4792         struct net_device *ndev = pci_get_drvdata(pdev);
4793         struct ql_adapter *qdev = netdev_priv(ndev);
4794
4795         switch (state) {
4796         case pci_channel_io_normal:
4797                 return PCI_ERS_RESULT_CAN_RECOVER;
4798         case pci_channel_io_frozen:
4799                 netif_device_detach(ndev);
4800                 if (netif_running(ndev))
4801                         ql_eeh_close(ndev);
4802                 pci_disable_device(pdev);
4803                 return PCI_ERS_RESULT_NEED_RESET;
4804         case pci_channel_io_perm_failure:
4805                 dev_err(&pdev->dev,
4806                         "%s: pci_channel_io_perm_failure.\n", __func__);
4807                 ql_eeh_close(ndev);
4808                 set_bit(QL_EEH_FATAL, &qdev->flags);
4809                 return PCI_ERS_RESULT_DISCONNECT;
4810         }
4811
4812         /* Request a slot reset. */
4813         return PCI_ERS_RESULT_NEED_RESET;
4814 }
4815
4816 /*
4817  * This callback is called after the PCI bus has been reset.
4818  * Basically, this tries to restart the card from scratch.
4819  * This is a shortened version of the device probe/discovery code,
4820  * it resembles the first half of the qlge_probe() routine.
4821  */
4822 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4823 {
4824         struct net_device *ndev = pci_get_drvdata(pdev);
4825         struct ql_adapter *qdev = netdev_priv(ndev);
4826
4827         pdev->error_state = pci_channel_io_normal;
4828
4829         pci_restore_state(pdev);
4830         if (pci_enable_device(pdev)) {
4831                 netif_err(qdev, ifup, qdev->ndev,
4832                           "Cannot re-enable PCI device after reset.\n");
4833                 return PCI_ERS_RESULT_DISCONNECT;
4834         }
4835         pci_set_master(pdev);
4836
4837         if (ql_adapter_reset(qdev)) {
4838                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4839                 set_bit(QL_EEH_FATAL, &qdev->flags);
4840                 return PCI_ERS_RESULT_DISCONNECT;
4841         }
4842
4843         return PCI_ERS_RESULT_RECOVERED;
4844 }
4845
4846 static void qlge_io_resume(struct pci_dev *pdev)
4847 {
4848         struct net_device *ndev = pci_get_drvdata(pdev);
4849         struct ql_adapter *qdev = netdev_priv(ndev);
4850         int err = 0;
4851
4852         if (netif_running(ndev)) {
4853                 err = qlge_open(ndev);
4854                 if (err) {
4855                         netif_err(qdev, ifup, qdev->ndev,
4856                                   "Device initialization failed after reset.\n");
4857                         return;
4858                 }
4859         } else {
4860                 netif_err(qdev, ifup, qdev->ndev,
4861                           "Device was not running prior to EEH.\n");
4862         }
4863         mod_timer(&qdev->timer, jiffies + (5*HZ));
4864         netif_device_attach(ndev);
4865 }
4866
4867 static struct pci_error_handlers qlge_err_handler = {
4868         .error_detected = qlge_io_error_detected,
4869         .slot_reset = qlge_io_slot_reset,
4870         .resume = qlge_io_resume,
4871 };
4872
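/* qlge_suspend() is not under CONFIG_PM; qlge_shutdown() below calls it
 * unconditionally.
 */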
4873 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4874 {
4875         struct net_device *ndev = pci_get_drvdata(pdev);
4876         struct ql_adapter *qdev = netdev_priv(ndev);
4877         int err;
4878
4879         netif_device_detach(ndev);
4880         del_timer_sync(&qdev->timer);
4881
4882         if (netif_running(ndev)) {
4883                 err = ql_adapter_down(qdev);
4884                 if (err)
4885                         return err;
4886         }
4887
4888         ql_wol(qdev);
4889         err = pci_save_state(pdev);
4890         if (err)
4891                 return err;
4892
4893         pci_disable_device(pdev);
4894
4895         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4896
4897         return 0;
4898 }
4899
4900 #ifdef CONFIG_PM
4901 static int qlge_resume(struct pci_dev *pdev)
4902 {
4903         struct net_device *ndev = pci_get_drvdata(pdev);
4904         struct ql_adapter *qdev = netdev_priv(ndev);
4905         int err;
4906
4907         pci_set_power_state(pdev, PCI_D0);
4908         pci_restore_state(pdev);
4909         err = pci_enable_device(pdev);
4910         if (err) {
4911                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4912                 return err;
4913         }
4914         pci_set_master(pdev);
4915
4916         pci_enable_wake(pdev, PCI_D3hot, 0);
4917         pci_enable_wake(pdev, PCI_D3cold, 0);
4918
4919         if (netif_running(ndev)) {
4920                 err = ql_adapter_up(qdev);
4921                 if (err)
4922                         return err;
4923         }
4924
4925         mod_timer(&qdev->timer, jiffies + (5*HZ));
4926         netif_device_attach(ndev);
4927
4928         return 0;
4929 }
4930 #endif /* CONFIG_PM */
4931
4932 static void qlge_shutdown(struct pci_dev *pdev)
4933 {
4934         qlge_suspend(pdev, PMSG_SUSPEND);
4935 }
4936
4937 static struct pci_driver qlge_driver = {
4938         .name = DRV_NAME,
4939         .id_table = qlge_pci_tbl,
4940         .probe = qlge_probe,
4941         .remove = __devexit_p(qlge_remove),
4942 #ifdef CONFIG_PM
4943         .suspend = qlge_suspend,
4944         .resume = qlge_resume,
4945 #endif
4946         .shutdown = qlge_shutdown,
4947         .err_handler = &qlge_err_handler
4948 };
4949
4950 static int __init qlge_init_module(void)
4951 {
4952         return pci_register_driver(&qlge_driver);
4953 }
4954
4955 static void __exit qlge_exit(void)
4956 {
4957         pci_unregister_driver(&qlge_driver);
4958 }
4959
4960 module_init(qlge_init_module);
4961 module_exit(qlge_exit);