drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = -1;  /* defaults above */
66 module_param(debug, int, 0664);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, 0664);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79                 "Option to enable MPI firmware dump. "
80                 "Default is OFF - Do Not allocate memory. ");
81
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85                 "Option to allow force of firmware core dump. "
86                 "Default is OFF - Do not allow.");
87
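/* Illustrative usage of the module parameters above (not part of the driver,
 * and the values are examples only): at load time one might do
 *
 *     modprobe qlge debug=7 qlge_irq_type=1 qlge_mpi_coredump=1
 *
 * which raises the message level on the 0..16 scale described above, requests
 * MSI instead of the default MSI-X, and allocates memory for an MPI firmware
 * dump.
 */
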
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91         /* required last entry */
92         {0,}
93 };
94
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97 static int ql_wol(struct ql_adapter *qdev);
98 static void qlge_set_multicast_list(struct net_device *ndev);
99
100 /* This hardware semaphore provides exclusive access to
101  * resources shared between the NIC driver, MPI firmware,
102  * FCoE firmware and the FC driver.
103  */
104 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
105 {
106         u32 sem_bits = 0;
107
108         switch (sem_mask) {
109         case SEM_XGMAC0_MASK:
110                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
111                 break;
112         case SEM_XGMAC1_MASK:
113                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
114                 break;
115         case SEM_ICB_MASK:
116                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
117                 break;
118         case SEM_MAC_ADDR_MASK:
119                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
120                 break;
121         case SEM_FLASH_MASK:
122                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
123                 break;
124         case SEM_PROBE_MASK:
125                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
126                 break;
127         case SEM_RT_IDX_MASK:
128                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
129                 break;
130         case SEM_PROC_REG_MASK:
131                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
132                 break;
133         default:
134                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
135                 return -EINVAL;
136         }
137
138         ql_write32(qdev, SEM, sem_bits | sem_mask);
139         return !(ql_read32(qdev, SEM) & sem_bits);
140 }
141
142 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
143 {
144         unsigned int wait_count = 30;
145         do {
146                 if (!ql_sem_trylock(qdev, sem_mask))
147                         return 0;
148                 udelay(100);
149         } while (--wait_count);
150         return -ETIMEDOUT;
151 }
152
153 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
154 {
155         ql_write32(qdev, SEM, sem_mask);
156         ql_read32(qdev, SEM);   /* flush */
157 }
158
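/* Illustrative sketch of how the semaphore routines above are meant to be
 * paired: a caller takes the hardware semaphore for the resource it needs,
 * does its register work, and releases it.  For example, the MAC address
 * CAM update later in this file (ql_set_mac_addr()) does:
 *
 *     status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *     if (status)
 *             return status;
 *     status = ql_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC,
 *                                  qdev->func * MAX_CQ);
 *     ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */
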
159 /* This function waits for a specific bit to come ready
160  * in a given register.  It is used mostly by the initialization
161  * process, but is also called from kernel thread context by
162  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
163  */
164 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
165 {
166         u32 temp;
167         int count = UDELAY_COUNT;
168
169         while (count) {
170                 temp = ql_read32(qdev, reg);
171
172                 /* check for errors */
173                 if (temp & err_bit) {
174                         netif_alert(qdev, probe, qdev->ndev,
175                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
176                                     reg, temp);
177                         return -EIO;
178                 } else if (temp & bit)
179                         return 0;
180                 udelay(UDELAY_DELAY);
181                 count--;
182         }
183         netif_alert(qdev, probe, qdev->ndev,
184                     "Timed out waiting for reg %x to come ready.\n", reg);
185         return -ETIMEDOUT;
186 }
187
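/* Illustrative sketch of the typical ql_wait_reg_rdy() pattern, as used by
 * ql_read_flash_word() later in this file: wait for the ready bit (with an
 * error bit that short-circuits to -EIO), program the indexed address
 * register, then wait again before reading the data register:
 *
 *     status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
 *                              FLASH_ADDR_ERR);
 *     if (status)
 *             return status;
 *     ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 *     status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
 *                              FLASH_ADDR_ERR);
 */
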
188 /* The CFG register is used to download TX and RX control blocks
189  * to the chip. This function waits for an operation to complete.
190  */
191 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
192 {
193         int count = UDELAY_COUNT;
194         u32 temp;
195
196         while (count) {
197                 temp = ql_read32(qdev, CFG);
198                 if (temp & CFG_LE)
199                         return -EIO;
200                 if (!(temp & bit))
201                         return 0;
202                 udelay(UDELAY_DELAY);
203                 count--;
204         }
205         return -ETIMEDOUT;
206 }
207
208
209 /* Used to issue init control blocks to hw. Maps control block,
210  * sets address, triggers download, waits for completion.
211  */
212 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
213                  u16 q_id)
214 {
215         u64 map;
216         int status = 0;
217         int direction;
218         u32 mask;
219         u32 value;
220
221         direction =
222             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
223             PCI_DMA_FROMDEVICE;
224
225         map = pci_map_single(qdev->pdev, ptr, size, direction);
226         if (pci_dma_mapping_error(qdev->pdev, map)) {
227                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228                 return -ENOMEM;
229         }
230
231         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232         if (status)
233                 goto lock_failed;
234
235         status = ql_wait_cfg(qdev, bit);
236         if (status) {
237                 netif_err(qdev, ifup, qdev->ndev,
238                           "Timed out waiting for CFG to come ready.\n");
239                 goto exit;
240         }
241
242         ql_write32(qdev, ICB_L, (u32) map);
243         ql_write32(qdev, ICB_H, (u32) (map >> 32));
244
245         mask = CFG_Q_MASK | (bit << 16);
246         value = bit | (q_id << CFG_Q_SHIFT);
247         ql_write32(qdev, CFG, (mask | value));
248
249         /*
250          * Wait for the bit to clear after signaling hw.
251          */
252         status = ql_wait_cfg(qdev, bit);
253 exit:
254         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
lock_failed:
255         pci_unmap_single(qdev->pdev, map, size, direction);
256         return status;
257 }
258
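/* Illustrative sketch only: the queue bring-up code (outside this excerpt)
 * uses ql_write_cfg() to download an init control block for each queue it
 * creates, e.g. for a completion queue something along the lines of
 *
 *     status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *                           CFG_LCQ, rx_ring->cq_id);
 *
 * where CFG_LCQ selects the "load completion queue" operation and cq_id
 * selects which queue the block describes.
 */
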
259 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
260 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
261                         u32 *value)
262 {
263         u32 offset = 0;
264         int status;
265
266         switch (type) {
267         case MAC_ADDR_TYPE_MULTI_MAC:
268         case MAC_ADDR_TYPE_CAM_MAC:
269                 {
270                         status =
271                             ql_wait_reg_rdy(qdev,
272                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
273                         if (status)
274                                 goto exit;
275                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
277                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
278                         status =
279                             ql_wait_reg_rdy(qdev,
280                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
281                         if (status)
282                                 goto exit;
283                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
284                         status =
285                             ql_wait_reg_rdy(qdev,
286                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287                         if (status)
288                                 goto exit;
289                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
291                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292                         status =
293                             ql_wait_reg_rdy(qdev,
294                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
295                         if (status)
296                                 goto exit;
297                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
299                                 status =
300                                     ql_wait_reg_rdy(qdev,
301                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
302                                 if (status)
303                                         goto exit;
304                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
306                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
307                                 status =
308                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309                                                     MAC_ADDR_MR, 0);
310                                 if (status)
311                                         goto exit;
312                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
313                         }
314                         break;
315                 }
316         case MAC_ADDR_TYPE_VLAN:
317         case MAC_ADDR_TYPE_MULTI_FLTR:
318         default:
319                 netif_crit(qdev, ifup, qdev->ndev,
320                            "Address type %d not yet supported.\n", type);
321                 status = -EPERM;
322         }
323 exit:
324         return status;
325 }
326
327 /* Set up a MAC, multicast or VLAN address for the
328  * inbound frame matching.
329  */
330 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
331                                u16 index)
332 {
333         u32 offset = 0;
334         int status = 0;
335
336         switch (type) {
337         case MAC_ADDR_TYPE_MULTI_MAC:
338                 {
339                         u32 upper = (addr[0] << 8) | addr[1];
340                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
341                                         (addr[4] << 8) | (addr[5]);
342
343                         status =
344                                 ql_wait_reg_rdy(qdev,
345                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
346                         if (status)
347                                 goto exit;
348                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
349                                 (index << MAC_ADDR_IDX_SHIFT) |
350                                 type | MAC_ADDR_E);
351                         ql_write32(qdev, MAC_ADDR_DATA, lower);
352                         status =
353                                 ql_wait_reg_rdy(qdev,
354                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
355                         if (status)
356                                 goto exit;
357                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358                                 (index << MAC_ADDR_IDX_SHIFT) |
359                                 type | MAC_ADDR_E);
360
361                         ql_write32(qdev, MAC_ADDR_DATA, upper);
362                         status =
363                                 ql_wait_reg_rdy(qdev,
364                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
365                         if (status)
366                                 goto exit;
367                         break;
368                 }
369         case MAC_ADDR_TYPE_CAM_MAC:
370                 {
371                         u32 cam_output;
372                         u32 upper = (addr[0] << 8) | addr[1];
373                         u32 lower =
374                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
375                             (addr[5]);
376
377                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
378                                      "Adding %s address %pM at index %d in the CAM.\n",
379                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
380                                      "MULTICAST" : "UNICAST",
381                                      addr, index);
382
383                         status =
384                             ql_wait_reg_rdy(qdev,
385                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
386                         if (status)
387                                 goto exit;
388                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
389                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
390                                    type);       /* type */
391                         ql_write32(qdev, MAC_ADDR_DATA, lower);
392                         status =
393                             ql_wait_reg_rdy(qdev,
394                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
395                         if (status)
396                                 goto exit;
397                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
398                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
399                                    type);       /* type */
400                         ql_write32(qdev, MAC_ADDR_DATA, upper);
401                         status =
402                             ql_wait_reg_rdy(qdev,
403                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
404                         if (status)
405                                 goto exit;
406                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
407                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
408                                    type);       /* type */
409                         /* This field should also include the queue id
410                            and possibly the function id.  Right now we hardcode
411                            the route field to NIC core.
412                          */
413                         cam_output = (CAM_OUT_ROUTE_NIC |
414                                       (qdev->
415                                        func << CAM_OUT_FUNC_SHIFT) |
416                                         (0 << CAM_OUT_CQ_ID_SHIFT));
417                         if (qdev->vlgrp)
418                                 cam_output |= CAM_OUT_RV;
419                         /* route to NIC core */
420                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
421                         break;
422                 }
423         case MAC_ADDR_TYPE_VLAN:
424                 {
425                         u32 enable_bit = *((u32 *) &addr[0]);
426                         /* For VLAN, the addr actually holds a bit that
427                          * either enables or disables the vlan id we are
428                          * addressing. It's either MAC_ADDR_E on or off.
429                          * That's bit-27 we're talking about.
430                          */
431                         netif_info(qdev, ifup, qdev->ndev,
432                                    "%s VLAN ID %d %s the CAM.\n",
433                                    enable_bit ? "Adding" : "Removing",
434                                    index,
435                                    enable_bit ? "to" : "from");
436
437                         status =
438                             ql_wait_reg_rdy(qdev,
439                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
440                         if (status)
441                                 goto exit;
442                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
443                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
444                                    type |       /* type */
445                                    enable_bit); /* enable/disable */
446                         break;
447                 }
448         case MAC_ADDR_TYPE_MULTI_FLTR:
449         default:
450                 netif_crit(qdev, ifup, qdev->ndev,
451                            "Address type %d not yet supported.\n", type);
452                 status = -EPERM;
453         }
454 exit:
455         return status;
456 }
457
458 /* Set or clear MAC address in hardware. We sometimes
459  * have to clear it to prevent wrong frame routing
460  * especially in a bonding environment.
461  */
462 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
463 {
464         int status;
465         char zero_mac_addr[ETH_ALEN];
466         char *addr;
467
468         if (set) {
469                 addr = &qdev->current_mac_addr[0];
470                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
471                              "Set Mac addr %pM\n", addr);
472         } else {
473                 memset(zero_mac_addr, 0, ETH_ALEN);
474                 addr = &zero_mac_addr[0];
475                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
476                              "Clearing MAC address\n");
477         }
478         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
479         if (status)
480                 return status;
481         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
482                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
483         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
484         if (status)
485                 netif_err(qdev, ifup, qdev->ndev,
486                           "Failed to init mac address.\n");
487         return status;
488 }
489
490 void ql_link_on(struct ql_adapter *qdev)
491 {
492         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
493         netif_carrier_on(qdev->ndev);
494         ql_set_mac_addr(qdev, 1);
495 }
496
497 void ql_link_off(struct ql_adapter *qdev)
498 {
499         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
500         netif_carrier_off(qdev->ndev);
501         ql_set_mac_addr(qdev, 0);
502 }
503
504 /* Get a specific frame routing value from the CAM.
505  * Used for debug and reg dump.
506  */
507 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508 {
509         int status = 0;
510
511         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
512         if (status)
513                 goto exit;
514
515         ql_write32(qdev, RT_IDX,
516                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
517         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
518         if (status)
519                 goto exit;
520         *value = ql_read32(qdev, RT_DATA);
521 exit:
522         return status;
523 }
524
525 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
526  * to route different frame types to various inbound queues.  We send broadcast/
527  * multicast/error frames to the default queue for slow handling,
528  * and CAM hit/RSS frames to the fast handling queues.
529  */
530 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
531                               int enable)
532 {
533         int status = -EINVAL; /* Return error if no mask match. */
534         u32 value = 0;
535
536         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
537                      "%s %s mask %s the routing reg.\n",
538                      enable ? "Adding" : "Removing",
539                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
540                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
541                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
542                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
543                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
544                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
545                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
546                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
547                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
548                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
549                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
550                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
551                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
552                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
553                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
554                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
555                      "(Bad index != RT_IDX)",
556                      enable ? "to" : "from");
557
558         switch (mask) {
559         case RT_IDX_CAM_HIT:
560                 {
561                         value = RT_IDX_DST_CAM_Q |      /* dest */
562                             RT_IDX_TYPE_NICQ |  /* type */
563                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
564                         break;
565                 }
566         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
567                 {
568                         value = RT_IDX_DST_DFLT_Q |     /* dest */
569                             RT_IDX_TYPE_NICQ |  /* type */
570                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
571                         break;
572                 }
573         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
574                 {
575                         value = RT_IDX_DST_DFLT_Q |     /* dest */
576                             RT_IDX_TYPE_NICQ |  /* type */
577                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
578                         break;
579                 }
580         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
581                 {
582                         value = RT_IDX_DST_DFLT_Q | /* dest */
583                                 RT_IDX_TYPE_NICQ | /* type */
584                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
585                                 RT_IDX_IDX_SHIFT); /* index */
586                         break;
587                 }
588         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
589                 {
590                         value = RT_IDX_DST_DFLT_Q | /* dest */
591                                 RT_IDX_TYPE_NICQ | /* type */
592                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
593                                 RT_IDX_IDX_SHIFT); /* index */
594                         break;
595                 }
596         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
597                 {
598                         value = RT_IDX_DST_DFLT_Q |     /* dest */
599                             RT_IDX_TYPE_NICQ |  /* type */
600                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
601                         break;
602                 }
603         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
604                 {
605                         value = RT_IDX_DST_DFLT_Q |     /* dest */
606                             RT_IDX_TYPE_NICQ |  /* type */
607                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
608                         break;
609                 }
610         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
611                 {
612                         value = RT_IDX_DST_DFLT_Q |     /* dest */
613                             RT_IDX_TYPE_NICQ |  /* type */
614                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
615                         break;
616                 }
617         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
618                 {
619                         value = RT_IDX_DST_RSS |        /* dest */
620                             RT_IDX_TYPE_NICQ |  /* type */
621                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
622                         break;
623                 }
624         case 0:         /* Clear the E-bit on an entry. */
625                 {
626                         value = RT_IDX_DST_DFLT_Q |     /* dest */
627                             RT_IDX_TYPE_NICQ |  /* type */
628                             (index << RT_IDX_IDX_SHIFT);/* index */
629                         break;
630                 }
631         default:
632                 netif_err(qdev, ifup, qdev->ndev,
633                           "Mask type %d not yet supported.\n", mask);
634                 status = -EPERM;
635                 goto exit;
636         }
637
638         if (value) {
639                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
640                 if (status)
641                         goto exit;
642                 value |= (enable ? RT_IDX_E : 0);
643                 ql_write32(qdev, RT_IDX, value);
644                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
645         }
646 exit:
647         return status;
648 }
649
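/* Illustrative sketch only: during bring-up (outside this excerpt) the driver
 * enables a handful of these routing slots, roughly
 *
 *     status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
 *     if (!status)
 *             status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *                                         RT_IDX_BCAST, 1);
 *     if (!status)
 *             status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
 *                                         RT_IDX_CAM_HIT, 1);
 *
 * so error and broadcast frames land on the default queue while CAM hits go
 * to the fast-path queues.
 */
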
650 static void ql_enable_interrupts(struct ql_adapter *qdev)
651 {
652         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
653 }
654
655 static void ql_disable_interrupts(struct ql_adapter *qdev)
656 {
657         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
658 }
659
660 /* If we're running with multiple MSI-X vectors then we enable on the fly.
661  * Otherwise, we may have multiple outstanding workers and don't want to
662  * enable until the last one finishes. In this case, the irq_cnt gets
663  * incremented every time we queue a worker and decremented every time
664  * a worker finishes.  Once it hits zero we enable the interrupt.
665  */
666 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
667 {
668         u32 var = 0;
669         unsigned long hw_flags = 0;
670         struct intr_context *ctx = qdev->intr_context + intr;
671
672         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673                 /* Always enable if we're MSIX multi interrupts and
674                  * it's not the default (zeroeth) interrupt.
675                  */
676                 ql_write32(qdev, INTR_EN,
677                            ctx->intr_en_mask);
678                 var = ql_read32(qdev, STS);
679                 return var;
680         }
681
682         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
683         if (atomic_dec_and_test(&ctx->irq_cnt)) {
684                 ql_write32(qdev, INTR_EN,
685                            ctx->intr_en_mask);
686                 var = ql_read32(qdev, STS);
687         }
688         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
689         return var;
690 }
691
692 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
693 {
694         u32 var = 0;
695         struct intr_context *ctx;
696
697         /* HW disables for us if we're MSIX multi interrupts and
698          * it's not the default (zeroeth) interrupt.
699          */
700         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
701                 return 0;
702
703         ctx = qdev->intr_context + intr;
704         spin_lock(&qdev->hw_lock);
705         if (!atomic_read(&ctx->irq_cnt)) {
706                 ql_write32(qdev, INTR_EN,
707                            ctx->intr_dis_mask);
708                 var = ql_read32(qdev, STS);
709         }
710         atomic_inc(&ctx->irq_cnt);
711         spin_unlock(&qdev->hw_lock);
712         return var;
713 }
714
715 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
716 {
717         int i;
718         for (i = 0; i < qdev->intr_count; i++) {
719                 /* The enable call does an atomic_dec_and_test
720                  * and enables only if the result is zero.
721                  * So we precharge it here.
722                  */
723                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
724                         i == 0))
725                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
726                 ql_enable_completion_interrupt(qdev, i);
727         }
728
729 }
730
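/* Illustrative sketch of the pairing described above for the non-MSI-X /
 * default-vector case (the real interrupt handler is outside this excerpt):
 * the ISR masks the completion interrupt, schedules the deferred work, and
 * the worker re-enables it when it finishes:
 *
 *     ql_disable_completion_interrupt(qdev, intr);
 *     napi_schedule(&rx_ring->napi);
 *     ...
 *     ql_enable_completion_interrupt(qdev, intr);
 *
 * irq_cnt keeps the vector masked until the last outstanding worker calls
 * the enable routine.
 */
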
731 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
732 {
733         int status, i;
734         u16 csum = 0;
735         __le16 *flash = (__le16 *)&qdev->flash;
736
737         status = strncmp((char *)&qdev->flash, str, 4);
738         if (status) {
739                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
740                 return  status;
741         }
742
743         for (i = 0; i < size; i++)
744                 csum += le16_to_cpu(*flash++);
745
746         if (csum)
747                 netif_err(qdev, ifup, qdev->ndev,
748                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
749
750         return csum;
751 }
752
753 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
754 {
755         int status = 0;
756         /* wait for reg to come ready */
757         status = ql_wait_reg_rdy(qdev,
758                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
759         if (status)
760                 goto exit;
761         /* set up for reg read */
762         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
763         /* wait for reg to come ready */
764         status = ql_wait_reg_rdy(qdev,
765                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
766         if (status)
767                 goto exit;
768         /* This data is stored on flash as an array of
769          * __le32.  Since ql_read32() returns CPU-endian
770          * values we need to swap it back.
771          */
772         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
773 exit:
774         return status;
775 }
776
777 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
778 {
779         u32 i, size;
780         int status;
781         __le32 *p = (__le32 *)&qdev->flash;
782         u32 offset;
783         u8 mac_addr[6];
784
785         /* Get flash offset for function and adjust
786          * for dword access.
787          */
788         if (!qdev->port)
789                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
790         else
791                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
792
793         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
794                 return -ETIMEDOUT;
795
796         size = sizeof(struct flash_params_8000) / sizeof(u32);
797         for (i = 0; i < size; i++, p++) {
798                 status = ql_read_flash_word(qdev, i+offset, p);
799                 if (status) {
800                         netif_err(qdev, ifup, qdev->ndev,
801                                   "Error reading flash.\n");
802                         goto exit;
803                 }
804         }
805
806         status = ql_validate_flash(qdev,
807                         sizeof(struct flash_params_8000) / sizeof(u16),
808                         "8000");
809         if (status) {
810                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
811                 status = -EINVAL;
812                 goto exit;
813         }
814
815         /* Extract either manufacturer or BOFM modified
816          * MAC address.
817          */
818         if (qdev->flash.flash_params_8000.data_type1 == 2)
819                 memcpy(mac_addr,
820                         qdev->flash.flash_params_8000.mac_addr1,
821                         qdev->ndev->addr_len);
822         else
823                 memcpy(mac_addr,
824                         qdev->flash.flash_params_8000.mac_addr,
825                         qdev->ndev->addr_len);
826
827         if (!is_valid_ether_addr(mac_addr)) {
828                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
829                 status = -EINVAL;
830                 goto exit;
831         }
832
833         memcpy(qdev->ndev->dev_addr,
834                 mac_addr,
835                 qdev->ndev->addr_len);
836
837 exit:
838         ql_sem_unlock(qdev, SEM_FLASH_MASK);
839         return status;
840 }
841
842 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
843 {
844         int i;
845         int status;
846         __le32 *p = (__le32 *)&qdev->flash;
847         u32 offset = 0;
848         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
849
850         /* Second function's parameters follow the first
851          * function's.
852          */
853         if (qdev->port)
854                 offset = size;
855
856         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
857                 return -ETIMEDOUT;
858
859         for (i = 0; i < size; i++, p++) {
860                 status = ql_read_flash_word(qdev, i+offset, p);
861                 if (status) {
862                         netif_err(qdev, ifup, qdev->ndev,
863                                   "Error reading flash.\n");
864                         goto exit;
865                 }
866
867         }
868
869         status = ql_validate_flash(qdev,
870                         sizeof(struct flash_params_8012) / sizeof(u16),
871                         "8012");
872         if (status) {
873                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
874                 status = -EINVAL;
875                 goto exit;
876         }
877
878         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
879                 status = -EINVAL;
880                 goto exit;
881         }
882
883         memcpy(qdev->ndev->dev_addr,
884                 qdev->flash.flash_params_8012.mac_addr,
885                 qdev->ndev->addr_len);
886
887 exit:
888         ql_sem_unlock(qdev, SEM_FLASH_MASK);
889         return status;
890 }
891
892 /* XGMAC registers are located behind the XGMAC_ADDR and XGMAC_DATA
893  * register pair.  Each read/write requires us to wait for the ready
894  * bit before reading/writing the data.
895  */
896 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
897 {
898         int status;
899         /* wait for reg to come ready */
900         status = ql_wait_reg_rdy(qdev,
901                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902         if (status)
903                 return status;
904         /* write the data to the data reg */
905         ql_write32(qdev, XGMAC_DATA, data);
906         /* trigger the write */
907         ql_write32(qdev, XGMAC_ADDR, reg);
908         return status;
909 }
910
911 /* XGMAC registers are located behind the XGMAC_ADDR and XGMAC_DATA
912  * register pair.  Each read/write requires us to wait for the ready
913  * bit before reading/writing the data.
914  */
915 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
916 {
917         int status = 0;
918         /* wait for reg to come ready */
919         status = ql_wait_reg_rdy(qdev,
920                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
921         if (status)
922                 goto exit;
923         /* set up for reg read */
924         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
925         /* wait for reg to come ready */
926         status = ql_wait_reg_rdy(qdev,
927                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
928         if (status)
929                 goto exit;
930         /* get the data */
931         *data = ql_read32(qdev, XGMAC_DATA);
932 exit:
933         return status;
934 }
935
936 /* This is used for reading the 64-bit statistics regs. */
937 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
938 {
939         int status = 0;
940         u32 hi = 0;
941         u32 lo = 0;
942
943         status = ql_read_xgmac_reg(qdev, reg, &lo);
944         if (status)
945                 goto exit;
946
947         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
948         if (status)
949                 goto exit;
950
951         *data = (u64) lo | ((u64) hi << 32);
952
953 exit:
954         return status;
955 }
956
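/* Illustrative sketch only (register offset and destination field are
 * examples, not taken from this excerpt): the statistics refresh path reads
 * each 64-bit XGMAC counter with the helper above, e.g.
 *
 *     u64 tx_pkts;
 *
 *     if (!ql_read_xgmac_reg64(qdev, 0x200, &tx_pkts))
 *             qdev->nic_stats.tx_pkts = tx_pkts;
 */
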
957 static int ql_8000_port_initialize(struct ql_adapter *qdev)
958 {
959         int status;
960         /*
961          * Get MPI firmware version for driver banner
962          * and ethtool info.
963          */
964         status = ql_mb_about_fw(qdev);
965         if (status)
966                 goto exit;
967         status = ql_mb_get_fw_state(qdev);
968         if (status)
969                 goto exit;
970         /* Wake up a worker to get/set the TX/RX frame sizes. */
971         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
972 exit:
973         return status;
974 }
975
976 /* Take the MAC Core out of reset.
977  * Enable statistics counting.
978  * Take the transmitter/receiver out of reset.
979  * This functionality may be done in the MPI firmware at a
980  * later date.
981  */
982 static int ql_8012_port_initialize(struct ql_adapter *qdev)
983 {
984         int status = 0;
985         u32 data;
986
987         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988                 /* Another function has the semaphore, so
989                  * wait for the port init bit to come ready.
990                  */
991                 netif_info(qdev, link, qdev->ndev,
992                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
993                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
994                 if (status) {
995                         netif_crit(qdev, link, qdev->ndev,
996                                    "Port initialize timed out.\n");
997                 }
998                 return status;
999         }
1000
1001         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1002         /* Set the core reset. */
1003         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1004         if (status)
1005                 goto end;
1006         data |= GLOBAL_CFG_RESET;
1007         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1008         if (status)
1009                 goto end;
1010
1011         /* Clear the core reset and turn on jumbo for receiver. */
1012         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1013         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1014         data |= GLOBAL_CFG_TX_STAT_EN;
1015         data |= GLOBAL_CFG_RX_STAT_EN;
1016         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1017         if (status)
1018                 goto end;
1019
1020         /* Enable the transmitter and clear its reset. */
1021         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1022         if (status)
1023                 goto end;
1024         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1025         data |= TX_CFG_EN;      /* Enable the transmitter. */
1026         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1027         if (status)
1028                 goto end;
1029
1030         /* Enable the receiver and clear its reset. */
1031         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1032         if (status)
1033                 goto end;
1034         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1035         data |= RX_CFG_EN;      /* Enable the receiver. */
1036         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1037         if (status)
1038                 goto end;
1039
1040         /* Turn on jumbo. */
1041         status =
1042             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1043         if (status)
1044                 goto end;
1045         status =
1046             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1047         if (status)
1048                 goto end;
1049
1050         /* Signal to the world that the port is enabled.        */
1051         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1052 end:
1053         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1054         return status;
1055 }
1056
1057 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1058 {
1059         return PAGE_SIZE << qdev->lbq_buf_order;
1060 }
1061
1062 /* Get the next large buffer. */
1063 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1064 {
1065         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1066         rx_ring->lbq_curr_idx++;
1067         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1068                 rx_ring->lbq_curr_idx = 0;
1069         rx_ring->lbq_free_cnt++;
1070         return lbq_desc;
1071 }
1072
1073 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1074                 struct rx_ring *rx_ring)
1075 {
1076         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1077
1078         pci_dma_sync_single_for_cpu(qdev->pdev,
1079                                         dma_unmap_addr(lbq_desc, mapaddr),
1080                                     rx_ring->lbq_buf_size,
1081                                         PCI_DMA_FROMDEVICE);
1082
1083         /* If it's the last chunk of our master page then
1084          * we unmap it.
1085          */
1086         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1087                                         == ql_lbq_block_size(qdev))
1088                 pci_unmap_page(qdev->pdev,
1089                                 lbq_desc->p.pg_chunk.map,
1090                                 ql_lbq_block_size(qdev),
1091                                 PCI_DMA_FROMDEVICE);
1092         return lbq_desc;
1093 }
1094
1095 /* Get the next small buffer. */
1096 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1097 {
1098         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1099         rx_ring->sbq_curr_idx++;
1100         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1101                 rx_ring->sbq_curr_idx = 0;
1102         rx_ring->sbq_free_cnt++;
1103         return sbq_desc;
1104 }
1105
1106 /* Update an rx ring index. */
1107 static void ql_update_cq(struct rx_ring *rx_ring)
1108 {
1109         rx_ring->cnsmr_idx++;
1110         rx_ring->curr_entry++;
1111         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1112                 rx_ring->cnsmr_idx = 0;
1113                 rx_ring->curr_entry = rx_ring->cq_base;
1114         }
1115 }
1116
1117 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1118 {
1119         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1120 }
1121
1122 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1123                                                 struct bq_desc *lbq_desc)
1124 {
1125         if (!rx_ring->pg_chunk.page) {
1126                 u64 map;
1127                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1128                                                 GFP_ATOMIC,
1129                                                 qdev->lbq_buf_order);
1130                 if (unlikely(!rx_ring->pg_chunk.page)) {
1131                         netif_err(qdev, drv, qdev->ndev,
1132                                   "page allocation failed.\n");
1133                         return -ENOMEM;
1134                 }
1135                 rx_ring->pg_chunk.offset = 0;
1136                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1137                                         0, ql_lbq_block_size(qdev),
1138                                         PCI_DMA_FROMDEVICE);
1139                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1140                         __free_pages(rx_ring->pg_chunk.page,
1141                                         qdev->lbq_buf_order);
1142                         netif_err(qdev, drv, qdev->ndev,
1143                                   "PCI mapping failed.\n");
1144                         return -ENOMEM;
1145                 }
1146                 rx_ring->pg_chunk.map = map;
1147                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1148         }
1149
1150         /* Copy the current master pg_chunk info
1151          * to the current descriptor.
1152          */
1153         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1154
1155         /* Adjust the master page chunk for next
1156          * buffer get.
1157          */
1158         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1159         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1160                 rx_ring->pg_chunk.page = NULL;
1161                 lbq_desc->p.pg_chunk.last_flag = 1;
1162         } else {
1163                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1164                 get_page(rx_ring->pg_chunk.page);
1165                 lbq_desc->p.pg_chunk.last_flag = 0;
1166         }
1167         return 0;
1168 }
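/* Worked example of the chunking above (illustrative numbers): with 4K pages
 * and lbq_buf_order = 1, ql_lbq_block_size() is 8KB.  A 2KB lbq_buf_size then
 * yields four chunks per master page; the first three calls advance
 * pg_chunk.offset/va and take an extra page reference, while the fourth
 * (offset reaching 8KB) sets last_flag and clears the master pointer so the
 * next refill allocates a fresh block.
 */
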
1169 /* Process (refill) a large buffer queue. */
1170 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1171 {
1172         u32 clean_idx = rx_ring->lbq_clean_idx;
1173         u32 start_idx = clean_idx;
1174         struct bq_desc *lbq_desc;
1175         u64 map;
1176         int i;
1177
1178         while (rx_ring->lbq_free_cnt > 32) {
1179                 for (i = 0; i < 16; i++) {
1180                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181                                      "lbq: try cleaning clean_idx = %d.\n",
1182                                      clean_idx);
1183                         lbq_desc = &rx_ring->lbq[clean_idx];
1184                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1185                                 netif_err(qdev, ifup, qdev->ndev,
1186                                           "Could not get a page chunk.\n");
1187                                 return;
1188                         }
1189
1190                         map = lbq_desc->p.pg_chunk.map +
1191                                 lbq_desc->p.pg_chunk.offset;
1192                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1193                         dma_unmap_len_set(lbq_desc, maplen,
1194                                         rx_ring->lbq_buf_size);
1195                         *lbq_desc->addr = cpu_to_le64(map);
1196
1197                         pci_dma_sync_single_for_device(qdev->pdev, map,
1198                                                 rx_ring->lbq_buf_size,
1199                                                 PCI_DMA_FROMDEVICE);
1200                         clean_idx++;
1201                         if (clean_idx == rx_ring->lbq_len)
1202                                 clean_idx = 0;
1203                 }
1204
1205                 rx_ring->lbq_clean_idx = clean_idx;
1206                 rx_ring->lbq_prod_idx += 16;
1207                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1208                         rx_ring->lbq_prod_idx = 0;
1209                 rx_ring->lbq_free_cnt -= 16;
1210         }
1211
1212         if (start_idx != clean_idx) {
1213                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1214                              "lbq: updating prod idx = %d.\n",
1215                              rx_ring->lbq_prod_idx);
1216                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1217                                 rx_ring->lbq_prod_idx_db_reg);
1218         }
1219 }
1220
1221 /* Process (refill) a small buffer queue. */
1222 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1223 {
1224         u32 clean_idx = rx_ring->sbq_clean_idx;
1225         u32 start_idx = clean_idx;
1226         struct bq_desc *sbq_desc;
1227         u64 map;
1228         int i;
1229
1230         while (rx_ring->sbq_free_cnt > 16) {
1231                 for (i = 0; i < 16; i++) {
1232                         sbq_desc = &rx_ring->sbq[clean_idx];
1233                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234                                      "sbq: try cleaning clean_idx = %d.\n",
1235                                      clean_idx);
1236                         if (sbq_desc->p.skb == NULL) {
1237                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1238                                              qdev->ndev,
1239                                              "sbq: getting new skb for index %d.\n",
1240                                              sbq_desc->index);
1241                                 sbq_desc->p.skb =
1242                                     netdev_alloc_skb(qdev->ndev,
1243                                                      SMALL_BUFFER_SIZE);
1244                                 if (sbq_desc->p.skb == NULL) {
1245                                         netif_err(qdev, probe, qdev->ndev,
1246                                                   "Couldn't get an skb.\n");
1247                                         rx_ring->sbq_clean_idx = clean_idx;
1248                                         return;
1249                                 }
1250                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1251                                 map = pci_map_single(qdev->pdev,
1252                                                      sbq_desc->p.skb->data,
1253                                                      rx_ring->sbq_buf_size,
1254                                                      PCI_DMA_FROMDEVICE);
1255                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1256                                         netif_err(qdev, ifup, qdev->ndev,
1257                                                   "PCI mapping failed.\n");
1258                                         rx_ring->sbq_clean_idx = clean_idx;
1259                                         dev_kfree_skb_any(sbq_desc->p.skb);
1260                                         sbq_desc->p.skb = NULL;
1261                                         return;
1262                                 }
1263                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1264                                 dma_unmap_len_set(sbq_desc, maplen,
1265                                                   rx_ring->sbq_buf_size);
1266                                 *sbq_desc->addr = cpu_to_le64(map);
1267                         }
1268
1269                         clean_idx++;
1270                         if (clean_idx == rx_ring->sbq_len)
1271                                 clean_idx = 0;
1272                 }
1273                 rx_ring->sbq_clean_idx = clean_idx;
1274                 rx_ring->sbq_prod_idx += 16;
1275                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1276                         rx_ring->sbq_prod_idx = 0;
1277                 rx_ring->sbq_free_cnt -= 16;
1278         }
1279
1280         if (start_idx != clean_idx) {
1281                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1282                              "sbq: updating prod idx = %d.\n",
1283                              rx_ring->sbq_prod_idx);
1284                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1285                                 rx_ring->sbq_prod_idx_db_reg);
1286         }
1287 }
1288
1289 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1290                                     struct rx_ring *rx_ring)
1291 {
1292         ql_update_sbq(qdev, rx_ring);
1293         ql_update_lbq(qdev, rx_ring);
1294 }
1295
1296 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1297  * fails at some stage, or from the interrupt when a tx completes.
1298  */
1299 static void ql_unmap_send(struct ql_adapter *qdev,
1300                           struct tx_ring_desc *tx_ring_desc, int mapped)
1301 {
1302         int i;
1303         for (i = 0; i < mapped; i++) {
1304                 if (i == 0 || (i == 7 && mapped > 7)) {
1305                         /*
1306                          * Unmap the skb->data area, or the
1307                          * external sglist (AKA the Outbound
1308                          * Address List (OAL)).
1309                          * If it's the zeroth element, then it's
1310                          * the skb->data area.  If it's the 7th
1311                          * element and there are more than 6 frags,
1312                          * then it's an OAL.
1313                          */
1314                         if (i == 7) {
1315                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1316                                              qdev->ndev,
1317                                              "unmapping OAL area.\n");
1318                         }
1319                         pci_unmap_single(qdev->pdev,
1320                                          dma_unmap_addr(&tx_ring_desc->map[i],
1321                                                         mapaddr),
1322                                          dma_unmap_len(&tx_ring_desc->map[i],
1323                                                        maplen),
1324                                          PCI_DMA_TODEVICE);
1325                 } else {
1326                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1327                                      "unmapping frag %d.\n", i);
1328                         pci_unmap_page(qdev->pdev,
1329                                        dma_unmap_addr(&tx_ring_desc->map[i],
1330                                                       mapaddr),
1331                                        dma_unmap_len(&tx_ring_desc->map[i],
1332                                                      maplen), PCI_DMA_TODEVICE);
1333                 }
1334         }
1335
1336 }
1337
1338 /* Map the buffers for this transmit.  This will return
1339  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1340  */
1341 static int ql_map_send(struct ql_adapter *qdev,
1342                        struct ob_mac_iocb_req *mac_iocb_ptr,
1343                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1344 {
1345         int len = skb_headlen(skb);
1346         dma_addr_t map;
1347         int frag_idx, err, map_idx = 0;
1348         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1349         int frag_cnt = skb_shinfo(skb)->nr_frags;
1350
1351         if (frag_cnt) {
1352                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1353                              "frag_cnt = %d.\n", frag_cnt);
1354         }
1355         /*
1356          * Map the skb buffer first.
1357          */
1358         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1359
1360         err = pci_dma_mapping_error(qdev->pdev, map);
1361         if (err) {
1362                 netif_err(qdev, tx_queued, qdev->ndev,
1363                           "PCI mapping failed with error: %d\n", err);
1364
1365                 return NETDEV_TX_BUSY;
1366         }
1367
1368         tbd->len = cpu_to_le32(len);
1369         tbd->addr = cpu_to_le64(map);
1370         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1371         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1372         map_idx++;
1373
1374         /*
1375          * This loop fills the remainder of the 8 address descriptors
1376          * in the IOCB.  If there are more than 7 fragments, then the
1377          * eighth address desc will point to an external list (OAL).
1378          * When this happens, the remainder of the frags will be stored
1379          * in this list.
1380          */
1381         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1382                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1383                 tbd++;
1384                 if (frag_idx == 6 && frag_cnt > 7) {
1385                         /* Let's tack on an sglist.
1386                          * Our control block will now
1387                          * look like this:
1388                          * iocb->seg[0] = skb->data
1389                          * iocb->seg[1] = frag[0]
1390                          * iocb->seg[2] = frag[1]
1391                          * iocb->seg[3] = frag[2]
1392                          * iocb->seg[4] = frag[3]
1393                          * iocb->seg[5] = frag[4]
1394                          * iocb->seg[6] = frag[5]
1395                          * iocb->seg[7] = ptr to OAL (external sglist)
1396                          * oal->seg[0] = frag[6]
1397                          * oal->seg[1] = frag[7]
1398                          * oal->seg[2] = frag[8]
1399                          * oal->seg[3] = frag[9]
1400                          * oal->seg[4] = frag[10]
1401                          *      etc...
1402                          */
1403                         /* Tack on the OAL in the eighth segment of IOCB. */
1404                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1405                                              sizeof(struct oal),
1406                                              PCI_DMA_TODEVICE);
1407                         err = pci_dma_mapping_error(qdev->pdev, map);
1408                         if (err) {
1409                                 netif_err(qdev, tx_queued, qdev->ndev,
1410                                           "PCI mapping outbound address list with error: %d\n",
1411                                           err);
1412                                 goto map_error;
1413                         }
1414
1415                         tbd->addr = cpu_to_le64(map);
1416                         /*
1417                          * The length is the number of fragments
1418                          * that remain to be mapped times the length
1419                          * of our sglist (OAL).
1420                          */
1421                         tbd->len =
1422                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1423                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1424                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1425                                            map);
1426                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1427                                           sizeof(struct oal));
1428                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1429                         map_idx++;
1430                 }
1431
1432                 map =
1433                     pci_map_page(qdev->pdev, frag->page,
1434                                  frag->page_offset, frag->size,
1435                                  PCI_DMA_TODEVICE);
1436
1437                 err = pci_dma_mapping_error(qdev->pdev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(frag->size);
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   frag->size);
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then map_idx will be 1,
1461          * which causes only the skb->data area to be unmapped.
1462          * Otherwise we pass in the number of segments that mapped
1463          * successfully so they can be unmapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct skb_frag_struct *rx_frag;
1479         int nr_frags;
1480         struct napi_struct *napi = &rx_ring->napi;
1481
1482         napi->dev = qdev->ndev;
1483
1484         skb = napi_get_frags(napi);
1485         if (!skb) {
1486                 netif_err(qdev, drv, qdev->ndev,
1487                           "Couldn't get an skb, exiting.\n");
1488                 rx_ring->rx_dropped++;
1489                 put_page(lbq_desc->p.pg_chunk.page);
1490                 return;
1491         }
1492         prefetch(lbq_desc->p.pg_chunk.va);
1493         rx_frag = skb_shinfo(skb)->frags;
1494         nr_frags = skb_shinfo(skb)->nr_frags;
1495         rx_frag += nr_frags;
1496         rx_frag->page = lbq_desc->p.pg_chunk.page;
1497         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498         rx_frag->size = length;
1499
1500         skb->len += length;
1501         skb->data_len += length;
1502         skb->truesize += length;
1503         skb_shinfo(skb)->nr_frags++;
1504
1505         rx_ring->rx_packets++;
1506         rx_ring->rx_bytes += length;
1507         skb->ip_summed = CHECKSUM_UNNECESSARY;
1508         skb_record_rx_queue(skb, rx_ring->cq_id);
1509         if (qdev->vlgrp && (vlan_id != 0xffff))
1510                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1511         else
1512                 napi_gro_frags(napi);
1513 }
1514
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517                                         struct rx_ring *rx_ring,
1518                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1519                                         u32 length,
1520                                         u16 vlan_id)
1521 {
1522         struct net_device *ndev = qdev->ndev;
1523         struct sk_buff *skb = NULL;
1524         void *addr;
1525         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526         struct napi_struct *napi = &rx_ring->napi;
1527
1528         skb = netdev_alloc_skb(ndev, length);
1529         if (!skb) {
1530                 netif_err(qdev, drv, qdev->ndev,
1531                           "Couldn't get an skb, need to unwind!\n");
1532                 rx_ring->rx_dropped++;
1533                 put_page(lbq_desc->p.pg_chunk.page);
1534                 return;
1535         }
1536
1537         addr = lbq_desc->p.pg_chunk.va;
1538         prefetch(addr);
1539
1540
1541         /* Frame error, so drop the packet. */
1542         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543                 netif_info(qdev, drv, qdev->ndev,
1544                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1545                 rx_ring->rx_errors++;
1546                 goto err_out;
1547         }
1548
1549         /* The max framesize filter on this chip is set higher than
1550          * MTU since FCoE uses 2k frames.
1551          */
1552         if (length > ndev->mtu + ETH_HLEN) {
1553                 netif_err(qdev, drv, qdev->ndev,
1554                           "Segment too large, dropping.\n");
1555                 rx_ring->rx_dropped++;
1556                 goto err_out;
1557         }
1558         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1559         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1561                      length);
1562         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1563                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1564                                 length-ETH_HLEN);
1565         skb->len += length-ETH_HLEN;
1566         skb->data_len += length-ETH_HLEN;
1567         skb->truesize += length-ETH_HLEN;
1568
1569         rx_ring->rx_packets++;
1570         rx_ring->rx_bytes += skb->len;
1571         skb->protocol = eth_type_trans(skb, ndev);
1572         skb_checksum_none_assert(skb);
1573
1574         if (qdev->rx_csum &&
1575                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1576                 /* TCP frame. */
1577                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1578                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1579                                      "TCP checksum done!\n");
1580                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1581                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1582                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1583                         /* Unfragmented ipv4 UDP frame. */
1584                         struct iphdr *iph = (struct iphdr *) skb->data;
1585                         if (!(iph->frag_off &
1586                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1587                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1589                                              qdev->ndev,
1590                                              "UDP checksum done!\n");
1591                         }
1592                 }
1593         }
1594
1595         skb_record_rx_queue(skb, rx_ring->cq_id);
1596         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1597                 if (qdev->vlgrp && (vlan_id != 0xffff))
1598                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1599                 else
1600                         napi_gro_receive(napi, skb);
1601         } else {
1602                 if (qdev->vlgrp && (vlan_id != 0xffff))
1603                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1604                 else
1605                         netif_receive_skb(skb);
1606         }
1607         return;
1608 err_out:
1609         dev_kfree_skb_any(skb);
1610         put_page(lbq_desc->p.pg_chunk.page);
1611 }
1612
1613 /* Process an inbound completion from an rx ring. */
1614 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1615                                         struct rx_ring *rx_ring,
1616                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1617                                         u32 length,
1618                                         u16 vlan_id)
1619 {
1620         struct net_device *ndev = qdev->ndev;
1621         struct sk_buff *skb = NULL;
1622         struct sk_buff *new_skb = NULL;
1623         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1624
1625         skb = sbq_desc->p.skb;
1626         /* Allocate new_skb and copy */
1627         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1628         if (new_skb == NULL) {
1629                 netif_err(qdev, probe, qdev->ndev,
1630                           "No skb available, drop the packet.\n");
1631                 rx_ring->rx_dropped++;
1632                 return;
1633         }
1634         skb_reserve(new_skb, NET_IP_ALIGN);
1635         memcpy(skb_put(new_skb, length), skb->data, length);
1636         skb = new_skb;
1637
1638         /* Frame error, so drop the packet. */
1639         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1640                 netif_info(qdev, drv, qdev->ndev,
1641                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1642                 dev_kfree_skb_any(skb);
1643                 rx_ring->rx_errors++;
1644                 return;
1645         }
1646
1647         /* loopback self test for ethtool */
1648         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1649                 ql_check_lb_frame(qdev, skb);
1650                 dev_kfree_skb_any(skb);
1651                 return;
1652         }
1653
1654         /* The max framesize filter on this chip is set higher than
1655          * MTU since FCoE uses 2k frames.
1656          */
1657         if (skb->len > ndev->mtu + ETH_HLEN) {
1658                 dev_kfree_skb_any(skb);
1659                 rx_ring->rx_dropped++;
1660                 return;
1661         }
1662
1663         prefetch(skb->data);
1664         skb->dev = ndev;
1665         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "%s Multicast.\n",
1668                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1670                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1671                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1672                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1673                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1674         }
1675         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1676                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677                              "Promiscuous Packet.\n");
1678
1679         rx_ring->rx_packets++;
1680         rx_ring->rx_bytes += skb->len;
1681         skb->protocol = eth_type_trans(skb, ndev);
1682         skb_checksum_none_assert(skb);
1683
1684         /* If rx checksum is on, and there are no
1685          * csum or frame errors.
1686          */
1687         if (qdev->rx_csum &&
1688                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1689                 /* TCP frame. */
1690                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1691                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692                                      "TCP checksum done!\n");
1693                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1694                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1695                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1696                         /* Unfragmented ipv4 UDP frame. */
1697                         struct iphdr *iph = (struct iphdr *) skb->data;
1698                         if (!(iph->frag_off &
1699                                 ntohs(IP_MF|IP_OFFSET))) {
1700                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1702                                              qdev->ndev,
1703                                              "UDP checksum done!\n");
1704                         }
1705                 }
1706         }
1707
1708         skb_record_rx_queue(skb, rx_ring->cq_id);
1709         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1710                 if (qdev->vlgrp && (vlan_id != 0xffff))
1711                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1712                                                 vlan_id, skb);
1713                 else
1714                         napi_gro_receive(&rx_ring->napi, skb);
1715         } else {
1716                 if (qdev->vlgrp && (vlan_id != 0xffff))
1717                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1718                 else
1719                         netif_receive_skb(skb);
1720         }
1721 }
1722
1723 static void ql_realign_skb(struct sk_buff *skb, int len)
1724 {
1725         void *temp_addr = skb->data;
1726
1727         /* Undo the skb_reserve(skb,32) we did before
1728          * giving to hardware, and realign data on
1729          * a 2-byte boundary.
1730          */
1731         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1732         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1733         skb_copy_to_linear_data(skb, temp_addr,
1734                 (unsigned int)len);
1735 }
1736
1737 /*
1738  * This function builds an skb for the given inbound
1739  * completion.  It will be rewritten for readability in the near
1740  * future, but for now it works well.
1741  */
1742 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1743                                        struct rx_ring *rx_ring,
1744                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1745 {
1746         struct bq_desc *lbq_desc;
1747         struct bq_desc *sbq_desc;
1748         struct sk_buff *skb = NULL;
1749         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1750        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1751         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1752         /*
1753          * Handle the header buffer if present.
1754          */
1755         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1756             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1757                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1758                              "Header of %d bytes in small buffer.\n", hdr_len);
1759                 /*
1760                  * Headers fit nicely into a small buffer.
1761                  */
1762                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1763                 pci_unmap_single(qdev->pdev,
1764                                 dma_unmap_addr(sbq_desc, mapaddr),
1765                                 dma_unmap_len(sbq_desc, maplen),
1766                                 PCI_DMA_FROMDEVICE);
1767                 skb = sbq_desc->p.skb;
1768                 ql_realign_skb(skb, hdr_len);
1769                 skb_put(skb, hdr_len);
1770                 sbq_desc->p.skb = NULL;
1771         }
1772
1773         /*
1774          * Handle the data buffer(s).
1775          */
1776         if (unlikely(!length)) {        /* Is there data too? */
1777                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1778                              "No Data buffer in this packet.\n");
1779                 return skb;
1780         }
1781
1782         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1783                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1784                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1785                                      "Headers in small, data of %d bytes in small, combine them.\n",
1786                                      length);
1787                         /*
1788                          * Data is less than small buffer size so it's
1789                          * stuffed in a small buffer.
1790                          * For this case we append the data
1791                          * from the "data" small buffer to the "header" small
1792                          * buffer.
1793                          */
1794                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1795                         pci_dma_sync_single_for_cpu(qdev->pdev,
1796                                                     dma_unmap_addr
1797                                                     (sbq_desc, mapaddr),
1798                                                     dma_unmap_len
1799                                                     (sbq_desc, maplen),
1800                                                     PCI_DMA_FROMDEVICE);
1801                         memcpy(skb_put(skb, length),
1802                                sbq_desc->p.skb->data, length);
1803                         pci_dma_sync_single_for_device(qdev->pdev,
1804                                                        dma_unmap_addr
1805                                                        (sbq_desc,
1806                                                         mapaddr),
1807                                                        dma_unmap_len
1808                                                        (sbq_desc,
1809                                                         maplen),
1810                                                        PCI_DMA_FROMDEVICE);
1811                 } else {
1812                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1813                                      "%d bytes in a single small buffer.\n",
1814                                      length);
1815                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1816                         skb = sbq_desc->p.skb;
1817                         ql_realign_skb(skb, length);
1818                         skb_put(skb, length);
1819                         pci_unmap_single(qdev->pdev,
1820                                          dma_unmap_addr(sbq_desc,
1821                                                         mapaddr),
1822                                          dma_unmap_len(sbq_desc,
1823                                                        maplen),
1824                                          PCI_DMA_FROMDEVICE);
1825                         sbq_desc->p.skb = NULL;
1826                 }
1827         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1828                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1829                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830                                      "Header in small, %d bytes in large. Chain large to small!\n",
1831                                      length);
1832                         /*
1833                          * The data is in a single large buffer.  We
1834                          * chain it to the header buffer's skb and let
1835                          * it rip.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1839                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1840                                      lbq_desc->p.pg_chunk.offset, length);
1841                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1842                                                 lbq_desc->p.pg_chunk.offset,
1843                                                 length);
1844                         skb->len += length;
1845                         skb->data_len += length;
1846                         skb->truesize += length;
1847                 } else {
1848                         /*
1849                          * The headers and data are in a single large buffer. We
1850                          * copy it to a new skb and let it go. This can happen with
1851                          * jumbo mtu on a non-TCP/UDP frame.
1852                          */
1853                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854                         skb = netdev_alloc_skb(qdev->ndev, length);
1855                         if (skb == NULL) {
1856                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857                                              "No skb available, drop the packet.\n");
1858                                 return NULL;
1859                         }
1860                         pci_unmap_page(qdev->pdev,
1861                                        dma_unmap_addr(lbq_desc,
1862                                                       mapaddr),
1863                                        dma_unmap_len(lbq_desc, maplen),
1864                                        PCI_DMA_FROMDEVICE);
1865                         skb_reserve(skb, NET_IP_ALIGN);
1866                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1868                                      length);
1869                         skb_fill_page_desc(skb, 0,
1870                                                 lbq_desc->p.pg_chunk.page,
1871                                                 lbq_desc->p.pg_chunk.offset,
1872                                                 length);
1873                         skb->len += length;
1874                         skb->data_len += length;
1875                         skb->truesize += length;
1876                         length -= length;
1877                         __pskb_pull_tail(skb,
1878                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1879                                 VLAN_ETH_HLEN : ETH_HLEN);
1880                 }
1881         } else {
1882                 /*
1883                  * The data is in a chain of large buffers
1884                  * pointed to by a small buffer.  We loop
1885                  * through and chain them to our small header
1886                  * buffer's skb.
1887                  * frags:  There are 18 max frags and our small
1888                  *         buffer will hold 32 of them. The thing is,
1889                  *         we'll use 3 max for our 9000 byte jumbo
1890                  *         frames.  If the MTU goes up we could
1891                  *         eventually be in trouble.
1892                  */
1893                 int size, i = 0;
1894                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1895                 pci_unmap_single(qdev->pdev,
1896                                  dma_unmap_addr(sbq_desc, mapaddr),
1897                                  dma_unmap_len(sbq_desc, maplen),
1898                                  PCI_DMA_FROMDEVICE);
1899                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1900                         /*
1901                          * This is a non-TCP/UDP IP frame, so
1902                          * the headers aren't split into a small
1903                          * buffer.  We have to use the small buffer
1904                          * that contains our sg list as our skb to
1905                          * send up the stack. Copy the sg list here to
1906                          * a local buffer and use it to find the
1907                          * pages to chain.
1908                          */
1909                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1910                                      "%d bytes of headers & data in chain of large.\n",
1911                                      length);
1912                         skb = sbq_desc->p.skb;
1913                         sbq_desc->p.skb = NULL;
1914                         skb_reserve(skb, NET_IP_ALIGN);
1915                 }
1916                 while (length > 0) {
1917                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918                         size = (length < rx_ring->lbq_buf_size) ? length :
1919                                 rx_ring->lbq_buf_size;
1920
1921                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1922                                      "Adding page %d to skb for %d bytes.\n",
1923                                      i, size);
1924                         skb_fill_page_desc(skb, i,
1925                                                 lbq_desc->p.pg_chunk.page,
1926                                                 lbq_desc->p.pg_chunk.offset,
1927                                                 size);
1928                         skb->len += size;
1929                         skb->data_len += size;
1930                         skb->truesize += size;
1931                         length -= size;
1932                         i++;
1933                 }
1934                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1935                                 VLAN_ETH_HLEN : ETH_HLEN);
1936         }
1937         return skb;
1938 }
1939
1940 /* Process an inbound completion from an rx ring. */
1941 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1942                                    struct rx_ring *rx_ring,
1943                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1944                                    u16 vlan_id)
1945 {
1946         struct net_device *ndev = qdev->ndev;
1947         struct sk_buff *skb = NULL;
1948
1949         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1950
1951         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1952         if (unlikely(!skb)) {
1953                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954                              "No skb available, drop packet.\n");
1955                 rx_ring->rx_dropped++;
1956                 return;
1957         }
1958
1959         /* Frame error, so drop the packet. */
1960         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1961                 netif_info(qdev, drv, qdev->ndev,
1962                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1963                 dev_kfree_skb_any(skb);
1964                 rx_ring->rx_errors++;
1965                 return;
1966         }
1967
1968         /* The max framesize filter on this chip is set higher than
1969          * MTU since FCoE uses 2k frames.
1970          */
1971         if (skb->len > ndev->mtu + ETH_HLEN) {
1972                 dev_kfree_skb_any(skb);
1973                 rx_ring->rx_dropped++;
1974                 return;
1975         }
1976
1977         /* loopback self test for ethtool */
1978         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1979                 ql_check_lb_frame(qdev, skb);
1980                 dev_kfree_skb_any(skb);
1981                 return;
1982         }
1983
1984         prefetch(skb->data);
1985         skb->dev = ndev;
1986         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1987                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1988                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1989                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1990                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1991                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1992                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1993                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1994                 rx_ring->rx_multicast++;
1995         }
1996         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1997                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998                              "Promiscuous Packet.\n");
1999         }
2000
2001         skb->protocol = eth_type_trans(skb, ndev);
2002         skb_checksum_none_assert(skb);
2003
2004         /* If rx checksum is on, and there are no
2005          * csum or frame errors.
2006          */
2007         if (qdev->rx_csum &&
2008                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2009                 /* TCP frame. */
2010                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2011                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012                                      "TCP checksum done!\n");
2013                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2014                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2015                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2016                         /* Unfragmented ipv4 UDP frame. */
2017                         struct iphdr *iph = (struct iphdr *) skb->data;
2018                         if (!(iph->frag_off &
2019                                 ntohs(IP_MF|IP_OFFSET))) {
2020                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2022                                              "UDP checksum done!\n");
2023                         }
2024                 }
2025         }
2026
2027         rx_ring->rx_packets++;
2028         rx_ring->rx_bytes += skb->len;
2029         skb_record_rx_queue(skb, rx_ring->cq_id);
2030         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2031                 if (qdev->vlgrp &&
2032                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2033                         (vlan_id != 0))
2034                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2035                                 vlan_id, skb);
2036                 else
2037                         napi_gro_receive(&rx_ring->napi, skb);
2038         } else {
2039                 if (qdev->vlgrp &&
2040                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2041                         (vlan_id != 0))
2042                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2043                 else
2044                         netif_receive_skb(skb);
2045         }
2046 }
2047
2048 /* Process an inbound completion from an rx ring. */
2049 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2050                                         struct rx_ring *rx_ring,
2051                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2052 {
2053         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
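        /* A vlan_id of 0xffff means no VLAN tag was present in the frame. */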
2054         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2055                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2056                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2057
2058         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2059
2060         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2061                 /* The data and headers are split into
2062                  * separate buffers.
2063                  */
2064                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2065                                                 vlan_id);
2066         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2067                 /* The data fit in a single small buffer.
2068                  * Allocate a new skb, copy the data and
2069                  * return the buffer to the free pool.
2070                  */
2071                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2072                                                 length, vlan_id);
2073         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2074                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2075                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2076                 /* TCP packet in a page chunk that's been checksummed.
2077                  * Tack it on to our GRO skb and let it go.
2078                  */
2079                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2080                                                 length, vlan_id);
2081         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2082                 /* Non-TCP packet in a page chunk. Allocate an
2083                  * skb, tack it on frags, and send it up.
2084                  */
2085                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2086                                                 length, vlan_id);
2087         } else {
2088                 /* Non-TCP/UDP large frames that span multiple buffers
2089                  * can be processed corrrectly by the split frame logic.
2090                  * can be processed correctly by the split frame logic.
2091                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2092                                                 vlan_id);
2093         }
2094
2095         return (unsigned long)length;
2096 }
2097
2098 /* Process an outbound completion from an rx ring. */
2099 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2100                                    struct ob_mac_iocb_rsp *mac_rsp)
2101 {
2102         struct tx_ring *tx_ring;
2103         struct tx_ring_desc *tx_ring_desc;
2104
2105         QL_DUMP_OB_MAC_RSP(mac_rsp);
2106         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2107         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2108         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2109         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2110         tx_ring->tx_packets++;
2111         dev_kfree_skb(tx_ring_desc->skb);
2112         tx_ring_desc->skb = NULL;
2113
2114         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2115                                         OB_MAC_IOCB_RSP_S |
2116                                         OB_MAC_IOCB_RSP_L |
2117                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2118                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2119                         netif_warn(qdev, tx_done, qdev->ndev,
2120                                    "Total descriptor length did not match transfer length.\n");
2121                 }
2122                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2123                         netif_warn(qdev, tx_done, qdev->ndev,
2124                                    "Frame too short to be valid, not sent.\n");
2125                 }
2126                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2127                         netif_warn(qdev, tx_done, qdev->ndev,
2128                                    "Frame too long, but sent anyway.\n");
2129                 }
2130                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2131                         netif_warn(qdev, tx_done, qdev->ndev,
2132                                    "PCI backplane error. Frame not sent.\n");
2133                 }
2134         }
2135         atomic_inc(&tx_ring->tx_count);
2136 }
2137
2138 /* Fire up a handler to reset the MPI processor. */
2139 void ql_queue_fw_error(struct ql_adapter *qdev)
2140 {
2141         ql_link_off(qdev);
2142         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2143 }
2144
2145 void ql_queue_asic_error(struct ql_adapter *qdev)
2146 {
2147         ql_link_off(qdev);
2148         ql_disable_interrupts(qdev);
2149         /* Clear adapter up bit to signal the recovery
2150          * process that it shouldn't kill the reset worker
2151          * thread
2152          */
2153         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2154         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2155 }
2156
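/* Handle asynchronous events reported by the chip.  A fatal MPI error
 * schedules a firmware reset; the other events schedule an ASIC reset.
 */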
2157 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2158                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2159 {
2160         switch (ib_ae_rsp->event) {
2161         case MGMT_ERR_EVENT:
2162                 netif_err(qdev, rx_err, qdev->ndev,
2163                           "Management Processor Fatal Error.\n");
2164                 ql_queue_fw_error(qdev);
2165                 return;
2166
2167         case CAM_LOOKUP_ERR_EVENT:
2168                 netif_err(qdev, link, qdev->ndev,
2169                           "Multiple CAM hits lookup occurred.\n");
2170                 netif_err(qdev, drv, qdev->ndev,
2171                           "This event shouldn't occur.\n");
2172                 ql_queue_asic_error(qdev);
2173                 return;
2174
2175         case SOFT_ECC_ERROR_EVENT:
2176                 netif_err(qdev, rx_err, qdev->ndev,
2177                           "Soft ECC error detected.\n");
2178                 ql_queue_asic_error(qdev);
2179                 break;
2180
2181         case PCI_ERR_ANON_BUF_RD:
2182                 netif_err(qdev, rx_err, qdev->ndev,
2183                           "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2184                           ib_ae_rsp->q_id);
2185                 ql_queue_asic_error(qdev);
2186                 break;
2187
2188         default:
2189                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2190                           ib_ae_rsp->event);
2191                 ql_queue_asic_error(qdev);
2192                 break;
2193         }
2194 }
2195
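/* Process TX completions from an outbound completion ring and wake
 * the corresponding TX queue if it was stopped and is now at least
 * 25% free.
 */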
2196 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2197 {
2198         struct ql_adapter *qdev = rx_ring->qdev;
2199         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2200         struct ob_mac_iocb_rsp *net_rsp = NULL;
2201         int count = 0;
2202
2203         struct tx_ring *tx_ring;
2204         /* While there are entries in the completion queue. */
2205         while (prod != rx_ring->cnsmr_idx) {
2206
2207                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2208                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2209                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2210
2211                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2212                 rmb();
2213                 switch (net_rsp->opcode) {
2214
2215                 case OPCODE_OB_MAC_TSO_IOCB:
2216                 case OPCODE_OB_MAC_IOCB:
2217                         ql_process_mac_tx_intr(qdev, net_rsp);
2218                         break;
2219                 default:
2220                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2222                                      net_rsp->opcode);
2223                 }
2224                 count++;
2225                 ql_update_cq(rx_ring);
2226                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2227         }
2228         if (!net_rsp)
2229                 return 0;
2230         ql_write_cq_idx(rx_ring);
2231         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2232         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2233                 if (atomic_read(&tx_ring->queue_stopped) &&
2234                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2235                         /*
2236                          * The queue got stopped because the tx_ring was full.
2237                          * Wake it up, because it's now at least 25% empty.
2238                          */
2239                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2240         }
2241
2242         return count;
2243 }
2244
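/* Process up to 'budget' inbound completions, then replenish the
 * buffer queues and update the completion queue consumer index.
 */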
2245 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2246 {
2247         struct ql_adapter *qdev = rx_ring->qdev;
2248         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2249         struct ql_net_rsp_iocb *net_rsp;
2250         int count = 0;
2251
2252         /* While there are entries in the completion queue. */
2253         while (prod != rx_ring->cnsmr_idx) {
2254
2255                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2257                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2258
2259                 net_rsp = rx_ring->curr_entry;
2260                 rmb();
2261                 switch (net_rsp->opcode) {
2262                 case OPCODE_IB_MAC_IOCB:
2263                         ql_process_mac_rx_intr(qdev, rx_ring,
2264                                                (struct ib_mac_iocb_rsp *)
2265                                                net_rsp);
2266                         break;
2267
2268                 case OPCODE_IB_AE_IOCB:
2269                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2270                                                 net_rsp);
2271                         break;
2272                 default:
2273                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2275                                      net_rsp->opcode);
2276                         break;
2277                 }
2278                 count++;
2279                 ql_update_cq(rx_ring);
2280                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2281                 if (count == budget)
2282                         break;
2283         }
2284         ql_update_buffer_queues(qdev, rx_ring);
2285         ql_write_cq_idx(rx_ring);
2286         return count;
2287 }
2288
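/* NAPI poll routine.  Service any non-empty TX completion rings owned
 * by this vector first, then clean the RSS ring up to 'budget'.
 */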
2289 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2290 {
2291         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2292         struct ql_adapter *qdev = rx_ring->qdev;
2293         struct rx_ring *trx_ring;
2294         int i, work_done = 0;
2295         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2296
2297         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2299
2300         /* Service the TX rings first.  They start
2301          * right after the RSS rings. */
2302         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2303                 trx_ring = &qdev->rx_ring[i];
2304                 /* If this TX completion ring belongs to this vector and
2305                  * it's not empty then service it.
2306                  */
2307                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2308                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2309                                         trx_ring->cnsmr_idx)) {
2310                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2311                                      "%s: Servicing TX completion ring %d.\n",
2312                                      __func__, trx_ring->cq_id);
2313                         ql_clean_outbound_rx_ring(trx_ring);
2314                 }
2315         }
2316
2317         /*
2318          * Now service the RSS ring if it's active.
2319          */
2320         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2321                                         rx_ring->cnsmr_idx) {
2322                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2323                              "%s: Servicing RX completion ring %d.\n",
2324                              __func__, rx_ring->cq_id);
2325                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2326         }
2327
2328         if (work_done < budget) {
2329                 napi_complete(napi);
2330                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2331         }
2332         return work_done;
2333 }
2334
2335 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2336 {
2337         struct ql_adapter *qdev = netdev_priv(ndev);
2338
2339         qdev->vlgrp = grp;
2340         if (grp) {
2341                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2342                              "Turning on VLAN in NIC_RCV_CFG.\n");
2343                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2344                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2345         } else {
2346                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2347                              "Turning off VLAN in NIC_RCV_CFG.\n");
2348                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349         }
2350 }
2351
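/* Add (enable) the VLAN address entry for this vid in the hardware. */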
2352 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2353 {
2354         struct ql_adapter *qdev = netdev_priv(ndev);
2355         u32 enable_bit = MAC_ADDR_E;
2356         int status;
2357
2358         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359         if (status)
2360                 return;
2361         if (ql_set_mac_addr_reg
2362             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2363                 netif_err(qdev, ifup, qdev->ndev,
2364                           "Failed to init vlan address.\n");
2365         }
2366         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2367 }
2368
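/* Clear (disable) the VLAN address entry for this vid in the hardware. */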
2369 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2370 {
2371         struct ql_adapter *qdev = netdev_priv(ndev);
2372         u32 enable_bit = 0;
2373         int status;
2374
2375         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376         if (status)
2377                 return;
2378
2379         if (ql_set_mac_addr_reg
2380             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2381                 netif_err(qdev, ifup, qdev->ndev,
2382                           "Failed to clear vlan address.\n");
2383         }
2384         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2385
2386 }
2387
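/* Re-enable VLAN handling in the hardware and re-add every VLAN ID
 * that is active in the current vlan group.
 */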
2388 static void qlge_restore_vlan(struct ql_adapter *qdev)
2389 {
2390         qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2391
2392         if (qdev->vlgrp) {
2393                 u16 vid;
2394                 for (vid = 0; vid < VLAN_N_VID; vid++) {
2395                         if (!vlan_group_get_device(qdev->vlgrp, vid))
2396                                 continue;
2397                         qlge_vlan_rx_add_vid(qdev->ndev, vid);
2398                 }
2399         }
2400 }
2401
2402 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2403 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2404 {
2405         struct rx_ring *rx_ring = dev_id;
2406         napi_schedule(&rx_ring->napi);
2407         return IRQ_HANDLED;
2408 }
2409
2410 /* This handles a fatal error, MPI activity, and the default
2411  * rx_ring in an MSI-X multiple vector environment.
2412  * In an MSI/Legacy environment it also processes the rest of
2413  * the rx_rings.
2414  */
2415 static irqreturn_t qlge_isr(int irq, void *dev_id)
2416 {
2417         struct rx_ring *rx_ring = dev_id;
2418         struct ql_adapter *qdev = rx_ring->qdev;
2419         struct intr_context *intr_context = &qdev->intr_context[0];
2420         u32 var;
2421         int work_done = 0;
2422
2423         spin_lock(&qdev->hw_lock);
2424         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2425                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2426                              "Shared Interrupt, Not ours!\n");
2427                 spin_unlock(&qdev->hw_lock);
2428                 return IRQ_NONE;
2429         }
2430         spin_unlock(&qdev->hw_lock);
2431
2432         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2433
2434         /*
2435          * Check for fatal error.
2436          */
2437         if (var & STS_FE) {
2438                 ql_queue_asic_error(qdev);
2439                 netif_err(qdev, intr, qdev->ndev,
2440                           "Got fatal error, STS = %x.\n", var);
2441                 var = ql_read32(qdev, ERR_STS);
2442                 netif_err(qdev, intr, qdev->ndev,
2443                           "Resetting chip. Error Status Register = 0x%x\n", var);
2444                 return IRQ_HANDLED;
2445         }
2446
2447         /*
2448          * Check MPI processor activity.
2449          */
2450         if ((var & STS_PI) &&
2451                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2452                 /*
2453                  * We've got an async event or mailbox completion.
2454                  * Handle it and clear the source of the interrupt.
2455                  */
2456                 netif_err(qdev, intr, qdev->ndev,
2457                           "Got MPI processor interrupt.\n");
2458                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2459                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2460                 queue_delayed_work_on(smp_processor_id(),
2461                                 qdev->workqueue, &qdev->mpi_work, 0);
2462                 work_done++;
2463         }
2464
2465         /*
2466          * Get the bit-mask that shows the active queues for this
2467          * pass.  Compare it to the queues that this irq services
2468          * and call napi if there's a match.
2469          */
2470         var = ql_read32(qdev, ISR1);
2471         if (var & intr_context->irq_mask) {
2472                 netif_info(qdev, intr, qdev->ndev,
2473                            "Waking handler for rx_ring[0].\n");
2474                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2475                 napi_schedule(&rx_ring->napi);
2476                 work_done++;
2477         }
2478         ql_enable_completion_interrupt(qdev, intr_context->intr);
2479         return work_done ? IRQ_HANDLED : IRQ_NONE;
2480 }
2481
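/* Set up the IOCB for TSO if the skb is GSO.  Returns 1 if TSO was
 * set up, 0 if the frame does not need TSO, or a negative errno if
 * the cloned header could not be expanded.
 */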
2482 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2483 {
2484
2485         if (skb_is_gso(skb)) {
2486                 int err;
2487                 if (skb_header_cloned(skb)) {
2488                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2489                         if (err)
2490                                 return err;
2491                 }
2492
2493                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2494                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2495                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2496                 mac_iocb_ptr->total_hdrs_len =
2497                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2498                 mac_iocb_ptr->net_trans_offset =
2499                     cpu_to_le16(skb_network_offset(skb) |
2500                                 skb_transport_offset(skb)
2501                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2502                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2503                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2504                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2505                         struct iphdr *iph = ip_hdr(skb);
2506                         iph->check = 0;
2507                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2508                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2509                                                                  iph->daddr, 0,
2510                                                                  IPPROTO_TCP,
2511                                                                  0);
2512                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2513                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2514                         tcp_hdr(skb)->check =
2515                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2516                                              &ipv6_hdr(skb)->daddr,
2517                                              0, IPPROTO_TCP, 0);
2518                 }
2519                 return 1;
2520         }
2521         return 0;
2522 }
2523
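/* Set up hardware checksum offload for a non-TSO TCP or UDP frame by
 * seeding the transport checksum field with the pseudo-header checksum.
 */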
2524 static void ql_hw_csum_setup(struct sk_buff *skb,
2525                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2526 {
2527         int len;
2528         struct iphdr *iph = ip_hdr(skb);
2529         __sum16 *check;
2530         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2531         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2532         mac_iocb_ptr->net_trans_offset =
2533                 cpu_to_le16(skb_network_offset(skb) |
2534                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2535
2536         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2537         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2538         if (likely(iph->protocol == IPPROTO_TCP)) {
2539                 check = &(tcp_hdr(skb)->check);
2540                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2541                 mac_iocb_ptr->total_hdrs_len =
2542                     cpu_to_le16(skb_transport_offset(skb) +
2543                                 (tcp_hdr(skb)->doff << 2));
2544         } else {
2545                 check = &(udp_hdr(skb)->check);
2546                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2547                 mac_iocb_ptr->total_hdrs_len =
2548                     cpu_to_le16(skb_transport_offset(skb) +
2549                                 sizeof(struct udphdr));
2550         }
2551         *check = ~csum_tcpudp_magic(iph->saddr,
2552                                     iph->daddr, len, iph->protocol, 0);
2553 }
2554
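/* The hard_start_xmit path: reserve a slot on the selected tx_ring,
 * build a MAC IOCB (with TSO or checksum offload set up as needed),
 * map the skb, then advance the producer index and ring the doorbell.
 */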
2555 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2556 {
2557         struct tx_ring_desc *tx_ring_desc;
2558         struct ob_mac_iocb_req *mac_iocb_ptr;
2559         struct ql_adapter *qdev = netdev_priv(ndev);
2560         int tso;
2561         struct tx_ring *tx_ring;
2562         u32 tx_ring_idx = (u32) skb->queue_mapping;
2563
2564         tx_ring = &qdev->tx_ring[tx_ring_idx];
2565
2566         if (skb_padto(skb, ETH_ZLEN))
2567                 return NETDEV_TX_OK;
2568
2569         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2570                 netif_info(qdev, tx_queued, qdev->ndev,
2571                            "%s: shutting down tx queue %d due to lack of resources.\n",
2572                            __func__, tx_ring_idx);
2573                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2574                 atomic_inc(&tx_ring->queue_stopped);
2575                 tx_ring->tx_errors++;
2576                 return NETDEV_TX_BUSY;
2577         }
2578         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2579         mac_iocb_ptr = tx_ring_desc->queue_entry;
2580         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2581
2582         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2583         mac_iocb_ptr->tid = tx_ring_desc->index;
2584         /* We use the upper 32-bits to store the tx queue for this IO.
2585          * When we get the completion we can use it to establish the context.
2586          */
2587         mac_iocb_ptr->txq_idx = tx_ring_idx;
2588         tx_ring_desc->skb = skb;
2589
2590         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2591
2592         if (vlan_tx_tag_present(skb)) {
2593                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2594                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2595                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2596                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2597         }
2598         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2599         if (tso < 0) {
2600                 dev_kfree_skb_any(skb);
2601                 return NETDEV_TX_OK;
2602         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2603                 ql_hw_csum_setup(skb,
2604                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2605         }
2606         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2607                         NETDEV_TX_OK) {
2608                 netif_err(qdev, tx_queued, qdev->ndev,
2609                           "Could not map the segments.\n");
2610                 tx_ring->tx_errors++;
2611                 return NETDEV_TX_BUSY;
2612         }
2613         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2614         tx_ring->prod_idx++;
2615         if (tx_ring->prod_idx == tx_ring->wq_len)
2616                 tx_ring->prod_idx = 0;
2617         wmb();
2618
2619         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2620         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2621                      "tx queued, slot %d, len %d\n",
2622                      tx_ring->prod_idx, skb->len);
2623
2624         atomic_dec(&tx_ring->tx_count);
2625         return NETDEV_TX_OK;
2626 }
2627
2628
2629 static void ql_free_shadow_space(struct ql_adapter *qdev)
2630 {
2631         if (qdev->rx_ring_shadow_reg_area) {
2632                 pci_free_consistent(qdev->pdev,
2633                                     PAGE_SIZE,
2634                                     qdev->rx_ring_shadow_reg_area,
2635                                     qdev->rx_ring_shadow_reg_dma);
2636                 qdev->rx_ring_shadow_reg_area = NULL;
2637         }
2638         if (qdev->tx_ring_shadow_reg_area) {
2639                 pci_free_consistent(qdev->pdev,
2640                                     PAGE_SIZE,
2641                                     qdev->tx_ring_shadow_reg_area,
2642                                     qdev->tx_ring_shadow_reg_dma);
2643                 qdev->tx_ring_shadow_reg_area = NULL;
2644         }
2645 }
2646
2647 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2648 {
2649         qdev->rx_ring_shadow_reg_area =
2650             pci_alloc_consistent(qdev->pdev,
2651                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2652         if (qdev->rx_ring_shadow_reg_area == NULL) {
2653                 netif_err(qdev, ifup, qdev->ndev,
2654                           "Allocation of RX shadow space failed.\n");
2655                 return -ENOMEM;
2656         }
2657         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2658         qdev->tx_ring_shadow_reg_area =
2659             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2660                                  &qdev->tx_ring_shadow_reg_dma);
2661         if (qdev->tx_ring_shadow_reg_area == NULL) {
2662                 netif_err(qdev, ifup, qdev->ndev,
2663                           "Allocation of TX shadow space failed.\n");
2664                 goto err_wqp_sh_area;
2665         }
2666         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2667         return 0;
2668
2669 err_wqp_sh_area:
2670         pci_free_consistent(qdev->pdev,
2671                             PAGE_SIZE,
2672                             qdev->rx_ring_shadow_reg_area,
2673                             qdev->rx_ring_shadow_reg_dma);
2674         return -ENOMEM;
2675 }
2676
2677 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2678 {
2679         struct tx_ring_desc *tx_ring_desc;
2680         int i;
2681         struct ob_mac_iocb_req *mac_iocb_ptr;
2682
2683         mac_iocb_ptr = tx_ring->wq_base;
2684         tx_ring_desc = tx_ring->q;
2685         for (i = 0; i < tx_ring->wq_len; i++) {
2686                 tx_ring_desc->index = i;
2687                 tx_ring_desc->skb = NULL;
2688                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2689                 mac_iocb_ptr++;
2690                 tx_ring_desc++;
2691         }
2692         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2693         atomic_set(&tx_ring->queue_stopped, 0);
2694 }
2695
2696 static void ql_free_tx_resources(struct ql_adapter *qdev,
2697                                  struct tx_ring *tx_ring)
2698 {
2699         if (tx_ring->wq_base) {
2700                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2701                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2702                 tx_ring->wq_base = NULL;
2703         }
2704         kfree(tx_ring->q);
2705         tx_ring->q = NULL;
2706 }
2707
2708 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2709                                  struct tx_ring *tx_ring)
2710 {
2711         tx_ring->wq_base =
2712             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2713                                  &tx_ring->wq_base_dma);
2714
2715         if ((tx_ring->wq_base == NULL) ||
2716             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2717                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2718                 return -ENOMEM;
2719         }
2720         tx_ring->q =
2721             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2722         if (tx_ring->q == NULL)
2723                 goto err;
2724
2725         return 0;
2726 err:
2727         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728                             tx_ring->wq_base, tx_ring->wq_base_dma);
2729         return -ENOMEM;
2730 }
2731
2732 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2733 {
2734         struct bq_desc *lbq_desc;
2735
2736         uint32_t  curr_idx, clean_idx;
2737
2738         curr_idx = rx_ring->lbq_curr_idx;
2739         clean_idx = rx_ring->lbq_clean_idx;
2740         while (curr_idx != clean_idx) {
2741                 lbq_desc = &rx_ring->lbq[curr_idx];
2742
2743                 if (lbq_desc->p.pg_chunk.last_flag) {
2744                         pci_unmap_page(qdev->pdev,
2745                                 lbq_desc->p.pg_chunk.map,
2746                                 ql_lbq_block_size(qdev),
2747                                        PCI_DMA_FROMDEVICE);
2748                         lbq_desc->p.pg_chunk.last_flag = 0;
2749                 }
2750
2751                 put_page(lbq_desc->p.pg_chunk.page);
2752                 lbq_desc->p.pg_chunk.page = NULL;
2753
2754                 if (++curr_idx == rx_ring->lbq_len)
2755                         curr_idx = 0;
2756
2757         }
2758 }
2759
2760 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2761 {
2762         int i;
2763         struct bq_desc *sbq_desc;
2764
2765         for (i = 0; i < rx_ring->sbq_len; i++) {
2766                 sbq_desc = &rx_ring->sbq[i];
2767                 if (sbq_desc == NULL) {
2768                         netif_err(qdev, ifup, qdev->ndev,
2769                                   "sbq_desc %d is NULL.\n", i);
2770                         return;
2771                 }
2772                 if (sbq_desc->p.skb) {
2773                         pci_unmap_single(qdev->pdev,
2774                                          dma_unmap_addr(sbq_desc, mapaddr),
2775                                          dma_unmap_len(sbq_desc, maplen),
2776                                          PCI_DMA_FROMDEVICE);
2777                         dev_kfree_skb(sbq_desc->p.skb);
2778                         sbq_desc->p.skb = NULL;
2779                 }
2780         }
2781 }
2782
2783 /* Free all large and small rx buffers associated
2784  * with the completion queues for this device.
2785  */
2786 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2787 {
2788         int i;
2789         struct rx_ring *rx_ring;
2790
2791         for (i = 0; i < qdev->rx_ring_count; i++) {
2792                 rx_ring = &qdev->rx_ring[i];
2793                 if (rx_ring->lbq)
2794                         ql_free_lbq_buffers(qdev, rx_ring);
2795                 if (rx_ring->sbq)
2796                         ql_free_sbq_buffers(qdev, rx_ring);
2797         }
2798 }
2799
2800 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2801 {
2802         struct rx_ring *rx_ring;
2803         int i;
2804
2805         for (i = 0; i < qdev->rx_ring_count; i++) {
2806                 rx_ring = &qdev->rx_ring[i];
2807                 if (rx_ring->type != TX_Q)
2808                         ql_update_buffer_queues(qdev, rx_ring);
2809         }
2810 }
2811
2812 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2813                                 struct rx_ring *rx_ring)
2814 {
2815         int i;
2816         struct bq_desc *lbq_desc;
2817         __le64 *bq = rx_ring->lbq_base;
2818
2819         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2820         for (i = 0; i < rx_ring->lbq_len; i++) {
2821                 lbq_desc = &rx_ring->lbq[i];
2822                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2823                 lbq_desc->index = i;
2824                 lbq_desc->addr = bq;
2825                 bq++;
2826         }
2827 }
2828
2829 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2830                                 struct rx_ring *rx_ring)
2831 {
2832         int i;
2833         struct bq_desc *sbq_desc;
2834         __le64 *bq = rx_ring->sbq_base;
2835
2836         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2837         for (i = 0; i < rx_ring->sbq_len; i++) {
2838                 sbq_desc = &rx_ring->sbq[i];
2839                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2840                 sbq_desc->index = i;
2841                 sbq_desc->addr = bq;
2842                 bq++;
2843         }
2844 }
2845
2846 static void ql_free_rx_resources(struct ql_adapter *qdev,
2847                                  struct rx_ring *rx_ring)
2848 {
2849         /* Free the small buffer queue. */
2850         if (rx_ring->sbq_base) {
2851                 pci_free_consistent(qdev->pdev,
2852                                     rx_ring->sbq_size,
2853                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2854                 rx_ring->sbq_base = NULL;
2855         }
2856
2857         /* Free the small buffer queue control blocks. */
2858         kfree(rx_ring->sbq);
2859         rx_ring->sbq = NULL;
2860
2861         /* Free the large buffer queue. */
2862         if (rx_ring->lbq_base) {
2863                 pci_free_consistent(qdev->pdev,
2864                                     rx_ring->lbq_size,
2865                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2866                 rx_ring->lbq_base = NULL;
2867         }
2868
2869         /* Free the large buffer queue control blocks. */
2870         kfree(rx_ring->lbq);
2871         rx_ring->lbq = NULL;
2872
2873         /* Free the rx queue. */
2874         if (rx_ring->cq_base) {
2875                 pci_free_consistent(qdev->pdev,
2876                                     rx_ring->cq_size,
2877                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2878                 rx_ring->cq_base = NULL;
2879         }
2880 }
2881
2882 /* Allocate queues and buffers for this completion queue based
2883  * on the values in the parameter structure. */
2884 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2885                                  struct rx_ring *rx_ring)
2886 {
2887
2888         /*
2889          * Allocate the completion queue for this rx_ring.
2890          */
2891         rx_ring->cq_base =
2892             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2893                                  &rx_ring->cq_base_dma);
2894
2895         if (rx_ring->cq_base == NULL) {
2896                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2897                 return -ENOMEM;
2898         }
2899
2900         if (rx_ring->sbq_len) {
2901                 /*
2902                  * Allocate small buffer queue.
2903                  */
2904                 rx_ring->sbq_base =
2905                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2906                                          &rx_ring->sbq_base_dma);
2907
2908                 if (rx_ring->sbq_base == NULL) {
2909                         netif_err(qdev, ifup, qdev->ndev,
2910                                   "Small buffer queue allocation failed.\n");
2911                         goto err_mem;
2912                 }
2913
2914                 /*
2915                  * Allocate small buffer queue control blocks.
2916                  */
2917                 rx_ring->sbq =
2918                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2919                             GFP_KERNEL);
2920                 if (rx_ring->sbq == NULL) {
2921                         netif_err(qdev, ifup, qdev->ndev,
2922                                   "Small buffer queue control block allocation failed.\n");
2923                         goto err_mem;
2924                 }
2925
2926                 ql_init_sbq_ring(qdev, rx_ring);
2927         }
2928
2929         if (rx_ring->lbq_len) {
2930                 /*
2931                  * Allocate large buffer queue.
2932                  */
2933                 rx_ring->lbq_base =
2934                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2935                                          &rx_ring->lbq_base_dma);
2936
2937                 if (rx_ring->lbq_base == NULL) {
2938                         netif_err(qdev, ifup, qdev->ndev,
2939                                   "Large buffer queue allocation failed.\n");
2940                         goto err_mem;
2941                 }
2942                 /*
2943                  * Allocate large buffer queue control blocks.
2944                  */
2945                 rx_ring->lbq =
2946                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2947                             GFP_KERNEL);
2948                 if (rx_ring->lbq == NULL) {
2949                         netif_err(qdev, ifup, qdev->ndev,
2950                                   "Large buffer queue control block allocation failed.\n");
2951                         goto err_mem;
2952                 }
2953
2954                 ql_init_lbq_ring(qdev, rx_ring);
2955         }
2956
2957         return 0;
2958
2959 err_mem:
2960         ql_free_rx_resources(qdev, rx_ring);
2961         return -ENOMEM;
2962 }
2963
2964 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2965 {
2966         struct tx_ring *tx_ring;
2967         struct tx_ring_desc *tx_ring_desc;
2968         int i, j;
2969
2970         /*
2971          * Loop through all queues and free
2972          * any resources.
2973          */
2974         for (j = 0; j < qdev->tx_ring_count; j++) {
2975                 tx_ring = &qdev->tx_ring[j];
2976                 for (i = 0; i < tx_ring->wq_len; i++) {
2977                         tx_ring_desc = &tx_ring->q[i];
2978                         if (tx_ring_desc && tx_ring_desc->skb) {
2979                                 netif_err(qdev, ifdown, qdev->ndev,
2980                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2981                                           tx_ring_desc->skb, j,
2982                                           tx_ring_desc->index);
2983                                 ql_unmap_send(qdev, tx_ring_desc,
2984                                               tx_ring_desc->map_cnt);
2985                                 dev_kfree_skb(tx_ring_desc->skb);
2986                                 tx_ring_desc->skb = NULL;
2987                         }
2988                 }
2989         }
2990 }
2991
2992 static void ql_free_mem_resources(struct ql_adapter *qdev)
2993 {
2994         int i;
2995
2996         for (i = 0; i < qdev->tx_ring_count; i++)
2997                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2998         for (i = 0; i < qdev->rx_ring_count; i++)
2999                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3000         ql_free_shadow_space(qdev);
3001 }
3002
3003 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3004 {
3005         int i;
3006
3007         /* Allocate space for our shadow registers and such. */
3008         if (ql_alloc_shadow_space(qdev))
3009                 return -ENOMEM;
3010
3011         for (i = 0; i < qdev->rx_ring_count; i++) {
3012                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3013                         netif_err(qdev, ifup, qdev->ndev,
3014                                   "RX resource allocation failed.\n");
3015                         goto err_mem;
3016                 }
3017         }
3018         /* Allocate tx queue resources */
3019         for (i = 0; i < qdev->tx_ring_count; i++) {
3020                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3021                         netif_err(qdev, ifup, qdev->ndev,
3022                                   "TX resource allocation failed.\n");
3023                         goto err_mem;
3024                 }
3025         }
3026         return 0;
3027
3028 err_mem:
3029         ql_free_mem_resources(qdev);
3030         return -ENOMEM;
3031 }
3032
3033 /* Set up the rx ring control block and pass it to the chip.
3034  * The control block is defined as
3035  * "Completion Queue Initialization Control Block", or cqicb.
3036  */
3037 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3038 {
3039         struct cqicb *cqicb = &rx_ring->cqicb;
3040         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3041                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3042         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3043                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3044         void __iomem *doorbell_area =
3045             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3046         int err = 0;
3047         u16 bq_len;
3048         u64 tmp;
3049         __le64 *base_indirect_ptr;
3050         int page_entries;
3051
3052         /* Set up the shadow registers for this ring. */
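        /* The per-ring slice of the shadow page holds, in order: the
         * completion queue producer index (one u64), the lbq indirect
         * (page address) list, and then the sbq indirect list, all
         * carved out of this cq_id's RX_RING_SHADOW_SPACE.
         */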
3053         rx_ring->prod_idx_sh_reg = shadow_reg;
3054         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3055         *rx_ring->prod_idx_sh_reg = 0;
3056         shadow_reg += sizeof(u64);
3057         shadow_reg_dma += sizeof(u64);
3058         rx_ring->lbq_base_indirect = shadow_reg;
3059         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3060         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3061         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062         rx_ring->sbq_base_indirect = shadow_reg;
3063         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3064
3065         /* PCI doorbell mem area + 0x00 for consumer index register */
3066         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3067         rx_ring->cnsmr_idx = 0;
3068         rx_ring->curr_entry = rx_ring->cq_base;
3069
3070         /* PCI doorbell mem area + 0x04 for valid register */
3071         rx_ring->valid_db_reg = doorbell_area + 0x04;
3072
3073         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3074         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3075
3076         /* PCI doorbell mem area + 0x1c */
3077         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3078
3079         memset((void *)cqicb, 0, sizeof(struct cqicb));
3080         cqicb->msix_vect = rx_ring->irq;
3081
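        /* The length fields are only 16 bits wide, so a queue length of
         * exactly 65536 is presumably encoded as 0 (the same convention
         * appears below for the lbq/sbq lengths).
         */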
3082         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3083         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3084
3085         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3086
3087         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3088
3089         /*
3090          * Set up the control block load flags.
3091          */
3092         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3093             FLAGS_LV |          /* Load MSI-X vector */
3094             FLAGS_LI;           /* Load irq delay values */
3095         if (rx_ring->lbq_len) {
3096                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3097                 tmp = (u64)rx_ring->lbq_base_dma;
3098                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3099                 page_entries = 0;
3100                 do {
3101                         *base_indirect_ptr = cpu_to_le64(tmp);
3102                         tmp += DB_PAGE_SIZE;
3103                         base_indirect_ptr++;
3104                         page_entries++;
3105                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3106                 cqicb->lbq_addr =
3107                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3108                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3109                         (u16) rx_ring->lbq_buf_size;
3110                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3111                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3112                         (u16) rx_ring->lbq_len;
3113                 cqicb->lbq_len = cpu_to_le16(bq_len);
3114                 rx_ring->lbq_prod_idx = 0;
3115                 rx_ring->lbq_curr_idx = 0;
3116                 rx_ring->lbq_clean_idx = 0;
3117                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3118         }
3119         if (rx_ring->sbq_len) {
3120                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3121                 tmp = (u64)rx_ring->sbq_base_dma;
3122                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3123                 page_entries = 0;
3124                 do {
3125                         *base_indirect_ptr = cpu_to_le64(tmp);
3126                         tmp += DB_PAGE_SIZE;
3127                         base_indirect_ptr++;
3128                         page_entries++;
3129                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3130                 cqicb->sbq_addr =
3131                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3132                 cqicb->sbq_buf_size =
3133                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3134                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3135                         (u16) rx_ring->sbq_len;
3136                 cqicb->sbq_len = cpu_to_le16(bq_len);
3137                 rx_ring->sbq_prod_idx = 0;
3138                 rx_ring->sbq_curr_idx = 0;
3139                 rx_ring->sbq_clean_idx = 0;
3140                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3141         }
3142         switch (rx_ring->type) {
3143         case TX_Q:
3144                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3145                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3146                 break;
3147         case RX_Q:
3148                 /* Inbound completion handling rx_rings run in
3149                  * separate NAPI contexts.
3150                  */
3151                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3152                                64);
3153                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3154                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3155                 break;
3156         default:
3157                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3158                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3159         }
3160         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3161                      "Initializing rx work queue.\n");
3162         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3163                            CFG_LCQ, rx_ring->cq_id);
3164         if (err) {
3165                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3166                 return err;
3167         }
3168         return err;
3169 }
3170
3171 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3172 {
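        /* This cast assumes the wqicb is the first member of struct
         * tx_ring, so the ring pointer doubles as a pointer to its
         * hardware control block.
         */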
3173         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3174         void __iomem *doorbell_area =
3175             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3176         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3177             (tx_ring->wq_id * sizeof(u64));
3178         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3179             (tx_ring->wq_id * sizeof(u64));
3180         int err = 0;
3181
3182         /*
3183          * Assign doorbell registers for this tx_ring.
3184          */
3185         /* TX PCI doorbell mem area for tx producer index */
3186         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3187         tx_ring->prod_idx = 0;
3188         /* TX PCI doorbell mem area + 0x04 */
3189         tx_ring->valid_db_reg = doorbell_area + 0x04;
3190
3191         /*
3192          * Assign shadow registers for this tx_ring.
3193          */
3194         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3195         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3196
3197         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3198         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3199                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3200         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3201         wqicb->rid = 0;
3202         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3203
3204         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3205
3206         ql_init_tx_ring(qdev, tx_ring);
3207
3208         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3209                            (u16) tx_ring->wq_id);
3210         if (err) {
3211                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3212                 return err;
3213         }
3214         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3215                      "Successfully loaded WQICB.\n");
3216         return err;
3217 }
3218
3219 static void ql_disable_msix(struct ql_adapter *qdev)
3220 {
3221         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3222                 pci_disable_msix(qdev->pdev);
3223                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3224                 kfree(qdev->msi_x_entry);
3225                 qdev->msi_x_entry = NULL;
3226         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3227                 pci_disable_msi(qdev->pdev);
3228                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3229         }
3230 }
3231
3232 /* We start by trying to get the number of vectors
3233  * stored in qdev->intr_count. If we don't get that
3234  * many then we reduce the count and try again.
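 * (With this older pci_enable_msix() interface, a positive return value
 * is the number of vectors actually available, so the loop retries with
 * that smaller count until it gets 0 on success or a negative error.)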
3235  */
3236 static void ql_enable_msix(struct ql_adapter *qdev)
3237 {
3238         int i, err;
3239
3240         /* Get the MSIX vectors. */
3241         if (qlge_irq_type == MSIX_IRQ) {
3242                 /* Try to alloc space for the msix struct,
3243                  * if it fails then go to MSI/legacy.
3244                  */
3245                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3246                                             sizeof(struct msix_entry),
3247                                             GFP_KERNEL);
3248                 if (!qdev->msi_x_entry) {
3249                         qlge_irq_type = MSI_IRQ;
3250                         goto msi;
3251                 }
3252
3253                 for (i = 0; i < qdev->intr_count; i++)
3254                         qdev->msi_x_entry[i].entry = i;
3255
3256                 /* Loop to get our vectors.  We start with
3257                  * what we want and settle for what we get.
3258                  */
3259                 do {
3260                         err = pci_enable_msix(qdev->pdev,
3261                                 qdev->msi_x_entry, qdev->intr_count);
3262                         if (err > 0)
3263                                 qdev->intr_count = err;
3264                 } while (err > 0);
3265
3266                 if (err < 0) {
3267                         kfree(qdev->msi_x_entry);
3268                         qdev->msi_x_entry = NULL;
3269                         netif_warn(qdev, ifup, qdev->ndev,
3270                                    "MSI-X Enable failed, trying MSI.\n");
3271                         qdev->intr_count = 1;
3272                         qlge_irq_type = MSI_IRQ;
3273                 } else if (err == 0) {
3274                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3275                         netif_info(qdev, ifup, qdev->ndev,
3276                                    "MSI-X Enabled, got %d vectors.\n",
3277                                    qdev->intr_count);
3278                         return;
3279                 }
3280         }
3281 msi:
3282         qdev->intr_count = 1;
3283         if (qlge_irq_type == MSI_IRQ) {
3284                 if (!pci_enable_msi(qdev->pdev)) {
3285                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3286                         netif_info(qdev, ifup, qdev->ndev,
3287                                    "Running with MSI interrupts.\n");
3288                         return;
3289                 }
3290         }
3291         qlge_irq_type = LEG_IRQ;
3292         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3293                      "Running with legacy interrupts.\n");
3294 }
3295
3296 /* Each vector services 1 RSS ring and 1 or more
3297  * TX completion rings.  This function loops through
3298  * the TX completion rings and assigns the vector that
3299  * will service it.  An example would be if there are
3300  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3301  * This would mean that vector 0 would service RSS ring 0
3302  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3303  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
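 * (Illustrative note: with the counts above, the TX completion rings
 * occupy rx_ring[2] through rx_ring[9], so rx_ring[2..5].irq = 0 and
 * rx_ring[6..9].irq = 1 after this function runs.)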
3304  */
3305 static void ql_set_tx_vect(struct ql_adapter *qdev)
3306 {
3307         int i, j, vect;
3308         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3309
3310         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3311                 /* Assign irq vectors to TX rx_rings.*/
3312                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3313                                          i < qdev->rx_ring_count; i++) {
3314                         if (j == tx_rings_per_vector) {
3315                                 vect++;
3316                                 j = 0;
3317                         }
3318                         qdev->rx_ring[i].irq = vect;
3319                         j++;
3320                 }
3321         } else {
3322                 /* For a single vector all rings have an irq
3323                  * of zero.
3324                  */
3325                 for (i = 0; i < qdev->rx_ring_count; i++)
3326                         qdev->rx_ring[i].irq = 0;
3327         }
3328 }
3329
3330 /* Set the interrupt mask for this vector.  Each vector
3331  * will service 1 RSS ring and 1 or more TX completion
3332  * rings.  This function sets up a bit mask per vector
3333  * that indicates which rings it services.
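 * (Illustrative note: continuing the 2-vector example above and assuming
 * each ring's cq_id equals its index, vector 0 would get
 * irq_mask = (1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) and
 * vector 1 would get (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9).)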
3334  */
3335 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3336 {
3337         int j, vect = ctx->intr;
3338         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3339
3340         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3341                 /* Add the RSS ring serviced by this vector
3342                  * to the mask.
3343                  */
3344                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3345                 /* Add the TX ring(s) serviced by this vector
3346                  * to the mask. */
3347                 for (j = 0; j < tx_rings_per_vector; j++) {
3348                         ctx->irq_mask |=
3349                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3350                         (vect * tx_rings_per_vector) + j].cq_id);
3351                 }
3352         } else {
3353                 /* For a single vector we just set the bit
3354                  * corresponding to each queue's ID in the mask.
3355                  */
3356                 for (j = 0; j < qdev->rx_ring_count; j++)
3357                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3358         }
3359 }
3360
3361 /*
3362  * Here we build the intr_context structures based on
3363  * our rx_ring count and intr vector count.
3364  * The intr_context structure is used to hook each vector
3365  * to possibly different handlers.
3366  */
3367 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3368 {
3369         int i = 0;
3370         struct intr_context *intr_context = &qdev->intr_context[0];
3371
3372         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3373                 /* Each rx_ring has its
3374                  * own intr_context since we have separate
3375                  * vectors for each queue.
3376                  */
3377                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3378                         qdev->rx_ring[i].irq = i;
3379                         intr_context->intr = i;
3380                         intr_context->qdev = qdev;
3381                         /* Set up this vector's bit-mask that indicates
3382                          * which queues it services.
3383                          */
3384                         ql_set_irq_mask(qdev, intr_context);
3385                         /*
3386                          * We set up each vector's enable/disable/read bits so
3387                          * there are no bit/mask calculations in the critical path.
3388                          */
3389                         intr_context->intr_en_mask =
3390                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3391                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3392                             | i;
3393                         intr_context->intr_dis_mask =
3394                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3395                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3396                             INTR_EN_IHD | i;
3397                         intr_context->intr_read_mask =
3398                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3399                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3400                             i;
3401                         if (i == 0) {
3402                                 /* The first vector/queue handles
3403                                  * broadcast/multicast, fatal errors,
3404                                  * and firmware events.  This is in addition
3405                                  * to normal inbound NAPI processing.
3406                                  */
3407                                 intr_context->handler = qlge_isr;
3408                                 sprintf(intr_context->name, "%s-rx-%d",
3409                                         qdev->ndev->name, i);
3410                         } else {
3411                                 /*
3412                                  * Inbound queues handle unicast frames only.
3413                                  */
3414                                 intr_context->handler = qlge_msix_rx_isr;
3415                                 sprintf(intr_context->name, "%s-rx-%d",
3416                                         qdev->ndev->name, i);
3417                         }
3418                 }
3419         } else {
3420                 /*
3421                  * All rx_rings use the same intr_context since
3422                  * there is only one vector.
3423                  */
3424                 intr_context->intr = 0;
3425                 intr_context->qdev = qdev;
3426                 /*
3427                  * We set up each vector's enable/disable/read bits so
3428                  * there are no bit/mask calculations in the critical path.
3429                  */
3430                 intr_context->intr_en_mask =
3431                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3432                 intr_context->intr_dis_mask =
3433                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3434                     INTR_EN_TYPE_DISABLE;
3435                 intr_context->intr_read_mask =
3436                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3437                 /*
3438                  * Single interrupt means one handler for all rings.
3439                  */
3440                 intr_context->handler = qlge_isr;
3441                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3442                 /* Set up this vector's bit-mask that indicates
3443                  * which queues it services. In this case there is
3444                  * a single vector so it will service all RSS and
3445                  * TX completion rings.
3446                  */
3447                 ql_set_irq_mask(qdev, intr_context);
3448         }
3449         /* Tell the TX completion rings which MSIx vector
3450          * they will be using.
3451          */
3452         ql_set_tx_vect(qdev);
3453 }
3454
3455 static void ql_free_irq(struct ql_adapter *qdev)
3456 {
3457         int i;
3458         struct intr_context *intr_context = &qdev->intr_context[0];
3459
3460         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3461                 if (intr_context->hooked) {
3462                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3463                                 free_irq(qdev->msi_x_entry[i].vector,
3464                                          &qdev->rx_ring[i]);
3465                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3466                                              "freeing msix interrupt %d.\n", i);
3467                         } else {
3468                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3469                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3470                                              "freeing msi interrupt %d.\n", i);
3471                         }
3472                 }
3473         }
3474         ql_disable_msix(qdev);
3475 }
3476
3477 static int ql_request_irq(struct ql_adapter *qdev)
3478 {
3479         int i;
3480         int status = 0;
3481         struct pci_dev *pdev = qdev->pdev;
3482         struct intr_context *intr_context = &qdev->intr_context[0];
3483
3484         ql_resolve_queues_to_irqs(qdev);
3485
3486         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3487                 atomic_set(&intr_context->irq_cnt, 0);
3488                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489                         status = request_irq(qdev->msi_x_entry[i].vector,
3490                                              intr_context->handler,
3491                                              0,
3492                                              intr_context->name,
3493                                              &qdev->rx_ring[i]);
3494                         if (status) {
3495                                 netif_err(qdev, ifup, qdev->ndev,
3496                                           "Failed request for MSIX interrupt %d.\n",
3497                                           i);
3498                                 goto err_irq;
3499                         } else {
3500                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501                                              "Hooked intr %d, queue type %s, with name %s.\n",
3502                                              i,
3503                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3504                                              "DEFAULT_Q" :
3505                                              qdev->rx_ring[i].type == TX_Q ?
3506                                              "TX_Q" :
3507                                              qdev->rx_ring[i].type == RX_Q ?
3508                                              "RX_Q" : "",
3509                                              intr_context->name);
3510                         }
3511                 } else {
3512                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3513                                      "trying msi or legacy interrupts.\n");
3514                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3515                                      "%s: irq = %d.\n", __func__, pdev->irq);
3516                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517                                      "%s: context->name = %s.\n", __func__,
3518                                      intr_context->name);
3519                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3520                                      "%s: dev_id = 0x%p.\n", __func__,
3521                                      &qdev->rx_ring[0]);
3522                         status =
3523                             request_irq(pdev->irq, qlge_isr,
3524                                         test_bit(QL_MSI_ENABLED,
3525                                                  &qdev->
3526                                                  flags) ? 0 : IRQF_SHARED,
3527                                         intr_context->name, &qdev->rx_ring[0]);
3528                         if (status)
3529                                 goto err_irq;
3530
3531                         netif_err(qdev, ifup, qdev->ndev,
3532                                   "Hooked intr %d, queue type %s, with name %s.\n",
3533                                   i,
3534                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3535                                   "DEFAULT_Q" :
3536                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3537                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3538                                   intr_context->name);
3539                 }
3540                 intr_context->hooked = 1;
3541         }
3542         return status;
3543 err_irq:
3544         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3545         ql_free_irq(qdev);
3546         return status;
3547 }
3548
3549 static int ql_start_rss(struct ql_adapter *qdev)
3550 {
3551         static const u8 init_hash_seed[] = {
3552                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3553                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3554                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3555                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3556                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3557         };
3558         struct ricb *ricb = &qdev->ricb;
3559         int status = 0;
3560         int i;
3561         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3562
3563         memset((void *)ricb, 0, sizeof(*ricb));
3564
3565         ricb->base_cq = RSS_L4K;
3566         ricb->flags =
3567                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3568         ricb->mask = cpu_to_le16((u16)(0x3ff));
3569
3570         /*
3571          * Fill out the Indirection Table.
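         * Each of the 1024 entries holds the RSS ring a hash value maps
         * to; assuming rss_ring_count is a power of two, the mask below
         * simply cycles the entries 0, 1, ..., rss_ring_count - 1.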
3572          */
3573         for (i = 0; i < 1024; i++)
3574                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3575
3576         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3577         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3578
3579         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3580
3581         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3582         if (status) {
3583                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3584                 return status;
3585         }
3586         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3587                      "Successfully loaded RICB.\n");
3588         return status;
3589 }
3590
3591 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3592 {
3593         int i, status = 0;
3594
3595         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3596         if (status)
3597                 return status;
3598         /* Clear all the entries in the routing table. */
3599         for (i = 0; i < 16; i++) {
3600                 status = ql_set_routing_reg(qdev, i, 0, 0);
3601                 if (status) {
3602                         netif_err(qdev, ifup, qdev->ndev,
3603                                   "Failed to init routing register for CAM packets.\n");
3604                         break;
3605                 }
3606         }
3607         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3608         return status;
3609 }
3610
3611 /* Initialize the frame-to-queue routing. */
3612 static int ql_route_initialize(struct ql_adapter *qdev)
3613 {
3614         int status = 0;
3615
3616         /* Clear all the entries in the routing table. */
3617         status = ql_clear_routing_entries(qdev);
3618         if (status)
3619                 return status;
3620
3621         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3622         if (status)
3623                 return status;
3624
3625         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3626                                                 RT_IDX_IP_CSUM_ERR, 1);
3627         if (status) {
3628                 netif_err(qdev, ifup, qdev->ndev,
3629                         "Failed to init routing register "
3630                         "for IP CSUM error packets.\n");
3631                 goto exit;
3632         }
3633         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3634                                                 RT_IDX_TU_CSUM_ERR, 1);
3635         if (status) {
3636                 netif_err(qdev, ifup, qdev->ndev,
3637                         "Failed to init routing register "
3638                         "for TCP/UDP CSUM error packets.\n");
3639                 goto exit;
3640         }
3641         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3642         if (status) {
3643                 netif_err(qdev, ifup, qdev->ndev,
3644                           "Failed to init routing register for broadcast packets.\n");
3645                 goto exit;
3646         }
3647         /* If we have more than one inbound queue, then turn on RSS in the
3648          * routing block.
3649          */
3650         if (qdev->rss_ring_count > 1) {
3651                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3652                                         RT_IDX_RSS_MATCH, 1);
3653                 if (status) {
3654                         netif_err(qdev, ifup, qdev->ndev,
3655                                   "Failed to init routing register for MATCH RSS packets.\n");
3656                         goto exit;
3657                 }
3658         }
3659
3660         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3661                                     RT_IDX_CAM_HIT, 1);
3662         if (status)
3663                 netif_err(qdev, ifup, qdev->ndev,
3664                           "Failed to init routing register for CAM packets.\n");
3665 exit:
3666         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3667         return status;
3668 }
3669
3670 int ql_cam_route_initialize(struct ql_adapter *qdev)
3671 {
3672         int status, set;
3673
3674         /* Check if the link is up and use that to
3675          * determine if we are setting or clearing
3676          * the MAC address in the CAM.
3677          */
3678         set = ql_read32(qdev, STS);
3679         set &= qdev->port_link_up;
3680         status = ql_set_mac_addr(qdev, set);
3681         if (status) {
3682                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3683                 return status;
3684         }
3685
3686         status = ql_route_initialize(qdev);
3687         if (status)
3688                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3689
3690         return status;
3691 }
3692
3693 static int ql_adapter_initialize(struct ql_adapter *qdev)
3694 {
3695         u32 value, mask;
3696         int i;
3697         int status = 0;
3698
3699         /*
3700          * Set up the System register to halt on errors.
3701          */
3702         value = SYS_EFE | SYS_FAE;
3703         mask = value << 16;
3704         ql_write32(qdev, SYS, mask | value);
3705
3706         /* Set the default queue, and VLAN behavior. */
3707         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3708         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3709         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3710
3711         /* Enable the MPI interrupt. */
3712         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3713
3714         /* Enable the function, set pagesize, enable error checking. */
3715         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3716             FSC_EC | FSC_VM_PAGE_4K;
3717         value |= SPLT_SETTING;
3718
3719         /* Set/clear header splitting. */
3720         mask = FSC_VM_PAGESIZE_MASK |
3721             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3722         ql_write32(qdev, FSC, mask | value);
3723
3724         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3725
3726         /* Set RX packet routing to use port/pci function on which the
3727          * packet arrived, in addition to the usual frame routing.
3728          * This is helpful on bonding where both interfaces can have
3729          * the same MAC address.
3730          */
3731         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3732         /* Reroute all packets to our Interface.
3733          * They may have been routed to MPI firmware
3734          * due to WOL.
3735          */
3736         value = ql_read32(qdev, MGMT_RCV_CFG);
3737         value &= ~MGMT_RCV_CFG_RM;
3738         mask = 0xffff0000;
3739
3740         /* Sticky reg needs clearing due to WOL. */
3741         ql_write32(qdev, MGMT_RCV_CFG, mask);
3742         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3743
3744         /* WOL is enabled by default on Mezz cards */
3745         if (qdev->pdev->subsystem_device == 0x0068 ||
3746                         qdev->pdev->subsystem_device == 0x0180)
3747                 qdev->wol = WAKE_MAGIC;
3748
3749         /* Start up the rx queues. */
3750         for (i = 0; i < qdev->rx_ring_count; i++) {
3751                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3752                 if (status) {
3753                         netif_err(qdev, ifup, qdev->ndev,
3754                                   "Failed to start rx ring[%d].\n", i);
3755                         return status;
3756                 }
3757         }
3758
3759         /* If there is more than one inbound completion queue
3760          * then download a RICB to configure RSS.
3761          */
3762         if (qdev->rss_ring_count > 1) {
3763                 status = ql_start_rss(qdev);
3764                 if (status) {
3765                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3766                         return status;
3767                 }
3768         }
3769
3770         /* Start up the tx queues. */
3771         for (i = 0; i < qdev->tx_ring_count; i++) {
3772                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3773                 if (status) {
3774                         netif_err(qdev, ifup, qdev->ndev,
3775                                   "Failed to start tx ring[%d].\n", i);
3776                         return status;
3777                 }
3778         }
3779
3780         /* Initialize the port and set the max framesize. */
3781         status = qdev->nic_ops->port_initialize(qdev);
3782         if (status)
3783                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3784
3785         /* Set up the MAC address and frame routing filter. */
3786         status = ql_cam_route_initialize(qdev);
3787         if (status) {
3788                 netif_err(qdev, ifup, qdev->ndev,
3789                           "Failed to init CAM/Routing tables.\n");
3790                 return status;
3791         }
3792
3793         /* Start NAPI for the RSS queues. */
3794         for (i = 0; i < qdev->rss_ring_count; i++) {
3795                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3796                              "Enabling NAPI for rx_ring[%d].\n", i);
3797                 napi_enable(&qdev->rx_ring[i].napi);
3798         }
3799
3800         return status;
3801 }
3802
3803 /* Issue soft reset to chip. */
3804 static int ql_adapter_reset(struct ql_adapter *qdev)
3805 {
3806         u32 value;
3807         int status = 0;
3808         unsigned long end_jiffies;
3809
3810         /* Clear all the entries in the routing table. */
3811         status = ql_clear_routing_entries(qdev);
3812         if (status) {
3813                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3814                 return status;
3815         }
3816
3817         end_jiffies = jiffies +
3818                 max((unsigned long)1, usecs_to_jiffies(30));
3819
3820         /* Stop management traffic. */
3821         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3822
3823         /* Wait for the NIC and MGMNT FIFOs to empty. */
3824         ql_wait_fifo_empty(qdev);
3825
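        /* Request a function reset: the upper halfword is the write-enable
         * mask for RST_FO_FR.  The hardware is expected to clear RST_FO_FR
         * once the reset completes, which the loop below polls for until
         * the timeout computed above (at least one jiffy) expires.
         */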
3826         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3827
3828         do {
3829                 value = ql_read32(qdev, RST_FO);
3830                 if ((value & RST_FO_FR) == 0)
3831                         break;
3832                 cpu_relax();
3833         } while (time_before(jiffies, end_jiffies));
3834
3835         if (value & RST_FO_FR) {
3836                 netif_err(qdev, ifdown, qdev->ndev,
3837                           "Timed out waiting for the chip reset to complete.\n");
3838                 status = -ETIMEDOUT;
3839         }
3840
3841         /* Resume management traffic. */
3842         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3843         return status;
3844 }
3845
3846 static void ql_display_dev_info(struct net_device *ndev)
3847 {
3848         struct ql_adapter *qdev = netdev_priv(ndev);
3849
3850         netif_info(qdev, probe, qdev->ndev,
3851                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3852                    "XG Roll = %d, XG Rev = %d.\n",
3853                    qdev->func,
3854                    qdev->port,
3855                    qdev->chip_rev_id & 0x0000000f,
3856                    qdev->chip_rev_id >> 4 & 0x0000000f,
3857                    qdev->chip_rev_id >> 8 & 0x0000000f,
3858                    qdev->chip_rev_id >> 12 & 0x0000000f);
3859         netif_info(qdev, probe, qdev->ndev,
3860                    "MAC address %pM\n", ndev->dev_addr);
3861 }
3862
3863 static int ql_wol(struct ql_adapter *qdev)
3864 {
3865         int status = 0;
3866         u32 wol = MB_WOL_DISABLE;
3867
3868         /* The CAM is still intact after a reset, but if we
3869          * are doing WOL, then we may need to program the
3870          * routing regs. We would also need to issue the mailbox
3871          * commands to instruct the MPI what to do per the ethtool
3872          * settings.
3873          */
3874
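        /* Only magic-packet wake is handled by this driver; any other
         * ethtool WOL flag is rejected here rather than silently ignored.
         */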
3875         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3876                         WAKE_MCAST | WAKE_BCAST)) {
3877                 netif_err(qdev, ifdown, qdev->ndev,
3878                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3879                           qdev->wol);
3880                 return -EINVAL;
3881         }
3882
3883         if (qdev->wol & WAKE_MAGIC) {
3884                 status = ql_mb_wol_set_magic(qdev, 1);
3885                 if (status) {
3886                         netif_err(qdev, ifdown, qdev->ndev,
3887                                   "Failed to set magic packet on %s.\n",
3888                                   qdev->ndev->name);
3889                         return status;
3890                 } else
3891                         netif_info(qdev, drv, qdev->ndev,
3892                                    "Enabled magic packet successfully on %s.\n",
3893                                    qdev->ndev->name);
3894
3895                 wol |= MB_WOL_MAGIC_PKT;
3896         }
3897
3898         if (qdev->wol) {
3899                 wol |= MB_WOL_MODE_ON;
3900                 status = ql_mb_wol_mode(qdev, wol);
3901                 netif_err(qdev, drv, qdev->ndev,
3902                           "WOL %s (wol code 0x%x) on %s\n",
3903                           (status == 0) ? "Successfully set" : "Failed",
3904                           wol, qdev->ndev->name);
3905         }
3906
3907         return status;
3908 }
3909
3910 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3911 {
3912
3913         /* Don't kill the reset worker thread if we
3914          * are in the process of recovery.
3915          */
3916         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3917                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3918         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3919         cancel_delayed_work_sync(&qdev->mpi_work);
3920         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3921         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3922         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3923 }
3924
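/* Bring the adapter down in a safe order: drop the link and cancel any
 * pending work first, then quiesce NAPI and interrupts before cleaning
 * the TX rings, resetting the chip and freeing the RX buffers.
 */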
3925 static int ql_adapter_down(struct ql_adapter *qdev)
3926 {
3927         int i, status = 0;
3928
3929         ql_link_off(qdev);
3930
3931         ql_cancel_all_work_sync(qdev);
3932
3933         for (i = 0; i < qdev->rss_ring_count; i++)
3934                 napi_disable(&qdev->rx_ring[i].napi);
3935
3936         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3937
3938         ql_disable_interrupts(qdev);
3939
3940         ql_tx_ring_clean(qdev);
3941
3942         /* Call netif_napi_del() from a common point.
3943          */
3944         for (i = 0; i < qdev->rss_ring_count; i++)
3945                 netif_napi_del(&qdev->rx_ring[i].napi);
3946
3947         status = ql_adapter_reset(qdev);
3948         if (status)
3949                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3950                           qdev->func);
3951         ql_free_rx_buffers(qdev);
3952
3953         return status;
3954 }
3955
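/* Bring the adapter up: program the hardware, post RX buffers, restore
 * the RX mode and VLAN settings, then enable interrupts and the TX queues.
 * The carrier is only turned on if the port reports both init and link up.
 */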
3956 static int ql_adapter_up(struct ql_adapter *qdev)
3957 {
3958         int err = 0;
3959
3960         err = ql_adapter_initialize(qdev);
3961         if (err) {
3962                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3963                 goto err_init;
3964         }
3965         set_bit(QL_ADAPTER_UP, &qdev->flags);
3966         ql_alloc_rx_buffers(qdev);
3967         /* If the port is initialized and the
3968          * link is up, then turn on the carrier.
3969          */
3970         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3971                         (ql_read32(qdev, STS) & qdev->port_link_up))
3972                 ql_link_on(qdev);
3973         /* Restore rx mode. */
3974         clear_bit(QL_ALLMULTI, &qdev->flags);
3975         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3976         qlge_set_multicast_list(qdev->ndev);
3977
3978         /* Restore vlan setting. */
3979         qlge_restore_vlan(qdev);
3980
3981         ql_enable_interrupts(qdev);
3982         ql_enable_all_completion_interrupts(qdev);
3983         netif_tx_start_all_queues(qdev->ndev);
3984
3985         return 0;
3986 err_init:
3987         ql_adapter_reset(qdev);
3988         return err;
3989 }
3990
3991 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3992 {
3993         ql_free_mem_resources(qdev);
3994         ql_free_irq(qdev);
3995 }
3996
3997 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3998 {
3999         int status = 0;
4000
4001         if (ql_alloc_mem_resources(qdev)) {
4002                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4003                 return -ENOMEM;
4004         }
4005         status = ql_request_irq(qdev);
4006         return status;
4007 }
4008
4009 static int qlge_close(struct net_device *ndev)
4010 {
4011         struct ql_adapter *qdev = netdev_priv(ndev);
4012
4013         /* If we hit the pci_channel_io_perm_failure
4014          * condition, then we have already
4015          * brought the adapter down.
4016          */
4017         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4018                 netif_err(qdev, drv, qdev->ndev, "EEH fatal error; adapter already brought down.\n");
4019                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4020                 return 0;
4021         }
4022
4023         /*
4024          * Wait for device to recover from a reset.
4025          * (Rarely happens, but possible.)
4026          */
4027         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4028                 msleep(1);
4029         ql_adapter_down(qdev);
4030         ql_release_adapter_resources(qdev);
4031         return 0;
4032 }
4033
4034 static int ql_configure_rings(struct ql_adapter *qdev)
4035 {
4036         int i;
4037         struct rx_ring *rx_ring;
4038         struct tx_ring *tx_ring;
4039         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4040         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4041                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4042
4043         qdev->lbq_buf_order = get_order(lbq_buf_len);
4044
4045         /* In a perfect world we have one RSS ring for each CPU
4046          * and each has its own vector.  To do that we ask for
4047          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4048          * vector count to what we actually get.  We then
4049          * allocate an RSS ring for each.
4050          * Essentially, we are doing min(cpu_count, msix_vector_count).
4051          */
4052         qdev->intr_count = cpu_cnt;
4053         ql_enable_msix(qdev);
4054         /* Adjust the RSS ring count to the actual vector count. */
4055         qdev->rss_ring_count = qdev->intr_count;
4056         qdev->tx_ring_count = cpu_cnt;
4057         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4058
4059         for (i = 0; i < qdev->tx_ring_count; i++) {
4060                 tx_ring = &qdev->tx_ring[i];
4061                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4062                 tx_ring->qdev = qdev;
4063                 tx_ring->wq_id = i;
4064                 tx_ring->wq_len = qdev->tx_ring_size;
4065                 tx_ring->wq_size =
4066                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4067
4068                 /*
4069                  * The completion queue IDs for the tx rings start
4070                  * immediately after the rss rings.
4071                  */
4072                 tx_ring->cq_id = qdev->rss_ring_count + i;
4073         }
4074
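        /* The first rss_ring_count entries of rx_ring[] are inbound RSS
         * completion queues with large/small buffer queues attached; the
         * remaining entries only service outbound (TX) completions, which
         * matches the cq_id assignment done for the tx rings above.
         */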
4075         for (i = 0; i < qdev->rx_ring_count; i++) {
4076                 rx_ring = &qdev->rx_ring[i];
4077                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4078                 rx_ring->qdev = qdev;
4079                 rx_ring->cq_id = i;
4080                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4081                 if (i < qdev->rss_ring_count) {
4082                         /*
4083                          * Inbound (RSS) queues.
4084                          */
4085                         rx_ring->cq_len = qdev->rx_ring_size;
4086                         rx_ring->cq_size =
4087                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4088                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4089                         rx_ring->lbq_size =
4090                             rx_ring->lbq_len * sizeof(__le64);
4091                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4092                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4093                                      "lbq_buf_size %d, order = %d\n",
4094                                      rx_ring->lbq_buf_size,
4095                                      qdev->lbq_buf_order);
4096                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4097                         rx_ring->sbq_size =
4098                             rx_ring->sbq_len * sizeof(__le64);
4099                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4100                         rx_ring->type = RX_Q;
4101                 } else {
4102                         /*
4103                          * Outbound queue handles outbound completions only.
4104                          */
4105                         /* outbound cq is same size as tx_ring it services. */
4106                         rx_ring->cq_len = qdev->tx_ring_size;
4107                         rx_ring->cq_size =
4108                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4109                         rx_ring->lbq_len = 0;
4110                         rx_ring->lbq_size = 0;
4111                         rx_ring->lbq_buf_size = 0;
4112                         rx_ring->sbq_len = 0;
4113                         rx_ring->sbq_size = 0;
4114                         rx_ring->sbq_buf_size = 0;
4115                         rx_ring->type = TX_Q;
4116                 }
4117         }
4118         return 0;
4119 }
4120
4121 static int qlge_open(struct net_device *ndev)
4122 {
4123         int err = 0;
4124         struct ql_adapter *qdev = netdev_priv(ndev);
4125
4126         err = ql_adapter_reset(qdev);
4127         if (err)
4128                 return err;
4129
4130         err = ql_configure_rings(qdev);
4131         if (err)
4132                 return err;
4133
4134         err = ql_get_adapter_resources(qdev);
4135         if (err)
4136                 goto error_up;
4137
4138         err = ql_adapter_up(qdev);
4139         if (err)
4140                 goto error_up;
4141
4142         return err;
4143
4144 error_up:
4145         ql_release_adapter_resources(qdev);
4146         return err;
4147 }
4148
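/* A change in the large RX buffer size (driven by the MTU) requires a full
 * down/up cycle so the new buffers can be posted to the hardware.  If a
 * reset is already in flight, wait briefly for the adapter to come back
 * up before tearing it down again.
 */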
4149 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4150 {
4151         struct rx_ring *rx_ring;
4152         int i, status;
4153         u32 lbq_buf_len;
4154
4155         /* Wait for an outstanding reset to complete. */
4156         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4157                 int i = 3;
4158                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4159                         netif_err(qdev, ifup, qdev->ndev,
4160                                   "Waiting for adapter UP...\n");
4161                         ssleep(1);
4162                 }
4163
4164                 if (!i) {
4165                         netif_err(qdev, ifup, qdev->ndev,
4166                                   "Timed out waiting for adapter UP\n");
4167                         return -ETIMEDOUT;
4168                 }
4169         }
4170
4171         status = ql_adapter_down(qdev);
4172         if (status)
4173                 goto error;
4174
4175         /* Get the new rx buffer size. */
4176         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4177                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4178         qdev->lbq_buf_order = get_order(lbq_buf_len);
4179
4180         for (i = 0; i < qdev->rss_ring_count; i++) {
4181                 rx_ring = &qdev->rx_ring[i];
4182                 /* Set the new size. */
4183                 rx_ring->lbq_buf_size = lbq_buf_len;
4184         }
4185
4186         status = ql_adapter_up(qdev);
4187         if (status)
4188                 goto error;
4189
4190         return status;
4191 error:
4192         netif_alert(qdev, ifup, qdev->ndev,
4193                     "Driver up/down cycle failed, closing device.\n");
4194         set_bit(QL_ADAPTER_UP, &qdev->flags);
4195         dev_close(qdev->ndev);
4196         return status;
4197 }
4198
4199 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4200 {
4201         struct ql_adapter *qdev = netdev_priv(ndev);
4202         int status;
4203
4204         if (ndev->mtu == 1500 && new_mtu == 9000) {
4205                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4206         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4207                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4208         } else
4209                 return -EINVAL;
4210
4211         queue_delayed_work(qdev->workqueue,
4212                         &qdev->mpi_port_cfg_work, 3*HZ);
4213
4214         ndev->mtu = new_mtu;
4215
4216         if (!netif_running(qdev->ndev)) {
4217                 return 0;
4218         }
4219
4220         status = ql_change_rx_buffers(qdev);
4221         if (status) {
4222                 netif_err(qdev, ifup, qdev->ndev,
4223                           "Changing MTU failed.\n");
4224         }
4225
4226         return status;
4227 }
4228
4229 static struct net_device_stats *qlge_get_stats(struct net_device
4230                                                *ndev)
4231 {
4232         struct ql_adapter *qdev = netdev_priv(ndev);
4233         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4234         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4235         unsigned long pkts, mcast, dropped, errors, bytes;
4236         int i;
4237
4238         /* Get RX stats. */
4239         pkts = mcast = dropped = errors = bytes = 0;
4240         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4241                 pkts += rx_ring->rx_packets;
4242                 bytes += rx_ring->rx_bytes;
4243                 dropped += rx_ring->rx_dropped;
4244                 errors += rx_ring->rx_errors;
4245                 mcast += rx_ring->rx_multicast;
4246         }
4247         ndev->stats.rx_packets = pkts;
4248         ndev->stats.rx_bytes = bytes;
4249         ndev->stats.rx_dropped = dropped;
4250         ndev->stats.rx_errors = errors;
4251         ndev->stats.multicast = mcast;
4252
4253         /* Get TX stats. */
4254         pkts = errors = bytes = 0;
4255         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4256                 pkts += tx_ring->tx_packets;
4257                 bytes += tx_ring->tx_bytes;
4258                 errors += tx_ring->tx_errors;
4259         }
4260         ndev->stats.tx_packets = pkts;
4261         ndev->stats.tx_bytes = bytes;
4262         ndev->stats.tx_errors = errors;
4263         return &ndev->stats;
4264 }
4265
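/* Rebuild the RX filtering state: take the routing-index semaphore, flip
 * the promiscuous and all-multi routing slots only on transitions, and
 * (re)load the exact-match multicast addresses into the MAC address CAM.
 */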
4266 static void qlge_set_multicast_list(struct net_device *ndev)
4267 {
4268         struct ql_adapter *qdev = netdev_priv(ndev);
4269         struct netdev_hw_addr *ha;
4270         int i, status;
4271
4272         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4273         if (status)
4274                 return;
4275         /*
4276          * Set or clear promiscuous mode if a
4277          * transition is taking place.
4278          */
4279         if (ndev->flags & IFF_PROMISC) {
4280                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4281                         if (ql_set_routing_reg
4282                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4283                                 netif_err(qdev, hw, qdev->ndev,
4284                                           "Failed to set promiscuous mode.\n");
4285                         } else {
4286                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4287                         }
4288                 }
4289         } else {
4290                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4291                         if (ql_set_routing_reg
4292                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4293                                 netif_err(qdev, hw, qdev->ndev,
4294                                           "Failed to clear promiscuous mode.\n");
4295                         } else {
4296                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4297                         }
4298                 }
4299         }
4300
4301         /*
4302          * Set or clear all multicast mode if a
4303          * transition is taking place.
4304          */
4305         if ((ndev->flags & IFF_ALLMULTI) ||
4306             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4307                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4308                         if (ql_set_routing_reg
4309                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4310                                 netif_err(qdev, hw, qdev->ndev,
4311                                           "Failed to set all-multi mode.\n");
4312                         } else {
4313                                 set_bit(QL_ALLMULTI, &qdev->flags);
4314                         }
4315                 }
4316         } else {
4317                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4318                         if (ql_set_routing_reg
4319                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4320                                 netif_err(qdev, hw, qdev->ndev,
4321                                           "Failed to clear all-multi mode.\n");
4322                         } else {
4323                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4324                         }
4325                 }
4326         }
4327
4328         if (!netdev_mc_empty(ndev)) {
4329                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4330                 if (status)
4331                         goto exit;
4332                 i = 0;
4333                 netdev_for_each_mc_addr(ha, ndev) {
4334                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4335                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4336                                 netif_err(qdev, hw, qdev->ndev,
4337                                           "Failed to load multicast address.\n");
4338                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4339                                 goto exit;
4340                         }
4341                         i++;
4342                 }
4343                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4344                 if (ql_set_routing_reg
4345                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4346                         netif_err(qdev, hw, qdev->ndev,
4347                                   "Failed to set multicast match mode.\n");
4348                 } else {
4349                         set_bit(QL_ALLMULTI, &qdev->flags);
4350                 }
4351         }
4352 exit:
4353         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4354 }
4355
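/* Set a new station address: validate it, keep a local copy for use after
 * resets, and program it into the CAM under the MAC address semaphore
 * shared with the firmware.
 */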
4356 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4357 {
4358         struct ql_adapter *qdev = netdev_priv(ndev);
4359         struct sockaddr *addr = p;
4360         int status;
4361
4362         if (!is_valid_ether_addr(addr->sa_data))
4363                 return -EADDRNOTAVAIL;
4364         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4365         /* Update local copy of current mac address. */
4366         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4367
4368         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4369         if (status)
4370                 return status;
4371         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4372                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4373         if (status)
4374                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4375         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4376         return status;
4377 }
4378
4379 static void qlge_tx_timeout(struct net_device *ndev)
4380 {
4381         struct ql_adapter *qdev = netdev_priv(ndev);
4382         ql_queue_asic_error(qdev);
4383 }
4384
4385 static void ql_asic_reset_work(struct work_struct *work)
4386 {
4387         struct ql_adapter *qdev =
4388             container_of(work, struct ql_adapter, asic_reset_work.work);
4389         int status;
4390         rtnl_lock();
4391         status = ql_adapter_down(qdev);
4392         if (status)
4393                 goto error;
4394
4395         status = ql_adapter_up(qdev);
4396         if (status)
4397                 goto error;
4398
4399         /* Restore rx mode. */
4400         clear_bit(QL_ALLMULTI, &qdev->flags);
4401         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4402         qlge_set_multicast_list(qdev->ndev);
4403
4404         rtnl_unlock();
4405         return;
4406 error:
4407         netif_alert(qdev, ifup, qdev->ndev,
4408                     "Driver up/down cycle failed, closing device\n");
4409
4410         set_bit(QL_ADAPTER_UP, &qdev->flags);
4411         dev_close(qdev->ndev);
4412         rtnl_unlock();
4413 }
4414
4415 static struct nic_operations qla8012_nic_ops = {
4416         .get_flash              = ql_get_8012_flash_params,
4417         .port_initialize        = ql_8012_port_initialize,
4418 };
4419
4420 static struct nic_operations qla8000_nic_ops = {
4421         .get_flash              = ql_get_8000_flash_params,
4422         .port_initialize        = ql_8000_port_initialize,
4423 };
4424
4425 /* Find the pcie function number for the other NIC
4426  * on this chip.  Since both NIC functions share a
4427  * common firmware we have the lowest enabled function
4428  * do any common work.  Examples would be resetting
4429  * after a fatal firmware error, or doing a firmware
4430  * coredump.
4431  */
4432 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4433 {
4434         int status = 0;
4435         u32 temp;
4436         u32 nic_func1, nic_func2;
4437
4438         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4439                         &temp);
4440         if (status)
4441                 return status;
4442
4443         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4444                         MPI_TEST_NIC_FUNC_MASK);
4445         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4446                         MPI_TEST_NIC_FUNC_MASK);
4447
4448         if (qdev->func == nic_func1)
4449                 qdev->alt_func = nic_func2;
4450         else if (qdev->func == nic_func2)
4451                 qdev->alt_func = nic_func1;
4452         else
4453                 status = -EIO;
4454
4455         return status;
4456 }
4457
4458 static int ql_get_board_info(struct ql_adapter *qdev)
4459 {
4460         int status;
4461         qdev->func =
4462             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4463         if (qdev->func > 3)
4464                 return -EIO;
4465
4466         status = ql_get_alt_pcie_func(qdev);
4467         if (status)
4468                 return status;
4469
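        /* By convention in this driver, the NIC function with the lower
         * PCI function number is treated as port 0 and uses the function-0
         * mailbox and XGMAC semaphore; the other function gets port 1.
         */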
4470         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4471         if (qdev->port) {
4472                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4473                 qdev->port_link_up = STS_PL1;
4474                 qdev->port_init = STS_PI1;
4475                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4476                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4477         } else {
4478                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4479                 qdev->port_link_up = STS_PL0;
4480                 qdev->port_init = STS_PI0;
4481                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4482                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4483         }
4484         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4485         qdev->device_id = qdev->pdev->device;
4486         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4487                 qdev->nic_ops = &qla8012_nic_ops;
4488         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4489                 qdev->nic_ops = &qla8000_nic_ops;
4490         return status;
4491 }
4492
4493 static void ql_release_all(struct pci_dev *pdev)
4494 {
4495         struct net_device *ndev = pci_get_drvdata(pdev);
4496         struct ql_adapter *qdev = netdev_priv(ndev);
4497
4498         if (qdev->workqueue) {
4499                 destroy_workqueue(qdev->workqueue);
4500                 qdev->workqueue = NULL;
4501         }
4502
4503         if (qdev->reg_base)
4504                 iounmap(qdev->reg_base);
4505         if (qdev->doorbell_area)
4506                 iounmap(qdev->doorbell_area);
4507         vfree(qdev->mpi_coredump);
4508         pci_release_regions(pdev);
4509         pci_set_drvdata(pdev, NULL);
4510 }
4511
4512 static int __devinit ql_init_device(struct pci_dev *pdev,
4513                                     struct net_device *ndev, int cards_found)
4514 {
4515         struct ql_adapter *qdev = netdev_priv(ndev);
4516         int err = 0;
4517
4518         memset((void *)qdev, 0, sizeof(*qdev));
4519         err = pci_enable_device(pdev);
4520         if (err) {
4521                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4522                 return err;
4523         }
4524
4525         qdev->ndev = ndev;
4526         qdev->pdev = pdev;
4527         pci_set_drvdata(pdev, ndev);
4528
4529         /* Set PCIe read request size */
4530         err = pcie_set_readrq(pdev, 4096);
4531         if (err) {
4532                 dev_err(&pdev->dev, "Set readrq failed.\n");
4533                 goto err_out1;
4534         }
4535
4536         err = pci_request_regions(pdev, DRV_NAME);
4537         if (err) {
4538                 dev_err(&pdev->dev, "PCI region request failed.\n");
4539                 goto err_out1;
4540         }
4541
4542         pci_set_master(pdev);
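        /* Prefer 64-bit DMA and remember the capability in QL_DMA64 (used
         * later to advertise NETIF_F_HIGHDMA); fall back to a 32-bit mask
         * if the platform cannot provide it.
         */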
4543         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4544                 set_bit(QL_DMA64, &qdev->flags);
4545                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4546         } else {
4547                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4548                 if (!err)
4549                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4550         }
4551
4552         if (err) {
4553                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4554                 goto err_out2;
4555         }
4556
4557         /* Set PCIe reset type for EEH to fundamental. */
4558         pdev->needs_freset = 1;
4559         pci_save_state(pdev);
4560         qdev->reg_base =
4561             ioremap_nocache(pci_resource_start(pdev, 1),
4562                             pci_resource_len(pdev, 1));
4563         if (!qdev->reg_base) {
4564                 dev_err(&pdev->dev, "Register mapping failed.\n");
4565                 err = -ENOMEM;
4566                 goto err_out2;
4567         }
4568
4569         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4570         qdev->doorbell_area =
4571             ioremap_nocache(pci_resource_start(pdev, 3),
4572                             pci_resource_len(pdev, 3));
4573         if (!qdev->doorbell_area) {
4574                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4575                 err = -ENOMEM;
4576                 goto err_out2;
4577         }
4578
4579         err = ql_get_board_info(qdev);
4580         if (err) {
4581                 dev_err(&pdev->dev, "Register access failed.\n");
4582                 err = -EIO;
4583                 goto err_out2;
4584         }
4585         qdev->msg_enable = netif_msg_init(debug, default_msg);
4586         spin_lock_init(&qdev->hw_lock);
4587         spin_lock_init(&qdev->stats_lock);
4588
4589         if (qlge_mpi_coredump) {
4590                 qdev->mpi_coredump =
4591                         vmalloc(sizeof(struct ql_mpi_coredump));
4592                 if (qdev->mpi_coredump == NULL) {
4593                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4594                         err = -ENOMEM;
4595                         goto err_out2;
4596                 }
4597                 if (qlge_force_coredump)
4598                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4599         }
4600         /* Make sure the flash contents are valid. */
4601         err = qdev->nic_ops->get_flash(qdev);
4602         if (err) {
4603                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4604                 goto err_out2;
4605         }
4606
4607         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4608         /* Keep local copy of current mac address. */
4609         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4610
4611         /* Set up the default ring sizes. */
4612         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4613         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4614
4615         /* Set up the coalescing parameters. */
4616         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4617         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4618         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4619         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4620
4621         /*
4622          * Set up the operating parameters.
4623          */
4624         qdev->rx_csum = 1;
4625         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4626         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4627         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4628         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4629         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4630         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4631         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4632         init_completion(&qdev->ide_completion);
4633         mutex_init(&qdev->mpi_mutex);
4634
4635         if (!cards_found) {
4636                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4637                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4638                          DRV_NAME, DRV_VERSION);
4639         }
4640         return 0;
4641 err_out2:
4642         ql_release_all(pdev);
4643 err_out1:
4644         pci_disable_device(pdev);
4645         return err;
4646 }
4647
4648 static const struct net_device_ops qlge_netdev_ops = {
4649         .ndo_open               = qlge_open,
4650         .ndo_stop               = qlge_close,
4651         .ndo_start_xmit         = qlge_send,
4652         .ndo_change_mtu         = qlge_change_mtu,
4653         .ndo_get_stats          = qlge_get_stats,
4654         .ndo_set_multicast_list = qlge_set_multicast_list,
4655         .ndo_set_mac_address    = qlge_set_mac_address,
4656         .ndo_validate_addr      = eth_validate_addr,
4657         .ndo_tx_timeout         = qlge_tx_timeout,
4658         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4659         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4660         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4661 };
4662
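/* Periodic watchdog used to surface EEH errors: the register read below
 * gives the platform's error-detection machinery a chance to notice a dead
 * bus, in which case the timer is not re-armed.  Otherwise it re-fires
 * every 5 seconds.
 */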
4663 static void ql_timer(unsigned long data)
4664 {
4665         struct ql_adapter *qdev = (struct ql_adapter *)data;
4666         u32 var = 0;
4667
4668         var = ql_read32(qdev, STS);
4669         if (pci_channel_offline(qdev->pdev)) {
4670                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4671                 return;
4672         }
4673
4674         mod_timer(&qdev->timer, jiffies + (5*HZ));
4675 }
4676
4677 static int __devinit qlge_probe(struct pci_dev *pdev,
4678                                 const struct pci_device_id *pci_entry)
4679 {
4680         struct net_device *ndev = NULL;
4681         struct ql_adapter *qdev = NULL;
4682         static int cards_found;
4683         int err = 0;
4684
4685         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4686                         min(MAX_CPUS, (int)num_online_cpus()));
4687         if (!ndev)
4688                 return -ENOMEM;
4689
4690         err = ql_init_device(pdev, ndev, cards_found);
4691         if (err < 0) {
4692                 free_netdev(ndev);
4693                 return err;
4694         }
4695
4696         qdev = netdev_priv(ndev);
4697         SET_NETDEV_DEV(ndev, &pdev->dev);
4698         ndev->features = (0
4699                           | NETIF_F_IP_CSUM
4700                           | NETIF_F_SG
4701                           | NETIF_F_TSO
4702                           | NETIF_F_TSO6
4703                           | NETIF_F_TSO_ECN
4704                           | NETIF_F_HW_VLAN_TX
4705                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4706         ndev->features |= NETIF_F_GRO;
4707
4708         if (test_bit(QL_DMA64, &qdev->flags))
4709                 ndev->features |= NETIF_F_HIGHDMA;
4710
4711         /*
4712          * Set up net_device structure.
4713          */
4714         ndev->tx_queue_len = qdev->tx_ring_size;
4715         ndev->irq = pdev->irq;
4716
4717         ndev->netdev_ops = &qlge_netdev_ops;
4718         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4719         ndev->watchdog_timeo = 10 * HZ;
4720
4721         err = register_netdev(ndev);
4722         if (err) {
4723                 dev_err(&pdev->dev, "net device registration failed.\n");
4724                 ql_release_all(pdev);
4725                 pci_disable_device(pdev);
4726                 return err;
4727         }
4728         /* Start up the timer to trigger EEH if
4729          * the bus goes dead
4730          */
4731         init_timer_deferrable(&qdev->timer);
4732         qdev->timer.data = (unsigned long)qdev;
4733         qdev->timer.function = ql_timer;
4734         qdev->timer.expires = jiffies + (5*HZ);
4735         add_timer(&qdev->timer);
4736         ql_link_off(qdev);
4737         ql_display_dev_info(ndev);
4738         atomic_set(&qdev->lb_count, 0);
4739         cards_found++;
4740         return 0;
4741 }
4742
4743 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4744 {
4745         return qlge_send(skb, ndev);
4746 }
4747
4748 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4749 {
4750         return ql_clean_inbound_rx_ring(rx_ring, budget);
4751 }
4752
4753 static void __devexit qlge_remove(struct pci_dev *pdev)
4754 {
4755         struct net_device *ndev = pci_get_drvdata(pdev);
4756         struct ql_adapter *qdev = netdev_priv(ndev);
4757         del_timer_sync(&qdev->timer);
4758         ql_cancel_all_work_sync(qdev);
4759         unregister_netdev(ndev);
4760         ql_release_all(pdev);
4761         pci_disable_device(pdev);
4762         free_netdev(ndev);
4763 }
4764
4765 /* Clean up resources without touching hardware. */
4766 static void ql_eeh_close(struct net_device *ndev)
4767 {
4768         int i;
4769         struct ql_adapter *qdev = netdev_priv(ndev);
4770
4771         if (netif_carrier_ok(ndev)) {
4772                 netif_carrier_off(ndev);
4773                 netif_stop_queue(ndev);
4774         }
4775
4776         /* Disabling the timer */
4777         del_timer_sync(&qdev->timer);
4778         ql_cancel_all_work_sync(qdev);
4779
4780         for (i = 0; i < qdev->rss_ring_count; i++)
4781                 netif_napi_del(&qdev->rx_ring[i].napi);
4782
4783         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4784         ql_tx_ring_clean(qdev);
4785         ql_free_rx_buffers(qdev);
4786         ql_release_adapter_resources(qdev);
4787 }
4788
4789 /*
4790  * This callback is called by the PCI subsystem whenever
4791  * a PCI bus error is detected.
4792  */
4793 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4794                                                enum pci_channel_state state)
4795 {
4796         struct net_device *ndev = pci_get_drvdata(pdev);
4797         struct ql_adapter *qdev = netdev_priv(ndev);
4798
4799         switch (state) {
4800         case pci_channel_io_normal:
4801                 return PCI_ERS_RESULT_CAN_RECOVER;
4802         case pci_channel_io_frozen:
4803                 netif_device_detach(ndev);
4804                 if (netif_running(ndev))
4805                         ql_eeh_close(ndev);
4806                 pci_disable_device(pdev);
4807                 return PCI_ERS_RESULT_NEED_RESET;
4808         case pci_channel_io_perm_failure:
4809                 dev_err(&pdev->dev,
4810                         "%s: pci_channel_io_perm_failure.\n", __func__);
4811                 ql_eeh_close(ndev);
4812                 set_bit(QL_EEH_FATAL, &qdev->flags);
4813                 return PCI_ERS_RESULT_DISCONNECT;
4814         }
4815
4816         /* Request a slot reset. */
4817         return PCI_ERS_RESULT_NEED_RESET;
4818 }
4819
4820 /*
4821  * This callback is called after the PCI bus has been reset.
4822  * Basically, this tries to restart the card from scratch.
4823  * This is a shortened version of the device probe/discovery code;
4824  * it resembles the first half of the probe routine.
4825  */
4826 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4827 {
4828         struct net_device *ndev = pci_get_drvdata(pdev);
4829         struct ql_adapter *qdev = netdev_priv(ndev);
4830
4831         pdev->error_state = pci_channel_io_normal;
4832
4833         pci_restore_state(pdev);
4834         if (pci_enable_device(pdev)) {
4835                 netif_err(qdev, ifup, qdev->ndev,
4836                           "Cannot re-enable PCI device after reset.\n");
4837                 return PCI_ERS_RESULT_DISCONNECT;
4838         }
4839         pci_set_master(pdev);
4840
4841         if (ql_adapter_reset(qdev)) {
4842                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4843                 set_bit(QL_EEH_FATAL, &qdev->flags);
4844                 return PCI_ERS_RESULT_DISCONNECT;
4845         }
4846
4847         return PCI_ERS_RESULT_RECOVERED;
4848 }
4849
4850 static void qlge_io_resume(struct pci_dev *pdev)
4851 {
4852         struct net_device *ndev = pci_get_drvdata(pdev);
4853         struct ql_adapter *qdev = netdev_priv(ndev);
4854         int err = 0;
4855
4856         if (netif_running(ndev)) {
4857                 err = qlge_open(ndev);
4858                 if (err) {
4859                         netif_err(qdev, ifup, qdev->ndev,
4860                                   "Device initialization failed after reset.\n");
4861                         return;
4862                 }
4863         } else {
4864                 netif_err(qdev, ifup, qdev->ndev,
4865                           "Device was not running prior to EEH.\n");
4866         }
4867         mod_timer(&qdev->timer, jiffies + (5*HZ));
4868         netif_device_attach(ndev);
4869 }
4870
4871 static struct pci_error_handlers qlge_err_handler = {
4872         .error_detected = qlge_io_error_detected,
4873         .slot_reset = qlge_io_slot_reset,
4874         .resume = qlge_io_resume,
4875 };
4876
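/* Suspend path (also used by qlge_shutdown): detach the interface, bring
 * the adapter down if it was running, arm Wake-on-LAN via ql_wol(), then
 * save PCI state and drop the device into the requested power state.
 */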
4877 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4878 {
4879         struct net_device *ndev = pci_get_drvdata(pdev);
4880         struct ql_adapter *qdev = netdev_priv(ndev);
4881         int err;
4882
4883         netif_device_detach(ndev);
4884         del_timer_sync(&qdev->timer);
4885
4886         if (netif_running(ndev)) {
4887                 err = ql_adapter_down(qdev);
4888                 if (err)
4889                         return err;
4890         }
4891
4892         ql_wol(qdev);
4893         err = pci_save_state(pdev);
4894         if (err)
4895                 return err;
4896
4897         pci_disable_device(pdev);
4898
4899         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4900
4901         return 0;
4902 }
4903
4904 #ifdef CONFIG_PM
4905 static int qlge_resume(struct pci_dev *pdev)
4906 {
4907         struct net_device *ndev = pci_get_drvdata(pdev);
4908         struct ql_adapter *qdev = netdev_priv(ndev);
4909         int err;
4910
4911         pci_set_power_state(pdev, PCI_D0);
4912         pci_restore_state(pdev);
4913         err = pci_enable_device(pdev);
4914         if (err) {
4915                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4916                 return err;
4917         }
4918         pci_set_master(pdev);
4919
4920         pci_enable_wake(pdev, PCI_D3hot, 0);
4921         pci_enable_wake(pdev, PCI_D3cold, 0);
4922
4923         if (netif_running(ndev)) {
4924                 err = ql_adapter_up(qdev);
4925                 if (err)
4926                         return err;
4927         }
4928
4929         mod_timer(&qdev->timer, jiffies + (5*HZ));
4930         netif_device_attach(ndev);
4931
4932         return 0;
4933 }
4934 #endif /* CONFIG_PM */
4935
4936 static void qlge_shutdown(struct pci_dev *pdev)
4937 {
4938         qlge_suspend(pdev, PMSG_SUSPEND);
4939 }
4940
4941 static struct pci_driver qlge_driver = {
4942         .name = DRV_NAME,
4943         .id_table = qlge_pci_tbl,
4944         .probe = qlge_probe,
4945         .remove = __devexit_p(qlge_remove),
4946 #ifdef CONFIG_PM
4947         .suspend = qlge_suspend,
4948         .resume = qlge_resume,
4949 #endif
4950         .shutdown = qlge_shutdown,
4951         .err_handler = &qlge_err_handler
4952 };
4953
4954 static int __init qlge_init_module(void)
4955 {
4956         return pci_register_driver(&qlge_driver);
4957 }
4958
4959 static void __exit qlge_exit(void)
4960 {
4961         pci_unregister_driver(&qlge_driver);
4962 }
4963
4964 module_init(qlge_init_module);
4965 module_exit(qlge_exit);