drivers/net/ethernet/qlogic/qlge/qlge_main.c (pandora-kernel.git)
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do not allocate memory.");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCoE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
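
/* Illustrative sketch (not wired into the driver): the acquire/use/release
 * pattern the semaphore helpers above support.  ql_set_mac_addr() further
 * down follows exactly this shape; the helper itself and its _sketch name
 * are hypothetical additions.
 */
static inline int ql_sem_usage_sketch(struct ql_adapter *qdev)
{
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); /* spins up to ~3ms */
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it */
	/* ... access the shared MAC address CAM registers here ... */
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);	/* write + read back flushes */
	return 0;
}
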
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialize
163  * process, but is also used in process context from netdev callbacks
164  * such as set_multi, set_mac_address and vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
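/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * polling contract above means a caller sees 0 on success, -EIO if err_bit
 * was raised, or -ETIMEDOUT after roughly UDELAY_COUNT * UDELAY_DELAY
 * microseconds of busy-waiting.  A typical call site, mirroring the MAC CAM
 * code below:
 */
static inline int ql_wait_mac_addr_mw_sketch(struct ql_adapter *qdev)
{
	/* Wait until the MAC address index register will accept a write. */
	return ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
}
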
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 goto lock_failed;       /* must still unmap the DMA area */
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
lock_failed:
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
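/* Illustrative sketch (hypothetical helper, not part of the driver): callers
 * hand ql_write_cfg() a control block, a load opcode and a queue id.  For
 * example, CFG_LCQ asks the chip to load a completion-queue ICB for the
 * given CQ; the actual ring bring-up code later in this file is the
 * authoritative example.
 */
static inline int ql_load_cq_icb_sketch(struct ql_adapter *qdev, void *cqicb,
					int len, u16 cq_id)
{
	return ql_write_cfg(qdev, cqicb, len, CFG_LCQ, cq_id);
}
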
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                          * and possibly the function id.  Right now we
413                          * hardcode the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, addr does not hold an address; it
429                          * carries a single enable bit (MAC_ADDR_E, bit 27)
430                          * that enables or disables the VLAN id we are
431                          * addressing.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * have to clear it to prevent wrong frame routing,
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init mac address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
651
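/* Illustrative sketch (hypothetical helper, not part of the driver): enabling
 * one routing slot, here sending broadcast frames to the default (slow path)
 * queue.  The real routing bring-up performs a series of such calls while
 * holding SEM_RT_IDX_MASK, which is assumed here as well.
 */
static inline int ql_route_bcast_sketch(struct ql_adapter *qdev)
{
	int status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);

	if (status)
		return status;
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
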
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're running multiple MSI-X vectors
676                  * and it's not the default (zeroth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're running multiple MSI-X vectors
700          * and it's not the default (zeroth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
716
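/* Illustrative sketch (hypothetical, not part of the driver) of the counting
 * scheme described above for the non-MSI-X / vector-0 case: every disable
 * bumps irq_cnt, every enable drops it, and the interrupt is only re-armed
 * once the count returns to zero.
 */
static inline void ql_irq_counting_sketch(struct ql_adapter *qdev)
{
	ql_disable_completion_interrupt(qdev, 0);   /* irq_cnt++ */
	/* ... run deferred work for vector 0 here ... */
	ql_enable_completion_interrupt(qdev, 0);    /* irq_cnt--, re-arm at 0 */
}
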
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does an atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
732
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770         /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu endian
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
893
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
958
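/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * read-modify-write idiom the xgmac accessors support.  The 8012 port
 * bring-up below does this for GLOBAL_CFG, TX_CFG and RX_CFG; callers are
 * expected to hold qdev->xg_sem_mask first.
 */
static inline int ql_xgmac_set_bits_sketch(struct ql_adapter *qdev,
					   u32 reg, u32 bits)
{
	u32 data;
	int status = ql_read_xgmac_reg(qdev, reg, &data);

	if (status)
		return status;
	return ql_write_xgmac_reg(qdev, reg, data | bits);
}
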
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethtool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable transmitter and clear its reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable receiver and clear its reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Turn on jumbo. */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled. */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
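/* Illustrative sketch (hypothetical loop, not part of the driver): the two
 * helpers above are used together by the completion-queue service path -
 * advance the consumer index once per response entry handled, then publish
 * the new index to the chip with a single doorbell write.
 */
static inline void ql_cq_service_sketch(struct rx_ring *rx_ring, int handled)
{
	while (handled--)
		ql_update_cq(rx_ring);      /* consume one response entry */
	ql_write_cq_idx(rx_ring);           /* tell the chip how far we got */
}
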
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
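
/* Illustrative sketch (hypothetical helper, not part of the driver): each
 * "master" page block above is carved into block_size / lbq_buf_size rx
 * buffers.  Every chunk except the last takes an extra page reference, and
 * the DMA mapping is released only when the final chunk of the block is
 * consumed in ql_get_curr_lchunk().
 */
static inline unsigned int ql_chunks_per_block_sketch(struct ql_adapter *qdev,
						      struct rx_ring *rx_ring)
{
	return ql_lbq_block_size(qdev) / rx_ring->lbq_buf_size;
}
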
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                         rx_ring->lbq_buf_size);
1197                         *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If it's the zeroth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there are more than 6 frags,
1314                          * then it's an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
1339
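/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * mapping/unmapping rules above mean a frame always costs one DMA mapping
 * for skb->data, one per page fragment, and one extra mapping for the
 * external OAL whenever there are more than 7 fragments (so segment 7 of
 * the IOCB can point at the spill-over list).
 */
static inline int ql_tx_mappings_needed_sketch(struct sk_buff *skb)
{
	int frags = skb_shinfo(skb)->nr_frags;

	return 1 + frags + (frags > 7 ? 1 : 0);
}
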
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the size
1421                          * of each entry in the sglist (OAL).
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
1432                 }
1433
1434                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435                                        DMA_TO_DEVICE);
1436
1437                 err = dma_mapping_error(&qdev->pdev->dev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   skb_frag_size(frag));
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then map_idx will be 1,
1461          * which unmaps only the skb->data area.  Otherwise we pass
1462          * in the number of segments that mapped successfully so
1463          * they can be unmapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct napi_struct *napi = &rx_ring->napi;
1479
1480         napi->dev = qdev->ndev;
1481
1482         skb = napi_get_frags(napi);
1483         if (!skb) {
1484                 netif_err(qdev, drv, qdev->ndev,
1485                           "Couldn't get an skb, exiting.\n");
1486                 rx_ring->rx_dropped++;
1487                 put_page(lbq_desc->p.pg_chunk.page);
1488                 return;
1489         }
1490         prefetch(lbq_desc->p.pg_chunk.va);
1491         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492                              lbq_desc->p.pg_chunk.page,
1493                              lbq_desc->p.pg_chunk.offset,
1494                              length);
1495
1496         skb->len += length;
1497         skb->data_len += length;
1498         skb->truesize += length;
1499         skb_shinfo(skb)->nr_frags++;
1500
1501         rx_ring->rx_packets++;
1502         rx_ring->rx_bytes += length;
1503         skb->ip_summed = CHECKSUM_UNNECESSARY;
1504         skb_record_rx_queue(skb, rx_ring->cq_id);
1505         if (vlan_id != 0xffff)
1506                 __vlan_hwaccel_put_tag(skb, vlan_id);
1507         napi_gro_frags(napi);
1508 }
1509
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512                                         struct rx_ring *rx_ring,
1513                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1514                                         u32 length,
1515                                         u16 vlan_id)
1516 {
1517         struct net_device *ndev = qdev->ndev;
1518         struct sk_buff *skb = NULL;
1519         void *addr;
1520         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521         struct napi_struct *napi = &rx_ring->napi;
1522
1523         skb = netdev_alloc_skb(ndev, length);
1524         if (!skb) {
1525                 netif_err(qdev, drv, qdev->ndev,
1526                           "Couldn't get an skb, need to unwind!\n");
1527                 rx_ring->rx_dropped++;
1528                 put_page(lbq_desc->p.pg_chunk.page);
1529                 return;
1530         }
1531
1532         addr = lbq_desc->p.pg_chunk.va;
1533         prefetch(addr);
1534
1535
1536         /* Frame error, so drop the packet. */
1537         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538                 netif_info(qdev, drv, qdev->ndev,
1539                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540                 rx_ring->rx_errors++;
1541                 goto err_out;
1542         }
1543
1544         /* The max framesize filter on this chip is set higher than
1545          * MTU since FCoE uses 2k frames.
1546          */
1547         if (length > ndev->mtu + ETH_HLEN) {
1548                 netif_err(qdev, drv, qdev->ndev,
1549                           "Frame too long, dropping.\n");
1550                 rx_ring->rx_dropped++;
1551                 goto err_out;
1552         }
1553         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556                      length);
1557         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559                                 length-ETH_HLEN);
1560         skb->len += length-ETH_HLEN;
1561         skb->data_len += length-ETH_HLEN;
1562         skb->truesize += length-ETH_HLEN;
1563
1564         rx_ring->rx_packets++;
1565         rx_ring->rx_bytes += skb->len;
1566         skb->protocol = eth_type_trans(skb, ndev);
1567         skb_checksum_none_assert(skb);
1568
1569         if ((ndev->features & NETIF_F_RXCSUM) &&
1570                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571                 /* TCP frame. */
1572                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574                                      "TCP checksum done!\n");
1575                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1576                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578                         /* Unfragmented ipv4 UDP frame. */
1579                         struct iphdr *iph = (struct iphdr *) skb->data;
1580                         if (!(iph->frag_off &
1581                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1584                                              qdev->ndev,
1585                                              "UDP checksum done!\n");
1586                         }
1587                 }
1588         }
1589
1590         skb_record_rx_queue(skb, rx_ring->cq_id);
1591         if (vlan_id != 0xffff)
1592                 __vlan_hwaccel_put_tag(skb, vlan_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594                 napi_gro_receive(napi, skb);
1595         else
1596                 netif_receive_skb(skb);
1597         return;
1598 err_out:
1599         dev_kfree_skb_any(skb);
1600         put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605                                         struct rx_ring *rx_ring,
1606                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1607                                         u32 length,
1608                                         u16 vlan_id)
1609 {
1610         struct net_device *ndev = qdev->ndev;
1611         struct sk_buff *skb = NULL;
1612         struct sk_buff *new_skb = NULL;
1613         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615         skb = sbq_desc->p.skb;
1616         /* Allocate new_skb and copy */
1617         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618         if (new_skb == NULL) {
1619                 netif_err(qdev, probe, qdev->ndev,
1620                           "No skb available, drop the packet.\n");
1621                 rx_ring->rx_dropped++;
1622                 return;
1623         }
1624         skb_reserve(new_skb, NET_IP_ALIGN);
1625         memcpy(skb_put(new_skb, length), skb->data, length);
1626         skb = new_skb;
1627
1628         /* Frame error, so drop the packet. */
1629         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630                 netif_info(qdev, drv, qdev->ndev,
1631                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632                 dev_kfree_skb_any(skb);
1633                 rx_ring->rx_errors++;
1634                 return;
1635         }
1636
1637         /* loopback self test for ethtool */
1638         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639                 ql_check_lb_frame(qdev, skb);
1640                 dev_kfree_skb_any(skb);
1641                 return;
1642         }
1643
1644         /* The max framesize filter on this chip is set higher than
1645          * MTU since FCoE uses 2k frames.
1646          */
1647         if (skb->len > ndev->mtu + ETH_HLEN) {
1648                 dev_kfree_skb_any(skb);
1649                 rx_ring->rx_dropped++;
1650                 return;
1651         }
1652
1653         prefetch(skb->data);
1654         skb->dev = ndev;
1655         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657                              "%s Multicast.\n",
1658                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664         }
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "Promiscuous Packet.\n");
1668
1669         rx_ring->rx_packets++;
1670         rx_ring->rx_bytes += skb->len;
1671         skb->protocol = eth_type_trans(skb, ndev);
1672         skb_checksum_none_assert(skb);
1673
1674         /* If rx checksum is on, and there are no
1675          * csum or frame errors.
1676          */
1677         if ((ndev->features & NETIF_F_RXCSUM) &&
1678                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679                 /* TCP frame. */
1680                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682                                      "TCP checksum done!\n");
1683                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686                         /* Unfragmented ipv4 UDP frame. */
1687                         struct iphdr *iph = (struct iphdr *) skb->data;
1688                         if (!(iph->frag_off &
1689                                 htons(IP_MF|IP_OFFSET))) {
1690                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1692                                              qdev->ndev,
1693                                              "UDP checksum done!\n");
1694                         }
1695                 }
1696         }
1697
1698         skb_record_rx_queue(skb, rx_ring->cq_id);
1699         if (vlan_id != 0xffff)
1700                 __vlan_hwaccel_put_tag(skb, vlan_id);
1701         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702                 napi_gro_receive(&rx_ring->napi, skb);
1703         else
1704                 netif_receive_skb(skb);
1705 }
1706
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709         void *temp_addr = skb->data;
1710
1711         /* Undo the skb_reserve(skb,32) we did before
1712          * giving to hardware, and realign data on
1713          * a 2-byte boundary.
1714          */
1715         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717         skb_copy_to_linear_data(skb, temp_addr,
1718                 (unsigned int)len);
1719 }
1720
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for now it works well.
1725  */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727                                        struct rx_ring *rx_ring,
1728                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730         struct bq_desc *lbq_desc;
1731         struct bq_desc *sbq_desc;
1732         struct sk_buff *skb = NULL;
1733         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736         /*
1737          * Handle the header buffer if present.
1738          */
1739         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                              "Header of %d bytes in small buffer.\n", hdr_len);
1743                 /*
1744                  * Headers fit nicely into a small buffer.
1745                  */
1746                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747                 pci_unmap_single(qdev->pdev,
1748                                 dma_unmap_addr(sbq_desc, mapaddr),
1749                                 dma_unmap_len(sbq_desc, maplen),
1750                                 PCI_DMA_FROMDEVICE);
1751                 skb = sbq_desc->p.skb;
1752                 ql_realign_skb(skb, hdr_len);
1753                 skb_put(skb, hdr_len);
1754                 sbq_desc->p.skb = NULL;
1755         }
1756
1757         /*
1758          * Handle the data buffer(s).
1759          */
1760         if (unlikely(!length)) {        /* Is there data too? */
1761                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762                              "No Data buffer in this packet.\n");
1763                 return skb;
1764         }
1765
1766         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769                                      "Headers in small, data of %d bytes in small, combine them.\n",
1770                                      length);
1771                         /*
1772                          * Data is less than small buffer size so it's
1773                          * stuffed in a small buffer.
1774                          * For this case we append the data
1775                          * from the "data" small buffer to the "header" small
1776                          * buffer.
1777                          */
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         pci_dma_sync_single_for_cpu(qdev->pdev,
1780                                                     dma_unmap_addr
1781                                                     (sbq_desc, mapaddr),
1782                                                     dma_unmap_len
1783                                                     (sbq_desc, maplen),
1784                                                     PCI_DMA_FROMDEVICE);
1785                         memcpy(skb_put(skb, length),
1786                                sbq_desc->p.skb->data, length);
1787                         pci_dma_sync_single_for_device(qdev->pdev,
1788                                                        dma_unmap_addr
1789                                                        (sbq_desc,
1790                                                         mapaddr),
1791                                                        dma_unmap_len
1792                                                        (sbq_desc,
1793                                                         maplen),
1794                                                        PCI_DMA_FROMDEVICE);
1795                 } else {
1796                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                                      "%d bytes in a single small buffer.\n",
1798                                      length);
1799                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1800                         skb = sbq_desc->p.skb;
1801                         ql_realign_skb(skb, length);
1802                         skb_put(skb, length);
1803                         pci_unmap_single(qdev->pdev,
1804                                          dma_unmap_addr(sbq_desc,
1805                                                         mapaddr),
1806                                          dma_unmap_len(sbq_desc,
1807                                                        maplen),
1808                                          PCI_DMA_FROMDEVICE);
1809                         sbq_desc->p.skb = NULL;
1810                 }
1811         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "Header in small, %d bytes in large. Chain large to small!\n",
1815                                      length);
1816                         /*
1817                          * The data is in a single large buffer.  We
1818                          * chain it to the header buffer's skb and let
1819                          * it rip.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1824                                      lbq_desc->p.pg_chunk.offset, length);
1825                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826                                                 lbq_desc->p.pg_chunk.offset,
1827                                                 length);
1828                         skb->len += length;
1829                         skb->data_len += length;
1830                         skb->truesize += length;
1831                 } else {
1832                         /*
1833                          * The headers and data are in a single large buffer. We
1834                          * copy it to a new skb and let it go. This can happen with
1835                          * jumbo mtu on a non-TCP/UDP frame.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         skb = netdev_alloc_skb(qdev->ndev, length);
1839                         if (skb == NULL) {
1840                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841                                              "No skb available, drop the packet.\n");
1842                                 return NULL;
1843                         }
1844                         pci_unmap_page(qdev->pdev,
1845                                        dma_unmap_addr(lbq_desc,
1846                                                       mapaddr),
1847                                        dma_unmap_len(lbq_desc, maplen),
1848                                        PCI_DMA_FROMDEVICE);
1849                         skb_reserve(skb, NET_IP_ALIGN);
1850                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852                                      length);
1853                         skb_fill_page_desc(skb, 0,
1854                                                 lbq_desc->p.pg_chunk.page,
1855                                                 lbq_desc->p.pg_chunk.offset,
1856                                                 length);
1857                         skb->len += length;
1858                         skb->data_len += length;
1859                         skb->truesize += length;
1860                         length -= length;
1861                         __pskb_pull_tail(skb,
1862                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863                                 VLAN_ETH_HLEN : ETH_HLEN);
1864                 }
1865         } else {
1866                 /*
1867                  * The data is in a chain of large buffers
1868                  * pointed to by a small buffer.  We loop
1869                  * through and chain them to our small header
1870                  * buffer's skb.
1871                  * frags:  There are 18 max frags and our small
1872                  *         buffer will hold 32 of them. The thing is,
1873                  *         we'll use 3 max for our 9000 byte jumbo
1874                  *         frames.  If the MTU goes up we could
1875                  *         eventually be in trouble.
1876                  */
1877                 int size, i = 0;
1878                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879                 pci_unmap_single(qdev->pdev,
1880                                  dma_unmap_addr(sbq_desc, mapaddr),
1881                                  dma_unmap_len(sbq_desc, maplen),
1882                                  PCI_DMA_FROMDEVICE);
1883                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884                         /*
1885                          * This is a non-TCP/UDP IP frame, so
1886                          * the headers aren't split into a small
1887                          * buffer.  We have to use the small buffer
1888                          * that contains our sg list as our skb to
1889                          * send upstairs. Copy the sg list here to
1890                          * a local buffer and use it to find the
1891                          * pages to chain.
1892                          */
1893                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894                                      "%d bytes of headers & data in chain of large.\n",
1895                                      length);
1896                         skb = sbq_desc->p.skb;
1897                         sbq_desc->p.skb = NULL;
1898                         skb_reserve(skb, NET_IP_ALIGN);
1899                 }
1900                 while (length > 0) {
1901                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902                         size = (length < rx_ring->lbq_buf_size) ? length :
1903                                 rx_ring->lbq_buf_size;
1904
1905                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906                                      "Adding page %d to skb for %d bytes.\n",
1907                                      i, size);
1908                         skb_fill_page_desc(skb, i,
1909                                                 lbq_desc->p.pg_chunk.page,
1910                                                 lbq_desc->p.pg_chunk.offset,
1911                                                 size);
1912                         skb->len += size;
1913                         skb->data_len += size;
1914                         skb->truesize += size;
1915                         length -= size;
1916                         i++;
1917                 }
1918                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919                                 VLAN_ETH_HLEN : ETH_HLEN);
1920         }
1921         return skb;
1922 }
1923
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926                                    struct rx_ring *rx_ring,
1927                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1928                                    u16 vlan_id)
1929 {
1930         struct net_device *ndev = qdev->ndev;
1931         struct sk_buff *skb = NULL;
1932
1933         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936         if (unlikely(!skb)) {
1937                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                              "No skb available, drop packet.\n");
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* Frame error, so drop the packet. */
1944         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945                 netif_info(qdev, drv, qdev->ndev,
1946                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947                 dev_kfree_skb_any(skb);
1948                 rx_ring->rx_errors++;
1949                 return;
1950         }
1951
1952         /* The max framesize filter on this chip is set higher than
1953          * MTU since FCoE uses 2k frames.
1954          */
1955         if (skb->len > ndev->mtu + ETH_HLEN) {
1956                 dev_kfree_skb_any(skb);
1957                 rx_ring->rx_dropped++;
1958                 return;
1959         }
1960
1961         /* loopback self test for ethtool */
1962         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963                 ql_check_lb_frame(qdev, skb);
1964                 dev_kfree_skb_any(skb);
1965                 return;
1966         }
1967
1968         prefetch(skb->data);
1969         skb->dev = ndev;
1970         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978                 rx_ring->rx_multicast++;
1979         }
1980         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982                              "Promiscuous Packet.\n");
1983         }
1984
1985         skb->protocol = eth_type_trans(skb, ndev);
1986         skb_checksum_none_assert(skb);
1987
1988         /* If rx checksum is on, and there are no
1989          * csum or frame errors.
1990          */
1991         if ((ndev->features & NETIF_F_RXCSUM) &&
1992                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993                 /* TCP frame. */
1994                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996                                      "TCP checksum done!\n");
1997                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1998                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000                         /* Unfragmented ipv4 UDP frame. */
2001                         struct iphdr *iph = (struct iphdr *) skb->data;
2002                         if (!(iph->frag_off &
2003                                 htons(IP_MF|IP_OFFSET))) {
2004                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006                                              "UDP checksum done!\n");
2007                         }
2008                 }
2009         }
2010
2011         rx_ring->rx_packets++;
2012         rx_ring->rx_bytes += skb->len;
2013         skb_record_rx_queue(skb, rx_ring->cq_id);
2014         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015                 __vlan_hwaccel_put_tag(skb, vlan_id);
2016         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017                 napi_gro_receive(&rx_ring->napi, skb);
2018         else
2019                 netif_receive_skb(skb);
2020 }
2021
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024                                         struct rx_ring *rx_ring,
2025                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2026 {
2027         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2031
2032         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033
2034         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035                 /* The data and headers are split into
2036                  * separate buffers.
2037                  */
2038                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039                                                 vlan_id);
2040         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041                 /* The data fit in a single small buffer.
2042                  * Allocate a new skb, copy the data and
2043                  * return the buffer to the free pool.
2044                  */
2045                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046                                                 length, vlan_id);
2047         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050                 /* TCP packet in a page chunk that's been checksummed.
2051                  * Tack it on to our GRO skb and let it go.
2052                  */
2053                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054                                                 length, vlan_id);
2055         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056                 /* Non-TCP packet in a page chunk. Allocate an
2057                  * skb, tack it on frags, and send it up.
2058                  */
2059                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060                                                 length, vlan_id);
2061         } else {
2062                 /* Non-TCP/UDP large frames that span multiple buffers
2063                  * can be processed correctly by the split frame logic.
2064                  */
2065                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066                                                 vlan_id);
2067         }
2068
2069         return (unsigned long)length;
2070 }
2071
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074                                    struct ob_mac_iocb_rsp *mac_rsp)
2075 {
2076         struct tx_ring *tx_ring;
2077         struct tx_ring_desc *tx_ring_desc;
2078
2079         QL_DUMP_OB_MAC_RSP(mac_rsp);
2080         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084         tx_ring->tx_packets++;
2085         dev_kfree_skb(tx_ring_desc->skb);
2086         tx_ring_desc->skb = NULL;
2087
2088         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089                                         OB_MAC_IOCB_RSP_S |
2090                                         OB_MAC_IOCB_RSP_L |
2091                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093                         netif_warn(qdev, tx_done, qdev->ndev,
2094                                    "Total descriptor length did not match transfer length.\n");
2095                 }
2096                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097                         netif_warn(qdev, tx_done, qdev->ndev,
2098                                    "Frame too short to be valid, not sent.\n");
2099                 }
2100                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101                         netif_warn(qdev, tx_done, qdev->ndev,
2102                                    "Frame too long, but sent anyway.\n");
2103                 }
2104                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105                         netif_warn(qdev, tx_done, qdev->ndev,
2106                                    "PCI backplane error. Frame not sent.\n");
2107                 }
2108         }
2109         atomic_inc(&tx_ring->tx_count);
2110 }
2111
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2114 {
2115         ql_link_off(qdev);
2116         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117 }
2118
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2120 {
2121         ql_link_off(qdev);
2122         ql_disable_interrupts(qdev);
2123         /* Clear adapter up bit to signal the recovery
2124          * process that it shouldn't kill the reset worker
2125          * thread
2126          */
2127         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128         /* Set the asic recovery bit to tell the reset process that we
2129          * are in fatal error recovery rather than a normal close
2130          */
2131         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2132         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133 }
2134
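/* Handle an asynchronous event (AE) IOCB reported by the chip. */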
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2137 {
2138         switch (ib_ae_rsp->event) {
2139         case MGMT_ERR_EVENT:
2140                 netif_err(qdev, rx_err, qdev->ndev,
2141                           "Management Processor Fatal Error.\n");
2142                 ql_queue_fw_error(qdev);
2143                 return;
2144
2145         case CAM_LOOKUP_ERR_EVENT:
2146                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148                 ql_queue_asic_error(qdev);
2149                 return;
2150
2151         case SOFT_ECC_ERROR_EVENT:
2152                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153                 ql_queue_asic_error(qdev);
2154                 break;
2155
2156         case PCI_ERR_ANON_BUF_RD:
2157                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158                                         "anonymous buffers from rx_ring %d.\n",
2159                                         ib_ae_rsp->q_id);
2160                 ql_queue_asic_error(qdev);
2161                 break;
2162
2163         default:
2164                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165                           ib_ae_rsp->event);
2166                 ql_queue_asic_error(qdev);
2167                 break;
2168         }
2169 }
2170
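/* Drain TX completions from an outbound completion ring and wake the
 * TX subqueue if it was stopped and is now at least 25% free.
 */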
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172 {
2173         struct ql_adapter *qdev = rx_ring->qdev;
2174         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175         struct ob_mac_iocb_rsp *net_rsp = NULL;
2176         int count = 0;
2177
2178         struct tx_ring *tx_ring;
2179         /* While there are entries in the completion queue. */
2180         while (prod != rx_ring->cnsmr_idx) {
2181
2182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2184                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2185
2186                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187                 rmb();
2188                 switch (net_rsp->opcode) {
2189
2190                 case OPCODE_OB_MAC_TSO_IOCB:
2191                 case OPCODE_OB_MAC_IOCB:
2192                         ql_process_mac_tx_intr(qdev, net_rsp);
2193                         break;
2194                 default:
2195                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197                                      net_rsp->opcode);
2198                 }
2199                 count++;
2200                 ql_update_cq(rx_ring);
2201                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2202         }
2203         if (!net_rsp)
2204                 return 0;
2205         ql_write_cq_idx(rx_ring);
2206         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208                 if (atomic_read(&tx_ring->queue_stopped) &&
2209                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210                         /*
2211                          * The queue got stopped because the tx_ring was full.
2212                          * Wake it up, because it's now at least 25% empty.
2213                          */
2214                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2215         }
2216
2217         return count;
2218 }
2219
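/* Process up to 'budget' inbound completions, then replenish the
 * buffer queues and update the consumer index register.
 */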
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221 {
2222         struct ql_adapter *qdev = rx_ring->qdev;
2223         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224         struct ql_net_rsp_iocb *net_rsp;
2225         int count = 0;
2226
2227         /* While there are entries in the completion queue. */
2228         while (prod != rx_ring->cnsmr_idx) {
2229
2230                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2232                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2233
2234                 net_rsp = rx_ring->curr_entry;
2235                 rmb();
2236                 switch (net_rsp->opcode) {
2237                 case OPCODE_IB_MAC_IOCB:
2238                         ql_process_mac_rx_intr(qdev, rx_ring,
2239                                                (struct ib_mac_iocb_rsp *)
2240                                                net_rsp);
2241                         break;
2242
2243                 case OPCODE_IB_AE_IOCB:
2244                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245                                                 net_rsp);
2246                         break;
2247                 default:
2248                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250                                      net_rsp->opcode);
2251                         break;
2252                 }
2253                 count++;
2254                 ql_update_cq(rx_ring);
2255                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256                 if (count == budget)
2257                         break;
2258         }
2259         ql_update_buffer_queues(qdev, rx_ring);
2260         ql_write_cq_idx(rx_ring);
2261         return count;
2262 }
2263
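/* NAPI poll routine for an MSI-X vector.  Drains any TX completion
 * rings owned by this vector, then services the RSS (inbound) ring
 * and re-enables the interrupt if the budget was not exhausted.
 */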
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265 {
2266         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267         struct ql_adapter *qdev = rx_ring->qdev;
2268         struct rx_ring *trx_ring;
2269         int i, work_done = 0;
2270         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2271
2272         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2274
2275         /* Service the TX rings first.  They start
2276          * right after the RSS rings. */
2277         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278                 trx_ring = &qdev->rx_ring[i];
2279                 /* If this TX completion ring belongs to this vector and
2280                  * it's not empty then service it.
2281                  */
2282                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284                                         trx_ring->cnsmr_idx)) {
2285                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286                                      "%s: Servicing TX completion ring %d.\n",
2287                                      __func__, trx_ring->cq_id);
2288                         ql_clean_outbound_rx_ring(trx_ring);
2289                 }
2290         }
2291
2292         /*
2293          * Now service the RSS ring if it's active.
2294          */
2295         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296                                         rx_ring->cnsmr_idx) {
2297                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298                              "%s: Servicing RX completion ring %d.\n",
2299                              __func__, rx_ring->cq_id);
2300                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301         }
2302
2303         if (work_done < budget) {
2304                 napi_complete(napi);
2305                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306         }
2307         return work_done;
2308 }
2309
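/* Turn hardware VLAN handling in NIC_RCV_CFG on or off to match the
 * requested features.
 */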
2310 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2311 {
2312         struct ql_adapter *qdev = netdev_priv(ndev);
2313
2314         if (features & NETIF_F_HW_VLAN_RX) {
2315                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316                              "Turning on VLAN in NIC_RCV_CFG.\n");
2317                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2319         } else {
2320                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321                              "Turning off VLAN in NIC_RCV_CFG.\n");
2322                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323         }
2324 }
2325
2326 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2327 {
2328         /*
2329          * Since there is no support for separate rx/tx vlan accel
2330          * enable/disable, make sure the tx flag is always in the same state as rx.
2331          */
2332         if (features & NETIF_F_HW_VLAN_RX)
2333                 features |= NETIF_F_HW_VLAN_TX;
2334         else
2335                 features &= ~NETIF_F_HW_VLAN_TX;
2336
2337         return features;
2338 }
2339
2340 static int qlge_set_features(struct net_device *ndev, u32 features)
2341 {
2342         u32 changed = ndev->features ^ features;
2343
2344         if (changed & NETIF_F_HW_VLAN_RX)
2345                 qlge_vlan_mode(ndev, features);
2346
2347         return 0;
2348 }
2349
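/* Add a VLAN id to the hardware filter.  Caller must hold the
 * SEM_MAC_ADDR_MASK semaphore.
 */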
2350 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2351 {
2352         u32 enable_bit = MAC_ADDR_E;
2353
2354         if (ql_set_mac_addr_reg
2355             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2356                 netif_err(qdev, ifup, qdev->ndev,
2357                           "Failed to init vlan address.\n");
2358         }
2359 }
2360
2361 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2362 {
2363         struct ql_adapter *qdev = netdev_priv(ndev);
2364         int status;
2365
2366         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367         if (status)
2368                 return;
2369
2370         __qlge_vlan_rx_add_vid(qdev, vid);
2371         set_bit(vid, qdev->active_vlans);
2372
2373         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2374 }
2375
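/* Clear a VLAN id from the hardware filter.  Caller must hold the
 * SEM_MAC_ADDR_MASK semaphore.
 */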
2376 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2377 {
2378         u32 enable_bit = 0;
2379
2380         if (ql_set_mac_addr_reg
2381             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2382                 netif_err(qdev, ifup, qdev->ndev,
2383                           "Failed to clear vlan address.\n");
2384         }
2385 }
2386
2387 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2388 {
2389         struct ql_adapter *qdev = netdev_priv(ndev);
2390         int status;
2391
2392         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2393         if (status)
2394                 return;
2395
2396         __qlge_vlan_rx_kill_vid(qdev, vid);
2397         clear_bit(vid, qdev->active_vlans);
2398
2399         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2400 }
2401
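/* Re-program the hardware filter for every VLAN id that is currently
 * marked active.
 */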
2402 static void qlge_restore_vlan(struct ql_adapter *qdev)
2403 {
2404         int status;
2405         u16 vid;
2406
2407         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2408         if (status)
2409                 return;
2410
2411         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2412                 __qlge_vlan_rx_add_vid(qdev, vid);
2413
2414         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2415 }
2416
2417 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2418 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2419 {
2420         struct rx_ring *rx_ring = dev_id;
2421         napi_schedule(&rx_ring->napi);
2422         return IRQ_HANDLED;
2423 }
2424
2425 /* This handles a fatal error, MPI activity, and the default
2426  * rx_ring in an MSI-X multiple vector environment.
2427  * In an MSI/Legacy environment it also processes the rest of
2428  * the rx_rings.
2429  */
2430 static irqreturn_t qlge_isr(int irq, void *dev_id)
2431 {
2432         struct rx_ring *rx_ring = dev_id;
2433         struct ql_adapter *qdev = rx_ring->qdev;
2434         struct intr_context *intr_context = &qdev->intr_context[0];
2435         u32 var;
2436         int work_done = 0;
2437
2438         spin_lock(&qdev->hw_lock);
2439         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2440                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2441                              "Shared Interrupt, Not ours!\n");
2442                 spin_unlock(&qdev->hw_lock);
2443                 return IRQ_NONE;
2444         }
2445         spin_unlock(&qdev->hw_lock);
2446
2447         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2448
2449         /*
2450          * Check for fatal error.
2451          */
2452         if (var & STS_FE) {
2453                 ql_queue_asic_error(qdev);
2454                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2455                 var = ql_read32(qdev, ERR_STS);
2456                 netdev_err(qdev->ndev, "Resetting chip. "
2457                                         "Error Status Register = 0x%x\n", var);
2458                 return IRQ_HANDLED;
2459         }
2460
2461         /*
2462          * Check MPI processor activity.
2463          */
2464         if ((var & STS_PI) &&
2465                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2466                 /*
2467                  * We've got an async event or mailbox completion.
2468                  * Handle it and clear the source of the interrupt.
2469                  */
2470                 netif_err(qdev, intr, qdev->ndev,
2471                           "Got MPI processor interrupt.\n");
2472                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2473                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2474                 queue_delayed_work_on(smp_processor_id(),
2475                                 qdev->workqueue, &qdev->mpi_work, 0);
2476                 work_done++;
2477         }
2478
2479         /*
2480          * Get the bit-mask that shows the active queues for this
2481          * pass.  Compare it to the queues that this irq services
2482          * and call napi if there's a match.
2483          */
2484         var = ql_read32(qdev, ISR1);
2485         if (var & intr_context->irq_mask) {
2486                 netif_info(qdev, intr, qdev->ndev,
2487                            "Waking handler for rx_ring[0].\n");
2488                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2489                 napi_schedule(&rx_ring->napi);
2490                 work_done++;
2491         }
2492         ql_enable_completion_interrupt(qdev, intr_context->intr);
2493         return work_done ? IRQ_HANDLED : IRQ_NONE;
2494 }
2495
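/* Set up the outbound IOCB for TSO.  Returns 1 if TSO was set up,
 * 0 if the skb is not GSO, or a negative errno on failure.
 */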
2496 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2497 {
2498
2499         if (skb_is_gso(skb)) {
2500                 int err;
2501                 if (skb_header_cloned(skb)) {
2502                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2503                         if (err)
2504                                 return err;
2505                 }
2506
2507                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2508                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2509                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2510                 mac_iocb_ptr->total_hdrs_len =
2511                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2512                 mac_iocb_ptr->net_trans_offset =
2513                     cpu_to_le16(skb_network_offset(skb) |
2514                                 skb_transport_offset(skb)
2515                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2516                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2517                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2518                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2519                         struct iphdr *iph = ip_hdr(skb);
2520                         iph->check = 0;
2521                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2522                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2523                                                                  iph->daddr, 0,
2524                                                                  IPPROTO_TCP,
2525                                                                  0);
2526                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2527                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2528                         tcp_hdr(skb)->check =
2529                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2530                                              &ipv6_hdr(skb)->daddr,
2531                                              0, IPPROTO_TCP, 0);
2532                 }
2533                 return 1;
2534         }
2535         return 0;
2536 }
2537
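/* Set up hardware TCP/UDP checksum offload over IPv4 for a non-TSO
 * frame.
 */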
2538 static void ql_hw_csum_setup(struct sk_buff *skb,
2539                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2540 {
2541         int len;
2542         struct iphdr *iph = ip_hdr(skb);
2543         __sum16 *check;
2544         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2545         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2546         mac_iocb_ptr->net_trans_offset =
2547                 cpu_to_le16(skb_network_offset(skb) |
2548                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2549
2550         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2551         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2552         if (likely(iph->protocol == IPPROTO_TCP)) {
2553                 check = &(tcp_hdr(skb)->check);
2554                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2555                 mac_iocb_ptr->total_hdrs_len =
2556                     cpu_to_le16(skb_transport_offset(skb) +
2557                                 (tcp_hdr(skb)->doff << 2));
2558         } else {
2559                 check = &(udp_hdr(skb)->check);
2560                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2561                 mac_iocb_ptr->total_hdrs_len =
2562                     cpu_to_le16(skb_transport_offset(skb) +
2563                                 sizeof(struct udphdr));
2564         }
2565         *check = ~csum_tcpudp_magic(iph->saddr,
2566                                     iph->daddr, len, iph->protocol, 0);
2567 }
2568
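/* Main transmit entry point.  Pads runt frames to ETH_ZLEN, claims a
 * slot on the tx ring selected by skb->queue_mapping, builds the MAC
 * IOCB (adding VLAN, TSO or checksum offload as required), maps the
 * buffers and then rings the doorbell to hand the frame to the chip.
 */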
2569 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2570 {
2571         struct tx_ring_desc *tx_ring_desc;
2572         struct ob_mac_iocb_req *mac_iocb_ptr;
2573         struct ql_adapter *qdev = netdev_priv(ndev);
2574         int tso;
2575         struct tx_ring *tx_ring;
2576         u32 tx_ring_idx = (u32) skb->queue_mapping;
2577
2578         tx_ring = &qdev->tx_ring[tx_ring_idx];
2579
2580         if (skb_padto(skb, ETH_ZLEN))
2581                 return NETDEV_TX_OK;
2582
2583         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2584                 netif_info(qdev, tx_queued, qdev->ndev,
2585                            "%s: shutting down tx queue %d due to lack of resources.\n",
2586                            __func__, tx_ring_idx);
2587                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2588                 atomic_inc(&tx_ring->queue_stopped);
2589                 tx_ring->tx_errors++;
2590                 return NETDEV_TX_BUSY;
2591         }
2592         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2593         mac_iocb_ptr = tx_ring_desc->queue_entry;
2594         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2595
2596         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2597         mac_iocb_ptr->tid = tx_ring_desc->index;
2598         /* We use the upper 32-bits to store the tx queue for this IO.
2599          * When we get the completion we can use it to establish the context.
2600          */
2601         mac_iocb_ptr->txq_idx = tx_ring_idx;
2602         tx_ring_desc->skb = skb;
2603
2604         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2605
2606         if (vlan_tx_tag_present(skb)) {
2607                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2608                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2609                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2610                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2611         }
2612         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2613         if (tso < 0) {
2614                 dev_kfree_skb_any(skb);
2615                 return NETDEV_TX_OK;
2616         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2617                 ql_hw_csum_setup(skb,
2618                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619         }
2620         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2621                         NETDEV_TX_OK) {
2622                 netif_err(qdev, tx_queued, qdev->ndev,
2623                           "Could not map the segments.\n");
2624                 tx_ring->tx_errors++;
2625                 return NETDEV_TX_BUSY;
2626         }
2627         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2628         tx_ring->prod_idx++;
2629         if (tx_ring->prod_idx == tx_ring->wq_len)
2630                 tx_ring->prod_idx = 0;
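        /* Make sure the IOCB contents and the updated producer index are
         * visible in memory before the doorbell write below lets the
         * hardware fetch them.
         */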
2631         wmb();
2632
2633         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2634         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2635                      "tx queued, slot %d, len %d\n",
2636                      tx_ring->prod_idx, skb->len);
2637
2638         atomic_dec(&tx_ring->tx_count);
2639         return NETDEV_TX_OK;
2640 }
2641
2642
2643 static void ql_free_shadow_space(struct ql_adapter *qdev)
2644 {
2645         if (qdev->rx_ring_shadow_reg_area) {
2646                 pci_free_consistent(qdev->pdev,
2647                                     PAGE_SIZE,
2648                                     qdev->rx_ring_shadow_reg_area,
2649                                     qdev->rx_ring_shadow_reg_dma);
2650                 qdev->rx_ring_shadow_reg_area = NULL;
2651         }
2652         if (qdev->tx_ring_shadow_reg_area) {
2653                 pci_free_consistent(qdev->pdev,
2654                                     PAGE_SIZE,
2655                                     qdev->tx_ring_shadow_reg_area,
2656                                     qdev->tx_ring_shadow_reg_dma);
2657                 qdev->tx_ring_shadow_reg_area = NULL;
2658         }
2659 }
2660
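/* The shadow areas are two pages of coherent DMA memory.  The rx page
 * holds the completion queue producer-index shadow registers plus the
 * lbq/sbq indirection lists (carved up in ql_start_rx_ring()); the tx
 * page is used the same way for the tx rings' completion shadow
 * registers.
 */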
2661 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2662 {
2663         qdev->rx_ring_shadow_reg_area =
2664             pci_alloc_consistent(qdev->pdev,
2665                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2666         if (qdev->rx_ring_shadow_reg_area == NULL) {
2667                 netif_err(qdev, ifup, qdev->ndev,
2668                           "Allocation of RX shadow space failed.\n");
2669                 return -ENOMEM;
2670         }
2671         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672         qdev->tx_ring_shadow_reg_area =
2673             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2674                                  &qdev->tx_ring_shadow_reg_dma);
2675         if (qdev->tx_ring_shadow_reg_area == NULL) {
2676                 netif_err(qdev, ifup, qdev->ndev,
2677                           "Allocation of TX shadow space failed.\n");
2678                 goto err_wqp_sh_area;
2679         }
2680         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2681         return 0;
2682
2683 err_wqp_sh_area:
2684         pci_free_consistent(qdev->pdev,
2685                             PAGE_SIZE,
2686                             qdev->rx_ring_shadow_reg_area,
2687                             qdev->rx_ring_shadow_reg_dma);
2688         return -ENOMEM;
2689 }
2690
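/* Link every tx descriptor to its IOCB slot in the DMA work queue and
 * reset the ring's free-slot and stall counters.
 */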
2691 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2692 {
2693         struct tx_ring_desc *tx_ring_desc;
2694         int i;
2695         struct ob_mac_iocb_req *mac_iocb_ptr;
2696
2697         mac_iocb_ptr = tx_ring->wq_base;
2698         tx_ring_desc = tx_ring->q;
2699         for (i = 0; i < tx_ring->wq_len; i++) {
2700                 tx_ring_desc->index = i;
2701                 tx_ring_desc->skb = NULL;
2702                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2703                 mac_iocb_ptr++;
2704                 tx_ring_desc++;
2705         }
2706         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2707         atomic_set(&tx_ring->queue_stopped, 0);
2708 }
2709
2710 static void ql_free_tx_resources(struct ql_adapter *qdev,
2711                                  struct tx_ring *tx_ring)
2712 {
2713         if (tx_ring->wq_base) {
2714                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2715                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2716                 tx_ring->wq_base = NULL;
2717         }
2718         kfree(tx_ring->q);
2719         tx_ring->q = NULL;
2720 }
2721
2722 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2723                                  struct tx_ring *tx_ring)
2724 {
2725         tx_ring->wq_base =
2726             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2727                                  &tx_ring->wq_base_dma);
2728
        if (tx_ring->wq_base == NULL)
                goto pci_alloc_err;

        if (tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
                goto err;

        tx_ring->q =
            kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
        if (tx_ring->q == NULL)
                goto err;

        return 0;
err:
        /* The work queue was allocated but is misaligned or the descriptor
         * array allocation failed; release the DMA area and clear the
         * pointer so the caller's cleanup path doesn't free it again.
         */
        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
                            tx_ring->wq_base, tx_ring->wq_base_dma);
        tx_ring->wq_base = NULL;
pci_alloc_err:
        netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
        return -ENOMEM;
}
2745
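/* Release the pages still held by the large buffer queue.  The DMA
 * block is unmapped only once, when the chunk flagged as the last user
 * of the mapping is reached; every chunk drops its page reference.
 */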
2746 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2747 {
2748         struct bq_desc *lbq_desc;
2749
2750         uint32_t  curr_idx, clean_idx;
2751
2752         curr_idx = rx_ring->lbq_curr_idx;
2753         clean_idx = rx_ring->lbq_clean_idx;
2754         while (curr_idx != clean_idx) {
2755                 lbq_desc = &rx_ring->lbq[curr_idx];
2756
2757                 if (lbq_desc->p.pg_chunk.last_flag) {
2758                         pci_unmap_page(qdev->pdev,
2759                                        lbq_desc->p.pg_chunk.map,
2760                                        ql_lbq_block_size(qdev),
2761                                        PCI_DMA_FROMDEVICE);
2762                         lbq_desc->p.pg_chunk.last_flag = 0;
2763                 }
2764
2765                 put_page(lbq_desc->p.pg_chunk.page);
2766                 lbq_desc->p.pg_chunk.page = NULL;
2767
2768                 if (++curr_idx == rx_ring->lbq_len)
2769                         curr_idx = 0;
2770
2771         }
2772 }
2773
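/* Unmap and free any skbs still parked in the small buffer queue. */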
2774 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2775 {
2776         int i;
2777         struct bq_desc *sbq_desc;
2778
2779         for (i = 0; i < rx_ring->sbq_len; i++) {
2780                 sbq_desc = &rx_ring->sbq[i];
2781                 if (sbq_desc == NULL) {
2782                         netif_err(qdev, ifup, qdev->ndev,
2783                                   "sbq_desc %d is NULL.\n", i);
2784                         return;
2785                 }
2786                 if (sbq_desc->p.skb) {
2787                         pci_unmap_single(qdev->pdev,
2788                                          dma_unmap_addr(sbq_desc, mapaddr),
2789                                          dma_unmap_len(sbq_desc, maplen),
2790                                          PCI_DMA_FROMDEVICE);
2791                         dev_kfree_skb(sbq_desc->p.skb);
2792                         sbq_desc->p.skb = NULL;
2793                 }
2794         }
2795 }
2796
2797 /* Free all large and small rx buffers associated
2798  * with the completion queues for this device.
2799  */
2800 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2801 {
2802         int i;
2803         struct rx_ring *rx_ring;
2804
2805         for (i = 0; i < qdev->rx_ring_count; i++) {
2806                 rx_ring = &qdev->rx_ring[i];
2807                 if (rx_ring->lbq)
2808                         ql_free_lbq_buffers(qdev, rx_ring);
2809                 if (rx_ring->sbq)
2810                         ql_free_sbq_buffers(qdev, rx_ring);
2811         }
2812 }
2813
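/* Prime the large and small buffer queues of every rx completion queue
 * that carries inbound traffic; tx-completion-only rings are skipped.
 */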
2814 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2815 {
2816         struct rx_ring *rx_ring;
2817         int i;
2818
2819         for (i = 0; i < qdev->rx_ring_count; i++) {
2820                 rx_ring = &qdev->rx_ring[i];
2821                 if (rx_ring->type != TX_Q)
2822                         ql_update_buffer_queues(qdev, rx_ring);
2823         }
2824 }
2825
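/* Point each large buffer descriptor at its slot in the hardware-visible
 * buffer queue; refills then only have to write the chunk's DMA address
 * through that pointer.
 */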
2826 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2827                                 struct rx_ring *rx_ring)
2828 {
2829         int i;
2830         struct bq_desc *lbq_desc;
2831         __le64 *bq = rx_ring->lbq_base;
2832
2833         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2834         for (i = 0; i < rx_ring->lbq_len; i++) {
2835                 lbq_desc = &rx_ring->lbq[i];
2836                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2837                 lbq_desc->index = i;
2838                 lbq_desc->addr = bq;
2839                 bq++;
2840         }
2841 }
2842
2843 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2844                                 struct rx_ring *rx_ring)
2845 {
2846         int i;
2847         struct bq_desc *sbq_desc;
2848         __le64 *bq = rx_ring->sbq_base;
2849
2850         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2851         for (i = 0; i < rx_ring->sbq_len; i++) {
2852                 sbq_desc = &rx_ring->sbq[i];
2853                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2854                 sbq_desc->index = i;
2855                 sbq_desc->addr = bq;
2856                 bq++;
2857         }
2858 }
2859
2860 static void ql_free_rx_resources(struct ql_adapter *qdev,
2861                                  struct rx_ring *rx_ring)
2862 {
2863         /* Free the small buffer queue. */
2864         if (rx_ring->sbq_base) {
2865                 pci_free_consistent(qdev->pdev,
2866                                     rx_ring->sbq_size,
2867                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2868                 rx_ring->sbq_base = NULL;
2869         }
2870
2871         /* Free the small buffer queue control blocks. */
2872         kfree(rx_ring->sbq);
2873         rx_ring->sbq = NULL;
2874
2875         /* Free the large buffer queue. */
2876         if (rx_ring->lbq_base) {
2877                 pci_free_consistent(qdev->pdev,
2878                                     rx_ring->lbq_size,
2879                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2880                 rx_ring->lbq_base = NULL;
2881         }
2882
2883         /* Free the large buffer queue control blocks. */
2884         kfree(rx_ring->lbq);
2885         rx_ring->lbq = NULL;
2886
2887         /* Free the rx queue. */
2888         if (rx_ring->cq_base) {
2889                 pci_free_consistent(qdev->pdev,
2890                                     rx_ring->cq_size,
2891                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2892                 rx_ring->cq_base = NULL;
2893         }
2894 }
2895
2896 /* Allocate the queues and buffers for this completion queue based
2897  * on the values in the parameter structure. */
2898 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2899                                  struct rx_ring *rx_ring)
2900 {
2901
2902         /*
2903          * Allocate the completion queue for this rx_ring.
2904          */
2905         rx_ring->cq_base =
2906             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2907                                  &rx_ring->cq_base_dma);
2908
2909         if (rx_ring->cq_base == NULL) {
2910                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2911                 return -ENOMEM;
2912         }
2913
2914         if (rx_ring->sbq_len) {
2915                 /*
2916                  * Allocate small buffer queue.
2917                  */
2918                 rx_ring->sbq_base =
2919                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2920                                          &rx_ring->sbq_base_dma);
2921
2922                 if (rx_ring->sbq_base == NULL) {
2923                         netif_err(qdev, ifup, qdev->ndev,
2924                                   "Small buffer queue allocation failed.\n");
2925                         goto err_mem;
2926                 }
2927
2928                 /*
2929                  * Allocate small buffer queue control blocks.
2930                  */
2931                 rx_ring->sbq =
2932                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2933                             GFP_KERNEL);
2934                 if (rx_ring->sbq == NULL) {
2935                         netif_err(qdev, ifup, qdev->ndev,
2936                                   "Small buffer queue control block allocation failed.\n");
2937                         goto err_mem;
2938                 }
2939
2940                 ql_init_sbq_ring(qdev, rx_ring);
2941         }
2942
2943         if (rx_ring->lbq_len) {
2944                 /*
2945                  * Allocate large buffer queue.
2946                  */
2947                 rx_ring->lbq_base =
2948                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2949                                          &rx_ring->lbq_base_dma);
2950
2951                 if (rx_ring->lbq_base == NULL) {
2952                         netif_err(qdev, ifup, qdev->ndev,
2953                                   "Large buffer queue allocation failed.\n");
2954                         goto err_mem;
2955                 }
2956                 /*
2957                  * Allocate large buffer queue control blocks.
2958                  */
2959                 rx_ring->lbq =
2960                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2961                             GFP_KERNEL);
2962                 if (rx_ring->lbq == NULL) {
2963                         netif_err(qdev, ifup, qdev->ndev,
2964                                   "Large buffer queue control block allocation failed.\n");
2965                         goto err_mem;
2966                 }
2967
2968                 ql_init_lbq_ring(qdev, rx_ring);
2969         }
2970
2971         return 0;
2972
2973 err_mem:
2974         ql_free_rx_resources(qdev, rx_ring);
2975         return -ENOMEM;
2976 }
2977
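/* Reclaim, on the way down, any skbs that were posted for transmit but
 * never completed, so their DMA mappings and memory are not leaked.
 */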
2978 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2979 {
2980         struct tx_ring *tx_ring;
2981         struct tx_ring_desc *tx_ring_desc;
2982         int i, j;
2983
2984         /*
2985          * Loop through all the tx queues and release
2986          * any skbs that were never completed.
2987          */
2988         for (j = 0; j < qdev->tx_ring_count; j++) {
2989                 tx_ring = &qdev->tx_ring[j];
2990                 for (i = 0; i < tx_ring->wq_len; i++) {
2991                         tx_ring_desc = &tx_ring->q[i];
2992                         if (tx_ring_desc && tx_ring_desc->skb) {
2993                                 netif_err(qdev, ifdown, qdev->ndev,
2994                                           "Freeing lost SKB %p from queue %d, index %d.\n",
2995                                           tx_ring_desc->skb, j,
2996                                           tx_ring_desc->index);
2997                                 ql_unmap_send(qdev, tx_ring_desc,
2998                                               tx_ring_desc->map_cnt);
2999                                 dev_kfree_skb(tx_ring_desc->skb);
3000                                 tx_ring_desc->skb = NULL;
3001                         }
3002                 }
3003         }
3004 }
3005
3006 static void ql_free_mem_resources(struct ql_adapter *qdev)
3007 {
3008         int i;
3009
3010         for (i = 0; i < qdev->tx_ring_count; i++)
3011                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3012         for (i = 0; i < qdev->rx_ring_count; i++)
3013                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3014         ql_free_shadow_space(qdev);
3015 }
3016
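/* Allocate every DMA resource the adapter needs: the shadow register
 * pages first, then each rx ring, then each tx ring.  Any failure tears
 * down whatever was already allocated.
 */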
3017 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3018 {
3019         int i;
3020
3021         /* Allocate space for our shadow registers and such. */
3022         if (ql_alloc_shadow_space(qdev))
3023                 return -ENOMEM;
3024
3025         for (i = 0; i < qdev->rx_ring_count; i++) {
3026                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3027                         netif_err(qdev, ifup, qdev->ndev,
3028                                   "RX resource allocation failed.\n");
3029                         goto err_mem;
3030                 }
3031         }
3032         /* Allocate tx queue resources */
3033         for (i = 0; i < qdev->tx_ring_count; i++) {
3034                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3035                         netif_err(qdev, ifup, qdev->ndev,
3036                                   "TX resource allocation failed.\n");
3037                         goto err_mem;
3038                 }
3039         }
3040         return 0;
3041
3042 err_mem:
3043         ql_free_mem_resources(qdev);
3044         return -ENOMEM;
3045 }
3046
3047 /* Set up the rx ring control block and pass it to the chip.
3048  * The control block is defined as
3049  * "Completion Queue Initialization Control Block", or cqicb.
3050  */
3051 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3052 {
3053         struct cqicb *cqicb = &rx_ring->cqicb;
3054         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3055                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3056         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3057                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058         void __iomem *doorbell_area =
3059             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3060         int err = 0;
3061         u16 bq_len;
3062         u64 tmp;
3063         __le64 *base_indirect_ptr;
3064         int page_entries;
3065
3066         /* Set up the shadow registers for this ring. */
3067         rx_ring->prod_idx_sh_reg = shadow_reg;
3068         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3069         *rx_ring->prod_idx_sh_reg = 0;
3070         shadow_reg += sizeof(u64);
3071         shadow_reg_dma += sizeof(u64);
3072         rx_ring->lbq_base_indirect = shadow_reg;
3073         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3074         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3075         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3076         rx_ring->sbq_base_indirect = shadow_reg;
3077         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3078
3079         /* PCI doorbell mem area + 0x00 for consumer index register */
3080         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3081         rx_ring->cnsmr_idx = 0;
3082         rx_ring->curr_entry = rx_ring->cq_base;
3083
3084         /* PCI doorbell mem area + 0x04 for valid register */
3085         rx_ring->valid_db_reg = doorbell_area + 0x04;
3086
3087         /* PCI doorbell mem area + 0x18 for large buffer queue producer index */
3088         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3089
3090         /* PCI doorbell mem area + 0x1c for small buffer queue producer index */
3091         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3092
3093         memset((void *)cqicb, 0, sizeof(struct cqicb));
3094         cqicb->msix_vect = rx_ring->irq;
3095
3096         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3097         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3098
3099         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3100
3101         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3102
3103         /*
3104          * Set up the control block load flags.
3105          */
3106         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3107             FLAGS_LV |          /* Load MSI-X vector */
3108             FLAGS_LI;           /* Load irq delay values */
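        /* If this ring has a large buffer queue, build the list of
         * DB_PAGE_SIZE-sized pieces that describe it and give the chip
         * the address of that indirection list.
         */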
3109         if (rx_ring->lbq_len) {
3110                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3111                 tmp = (u64)rx_ring->lbq_base_dma;
3112                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3113                 page_entries = 0;
3114                 do {
3115                         *base_indirect_ptr = cpu_to_le64(tmp);
3116                         tmp += DB_PAGE_SIZE;
3117                         base_indirect_ptr++;
3118                         page_entries++;
3119                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3120                 cqicb->lbq_addr =
3121                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3122                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3123                         (u16) rx_ring->lbq_buf_size;
3124                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3125                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3126                         (u16) rx_ring->lbq_len;
3127                 cqicb->lbq_len = cpu_to_le16(bq_len);
3128                 rx_ring->lbq_prod_idx = 0;
3129                 rx_ring->lbq_curr_idx = 0;
3130                 rx_ring->lbq_clean_idx = 0;
3131                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3132         }
3133         if (rx_ring->sbq_len) {
3134                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3135                 tmp = (u64)rx_ring->sbq_base_dma;
3136                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3137                 page_entries = 0;
3138                 do {
3139                         *base_indirect_ptr