qlge: Fix receive packets drop.
[pandora-kernel.git] drivers/net/ethernet/qlogic/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do not allocate memory.");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow forcing a firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
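/* Editor's note, illustrative only (not part of the upstream file): all of
 * the parameters above are plain integers, so a load with legacy interrupts
 * and MPI coredump memory pre-allocated might look like:
 *
 *	modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1
 *
 * "debug" is a message level; leaving it at -1 keeps the default_msg
 * bits built above.
 */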
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
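/* Illustrative sketch (editor's addition, not upstream code): the semaphore
 * helpers are used in an acquire/work/release pattern, e.g. the way
 * ql_set_mac_addr() later in this file guards a CAM update:
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	status = ql_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC, index);
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */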
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialization
163  * process, but is also used by kernel thread APIs such as
164  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
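/* Illustrative sketch (editor's addition): indirect registers that sit
 * behind an address/data pair are accessed with a wait/kick/wait/read
 * sequence built on ql_wait_reg_rdy(), as ql_read_flash_word() below does:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *	...
 *	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *	...
 *	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 */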
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 return status;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
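/* Illustrative sketch (editor's addition): callers hand ql_write_cfg() the
 * init control block, its size, the CFG load bit and the queue id.  The
 * completion-queue init block is downloaded elsewhere in this driver
 * roughly like the following (the cqicb/cq_id names here are assumptions,
 * shown only as an example):
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			   CFG_LCQ, rx_ring->cq_id);
 */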
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378
379                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380                                      "Adding %s address %pM at index %d in the CAM.\n",
381                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
382                                      "MULTICAST" : "UNICAST",
383                                      addr, index);
384
385                         status =
386                             ql_wait_reg_rdy(qdev,
387                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388                         if (status)
389                                 goto exit;
390                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
392                                    type);       /* type */
393                         ql_write32(qdev, MAC_ADDR_DATA, lower);
394                         status =
395                             ql_wait_reg_rdy(qdev,
396                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397                         if (status)
398                                 goto exit;
399                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
401                                    type);       /* type */
402                         ql_write32(qdev, MAC_ADDR_DATA, upper);
403                         status =
404                             ql_wait_reg_rdy(qdev,
405                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406                         if (status)
407                                 goto exit;
408                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
409                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
410                                    type);       /* type */
411                         /* This field should also include the queue id
412                            and possibly the function id.  Right now we hardcode
413                            the route field to NIC core.
414                          */
415                         cam_output = (CAM_OUT_ROUTE_NIC |
416                                       (qdev->
417                                        func << CAM_OUT_FUNC_SHIFT) |
418                                         (0 << CAM_OUT_CQ_ID_SHIFT));
419                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420                                 cam_output |= CAM_OUT_RV;
421                         /* route to NIC core */
422                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423                         break;
424                 }
425         case MAC_ADDR_TYPE_VLAN:
426                 {
427                         u32 enable_bit = *((u32 *) &addr[0]);
428                         /* For VLAN, the addr actually holds a bit that
429                          * either enables or disables the vlan id we are
430                          * addressing. It's either MAC_ADDR_E on or off.
431                          * That's bit-27 we're talking about.
432                          */
433                         netif_info(qdev, ifup, qdev->ndev,
434                                    "%s VLAN ID %d %s the CAM.\n",
435                                    enable_bit ? "Adding" : "Removing",
436                                    index,
437                                    enable_bit ? "to" : "from");
438
439                         status =
440                             ql_wait_reg_rdy(qdev,
441                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442                         if (status)
443                                 goto exit;
444                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
446                                    type |       /* type */
447                                    enable_bit); /* enable/disable */
448                         break;
449                 }
450         case MAC_ADDR_TYPE_MULTI_FLTR:
451         default:
452                 netif_crit(qdev, ifup, qdev->ndev,
453                            "Address type %d not yet supported.\n", type);
454                 status = -EPERM;
455         }
456 exit:
457         return status;
458 }
459
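/* Worked example (editor's addition): in the CAM_MAC case above the two
 * data words are built from the six address bytes as
 *	upper = (addr[0] << 8) | addr[1]
 *	lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]
 * so a MAC of 00:c0:dd:11:22:33 is written as upper = 0x00c0 and
 * lower = 0xdd112233, followed by the cam_output routing word.
 */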
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * especially in a bonding environment.
463  */
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466         int status;
467         char zero_mac_addr[ETH_ALEN];
468         char *addr;
469
470         if (set) {
471                 addr = &qdev->current_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Set Mac addr %pM\n", addr);
474         } else {
475                 memset(zero_mac_addr, 0, ETH_ALEN);
476                 addr = &zero_mac_addr[0];
477                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478                              "Clearing MAC address\n");
479         }
480         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 return status;
483         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486         if (status)
487                 netif_err(qdev, ifup, qdev->ndev,
488                           "Failed to init mac address.\n");
489         return status;
490 }
491
492 void ql_link_on(struct ql_adapter *qdev)
493 {
494         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495         netif_carrier_on(qdev->ndev);
496         ql_set_mac_addr(qdev, 1);
497 }
498
499 void ql_link_off(struct ql_adapter *qdev)
500 {
501         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502         netif_carrier_off(qdev->ndev);
503         ql_set_mac_addr(qdev, 0);
504 }
505
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511         int status = 0;
512
513         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514         if (status)
515                 goto exit;
516
517         ql_write32(qdev, RT_IDX,
518                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520         if (status)
521                 goto exit;
522         *value = ql_read32(qdev, RT_DATA);
523 exit:
524         return status;
525 }
526
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533                               int enable)
534 {
535         int status = -EINVAL; /* Return error if no mask match. */
536         u32 value = 0;
537
538         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539                      "%s %s mask %s the routing reg.\n",
540                      enable ? "Adding" : "Removing",
541                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
555                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
556                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557                      "(Bad index != RT_IDX)",
558                      enable ? "to" : "from");
559
560         switch (mask) {
561         case RT_IDX_CAM_HIT:
562                 {
563                         value = RT_IDX_DST_CAM_Q |      /* dest */
564                             RT_IDX_TYPE_NICQ |  /* type */
565                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566                         break;
567                 }
568         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
569                 {
570                         value = RT_IDX_DST_DFLT_Q |     /* dest */
571                             RT_IDX_TYPE_NICQ |  /* type */
572                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573                         break;
574                 }
575         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
576                 {
577                         value = RT_IDX_DST_DFLT_Q |     /* dest */
578                             RT_IDX_TYPE_NICQ |  /* type */
579                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580                         break;
581                 }
582         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583                 {
584                         value = RT_IDX_DST_DFLT_Q | /* dest */
585                                 RT_IDX_TYPE_NICQ | /* type */
586                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
587                                 RT_IDX_IDX_SHIFT); /* index */
588                         break;
589                 }
590         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q | /* dest */
593                                 RT_IDX_TYPE_NICQ | /* type */
594                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595                                 RT_IDX_IDX_SHIFT); /* index */
596                         break;
597                 }
598         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
599                 {
600                         value = RT_IDX_DST_DFLT_Q |     /* dest */
601                             RT_IDX_TYPE_NICQ |  /* type */
602                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603                         break;
604                 }
605         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
606                 {
607                         value = RT_IDX_DST_DFLT_Q |     /* dest */
608                             RT_IDX_TYPE_NICQ |  /* type */
609                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610                         break;
611                 }
612         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
613                 {
614                         value = RT_IDX_DST_DFLT_Q |     /* dest */
615                             RT_IDX_TYPE_NICQ |  /* type */
616                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617                         break;
618                 }
619         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
620                 {
621                         value = RT_IDX_DST_RSS |        /* dest */
622                             RT_IDX_TYPE_NICQ |  /* type */
623                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624                         break;
625                 }
626         case 0:         /* Clear the E-bit on an entry. */
627                 {
628                         value = RT_IDX_DST_DFLT_Q |     /* dest */
629                             RT_IDX_TYPE_NICQ |  /* type */
630                             (index << RT_IDX_IDX_SHIFT);/* index */
631                         break;
632                 }
633         default:
634                 netif_err(qdev, ifup, qdev->ndev,
635                           "Mask type %d not yet supported.\n", mask);
636                 status = -EPERM;
637                 goto exit;
638         }
639
640         if (value) {
641                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642                 if (status)
643                         goto exit;
644                 value |= (enable ? RT_IDX_E : 0);
645                 ql_write32(qdev, RT_IDX, value);
646                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647         }
648 exit:
649         return status;
650 }
651
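/* Illustrative sketch (editor's addition): routing slots are programmed one
 * mask at a time while holding the RT_IDX hardware semaphore, using the
 * slot/mask pairs handled above, e.g.:
 *
 *	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 *	if (status)
 *		return status;
 *	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
 *	if (!status)
 *		status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 */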
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670         u32 var = 0;
671         unsigned long hw_flags = 0;
672         struct intr_context *ctx = qdev->intr_context + intr;
673
674         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675                 /* Always enable if we're running with multiple MSI-X vectors and
676                  * it's not the default (zeroeth) interrupt.
677                  */
678                 ql_write32(qdev, INTR_EN,
679                            ctx->intr_en_mask);
680                 var = ql_read32(qdev, STS);
681                 return var;
682         }
683
684         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685         if (atomic_dec_and_test(&ctx->irq_cnt)) {
686                 ql_write32(qdev, INTR_EN,
687                            ctx->intr_en_mask);
688                 var = ql_read32(qdev, STS);
689         }
690         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691         return var;
692 }
693
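/* Illustrative sketch (editor's addition): for the non-MSI-X (or vector 0)
 * case the irq_cnt described above pairs every disable with an enable, so
 * the interrupt is only re-armed once the last outstanding worker is done:
 *
 *	ql_disable_completion_interrupt(qdev, 0);	// irq_cnt++, masked
 *	... queue and run deferred work ...
 *	ql_enable_completion_interrupt(qdev, 0);	// irq_cnt-- hits 0, re-armed
 */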
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696         u32 var = 0;
697         struct intr_context *ctx;
698
699         /* HW disables for us if we're running with multiple MSI-X vectors and
700          * it's not the default (zeroeth) interrupt.
701          */
702         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703                 return 0;
704
705         ctx = qdev->intr_context + intr;
706         spin_lock(&qdev->hw_lock);
707         if (!atomic_read(&ctx->irq_cnt)) {
708                 ql_write32(qdev, INTR_EN,
709                 ctx->intr_dis_mask);
710                 var = ql_read32(qdev, STS);
711         }
712         atomic_inc(&ctx->irq_cnt);
713         spin_unlock(&qdev->hw_lock);
714         return var;
715 }
716
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719         int i;
720         for (i = 0; i < qdev->intr_count; i++) {
721                 /* The enable call does an atomic_dec_and_test
722                  * and enables only if the result is zero.
723                  * So we precharge it here.
724                  */
725                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726                         i == 0))
727                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728                 ql_enable_completion_interrupt(qdev, i);
729         }
730
731 }
732
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735         int status, i;
736         u16 csum = 0;
737         __le16 *flash = (__le16 *)&qdev->flash;
738
739         status = strncmp((char *)&qdev->flash, str, 4);
740         if (status) {
741                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742                 return  status;
743         }
744
745         for (i = 0; i < size; i++)
746                 csum += le16_to_cpu(*flash++);
747
748         if (csum)
749                 netif_err(qdev, ifup, qdev->ndev,
750                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752         return csum;
753 }
754
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757         int status = 0;
758         /* wait for reg to come ready */
759         status = ql_wait_reg_rdy(qdev,
760                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761         if (status)
762                 goto exit;
763         /* set up for reg read */
764         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765         /* wait for reg to come ready */
766         status = ql_wait_reg_rdy(qdev,
767                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768         if (status)
769                 goto exit;
770         /* This data is stored on flash as an array of
771          * __le32.  Since ql_read32() returns cpu-endian data,
772          * we need to swap it back.
773          */
774         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776         return status;
777 }
778
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781         u32 i, size;
782         int status;
783         __le32 *p = (__le32 *)&qdev->flash;
784         u32 offset;
785         u8 mac_addr[6];
786
787         /* Get flash offset for function and adjust
788          * for dword access.
789          */
790         if (!qdev->port)
791                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792         else
793                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796                 return -ETIMEDOUT;
797
798         size = sizeof(struct flash_params_8000) / sizeof(u32);
799         for (i = 0; i < size; i++, p++) {
800                 status = ql_read_flash_word(qdev, i+offset, p);
801                 if (status) {
802                         netif_err(qdev, ifup, qdev->ndev,
803                                   "Error reading flash.\n");
804                         goto exit;
805                 }
806         }
807
808         status = ql_validate_flash(qdev,
809                         sizeof(struct flash_params_8000) / sizeof(u16),
810                         "8000");
811         if (status) {
812                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813                 status = -EINVAL;
814                 goto exit;
815         }
816
817         /* Extract either manufacturer or BOFM modified
818          * MAC address.
819          */
820         if (qdev->flash.flash_params_8000.data_type1 == 2)
821                 memcpy(mac_addr,
822                         qdev->flash.flash_params_8000.mac_addr1,
823                         qdev->ndev->addr_len);
824         else
825                 memcpy(mac_addr,
826                         qdev->flash.flash_params_8000.mac_addr,
827                         qdev->ndev->addr_len);
828
829         if (!is_valid_ether_addr(mac_addr)) {
830                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831                 status = -EINVAL;
832                 goto exit;
833         }
834
835         memcpy(qdev->ndev->dev_addr,
836                 mac_addr,
837                 qdev->ndev->addr_len);
838
839 exit:
840         ql_sem_unlock(qdev, SEM_FLASH_MASK);
841         return status;
842 }
843
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846         int i;
847         int status;
848         __le32 *p = (__le32 *)&qdev->flash;
849         u32 offset = 0;
850         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852         /* Second function's parameters follow the first
853          * function's.
854          */
855         if (qdev->port)
856                 offset = size;
857
858         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859                 return -ETIMEDOUT;
860
861         for (i = 0; i < size; i++, p++) {
862                 status = ql_read_flash_word(qdev, i+offset, p);
863                 if (status) {
864                         netif_err(qdev, ifup, qdev->ndev,
865                                   "Error reading flash.\n");
866                         goto exit;
867                 }
868
869         }
870
871         status = ql_validate_flash(qdev,
872                         sizeof(struct flash_params_8012) / sizeof(u16),
873                         "8012");
874         if (status) {
875                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881                 status = -EINVAL;
882                 goto exit;
883         }
884
885         memcpy(qdev->ndev->dev_addr,
886                 qdev->flash.flash_params_8012.mac_addr,
887                 qdev->ndev->addr_len);
888
889 exit:
890         ql_sem_unlock(qdev, SEM_FLASH_MASK);
891         return status;
892 }
893
894 /* XGMAC registers are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900         int status;
901         /* wait for reg to come ready */
902         status = ql_wait_reg_rdy(qdev,
903                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904         if (status)
905                 return status;
906         /* write the data to the data reg */
907         ql_write32(qdev, XGMAC_DATA, data);
908         /* trigger the write */
909         ql_write32(qdev, XGMAC_ADDR, reg);
910         return status;
911 }
912
913 /* XGMAC registers are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919         int status = 0;
920         /* wait for reg to come ready */
921         status = ql_wait_reg_rdy(qdev,
922                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923         if (status)
924                 goto exit;
925         /* set up for reg read */
926         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927         /* wait for reg to come ready */
928         status = ql_wait_reg_rdy(qdev,
929                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930         if (status)
931                 goto exit;
932         /* get the data */
933         *data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935         return status;
936 }
937
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941         int status = 0;
942         u32 hi = 0;
943         u32 lo = 0;
944
945         status = ql_read_xgmac_reg(qdev, reg, &lo);
946         if (status)
947                 goto exit;
948
949         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950         if (status)
951                 goto exit;
952
953         *data = (u64) lo | ((u64) hi << 32);
954
955 exit:
956         return status;
957 }
958
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961         int status;
962         /*
963          * Get MPI firmware version for driver banner
964          * and ethtool info.
965          */
966         status = ql_mb_about_fw(qdev);
967         if (status)
968                 goto exit;
969         status = ql_mb_get_fw_state(qdev);
970         if (status)
971                 goto exit;
972         /* Wake up a worker to get/set the TX/RX frame sizes. */
973         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975         return status;
976 }
977
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986         int status = 0;
987         u32 data;
988
989         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990                 /* Another function has the semaphore, so
991                  * wait for the port init bit to come ready.
992                  */
993                 netif_info(qdev, link, qdev->ndev,
994                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996                 if (status) {
997                         netif_crit(qdev, link, qdev->ndev,
998                                    "Port initialize timed out.\n");
999                 }
1000                 return status;
1001         }
1002
1003         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004         /* Set the core reset. */
1005         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006         if (status)
1007                 goto end;
1008         data |= GLOBAL_CFG_RESET;
1009         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010         if (status)
1011                 goto end;
1012
1013         /* Clear the core reset and turn on jumbo for receiver. */
1014         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1015         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1016         data |= GLOBAL_CFG_TX_STAT_EN;
1017         data |= GLOBAL_CFG_RX_STAT_EN;
1018         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019         if (status)
1020                 goto end;
1021
1022         /* Enable the transmitter and clear its reset. */
1023         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024         if (status)
1025                 goto end;
1026         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1027         data |= TX_CFG_EN;      /* Enable the transmitter. */
1028         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029         if (status)
1030                 goto end;
1031
1032         /* Enable the receiver and clear its reset. */
1033         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034         if (status)
1035                 goto end;
1036         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1037         data |= RX_CFG_EN;      /* Enable the receiver. */
1038         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039         if (status)
1040                 goto end;
1041
1042         /* Set the jumbo max frame size (0x2580 = 9600 bytes). */
1043         status =
1044             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045         if (status)
1046                 goto end;
1047         status =
1048             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049         if (status)
1050                 goto end;
1051
1052         /* Signal to the world that the port is enabled.        */
1053         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056         return status;
1057 }
1058
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061         return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
1063
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068         rx_ring->lbq_curr_idx++;
1069         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070                 rx_ring->lbq_curr_idx = 0;
1071         rx_ring->lbq_free_cnt++;
1072         return lbq_desc;
1073 }
1074
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076                 struct rx_ring *rx_ring)
1077 {
1078         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080         pci_dma_sync_single_for_cpu(qdev->pdev,
1081                                         dma_unmap_addr(lbq_desc, mapaddr),
1082                                     rx_ring->lbq_buf_size,
1083                                         PCI_DMA_FROMDEVICE);
1084
1085         /* If it's the last chunk of our master page then
1086          * we unmap it.
1087          */
1088         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089                                         == ql_lbq_block_size(qdev))
1090                 pci_unmap_page(qdev->pdev,
1091                                 lbq_desc->p.pg_chunk.map,
1092                                 ql_lbq_block_size(qdev),
1093                                 PCI_DMA_FROMDEVICE);
1094         return lbq_desc;
1095 }
1096
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101         rx_ring->sbq_curr_idx++;
1102         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103                 rx_ring->sbq_curr_idx = 0;
1104         rx_ring->sbq_free_cnt++;
1105         return sbq_desc;
1106 }
1107
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111         rx_ring->cnsmr_idx++;
1112         rx_ring->curr_entry++;
1113         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114                 rx_ring->cnsmr_idx = 0;
1115                 rx_ring->curr_entry = rx_ring->cq_base;
1116         }
1117 }
1118
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
1123
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125                                                 struct bq_desc *lbq_desc)
1126 {
1127         if (!rx_ring->pg_chunk.page) {
1128                 u64 map;
1129                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130                                                 GFP_ATOMIC,
1131                                                 qdev->lbq_buf_order);
1132                 if (unlikely(!rx_ring->pg_chunk.page)) {
1133                         netif_err(qdev, drv, qdev->ndev,
1134                                   "page allocation failed.\n");
1135                         return -ENOMEM;
1136                 }
1137                 rx_ring->pg_chunk.offset = 0;
1138                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139                                         0, ql_lbq_block_size(qdev),
1140                                         PCI_DMA_FROMDEVICE);
1141                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142                         __free_pages(rx_ring->pg_chunk.page,
1143                                         qdev->lbq_buf_order);
1144                         netif_err(qdev, drv, qdev->ndev,
1145                                   "PCI mapping failed.\n");
1146                         return -ENOMEM;
1147                 }
1148                 rx_ring->pg_chunk.map = map;
1149                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150         }
1151
1152         /* Copy the current master pg_chunk info
1153          * to the current descriptor.
1154          */
1155         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157         /* Adjust the master page chunk for next
1158          * buffer get.
1159          */
1160         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162                 rx_ring->pg_chunk.page = NULL;
1163                 lbq_desc->p.pg_chunk.last_flag = 1;
1164         } else {
1165                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166                 get_page(rx_ring->pg_chunk.page);
1167                 lbq_desc->p.pg_chunk.last_flag = 0;
1168         }
1169         return 0;
1170 }
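/* Worked example (editor's addition, the numbers are only illustrative):
 * with 4 KB pages and lbq_buf_order = 1, ql_lbq_block_size() is 8 KB; a
 * 2 KB lbq_buf_size then carves the master page into four chunks.  Every
 * chunk except the last takes an extra get_page() reference, and the last
 * chunk (offset + lbq_buf_size == block size) is the one that triggers the
 * pci_unmap_page() in ql_get_curr_lchunk() above.
 */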
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174         u32 clean_idx = rx_ring->lbq_clean_idx;
1175         u32 start_idx = clean_idx;
1176         struct bq_desc *lbq_desc;
1177         u64 map;
1178         int i;
1179
1180         while (rx_ring->lbq_free_cnt > 32) {
1181                 for (i = 0; i < 16; i++) {
1182                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                                      "lbq: try cleaning clean_idx = %d.\n",
1184                                      clean_idx);
1185                         lbq_desc = &rx_ring->lbq[clean_idx];
1186                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187                                 netif_err(qdev, ifup, qdev->ndev,
1188                                           "Could not get a page chunk.\n");
1189                                 return;
1190                         }
1191
1192                         map = lbq_desc->p.pg_chunk.map +
1193                                 lbq_desc->p.pg_chunk.offset;
1194                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195                         dma_unmap_len_set(lbq_desc, maplen,
1196                                           rx_ring->lbq_buf_size);
1197                         *lbq_desc->addr = cpu_to_le64(map);
1198
1199                         pci_dma_sync_single_for_device(qdev->pdev, map,
1200                                                 rx_ring->lbq_buf_size,
1201                                                 PCI_DMA_FROMDEVICE);
1202                         clean_idx++;
1203                         if (clean_idx == rx_ring->lbq_len)
1204                                 clean_idx = 0;
1205                 }
1206
1207                 rx_ring->lbq_clean_idx = clean_idx;
1208                 rx_ring->lbq_prod_idx += 16;
1209                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210                         rx_ring->lbq_prod_idx = 0;
1211                 rx_ring->lbq_free_cnt -= 16;
1212         }
1213
1214         if (start_idx != clean_idx) {
1215                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216                              "lbq: updating prod idx = %d.\n",
1217                              rx_ring->lbq_prod_idx);
1218                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219                                 rx_ring->lbq_prod_idx_db_reg);
1220         }
1221 }
1222
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226         u32 clean_idx = rx_ring->sbq_clean_idx;
1227         u32 start_idx = clean_idx;
1228         struct bq_desc *sbq_desc;
1229         u64 map;
1230         int i;
1231
1232         while (rx_ring->sbq_free_cnt > 16) {
1233                 for (i = 0; i < 16; i++) {
1234                         sbq_desc = &rx_ring->sbq[clean_idx];
1235                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236                                      "sbq: try cleaning clean_idx = %d.\n",
1237                                      clean_idx);
1238                         if (sbq_desc->p.skb == NULL) {
1239                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1240                                              qdev->ndev,
1241                                              "sbq: getting new skb for index %d.\n",
1242                                              sbq_desc->index);
1243                                 sbq_desc->p.skb =
1244                                     netdev_alloc_skb(qdev->ndev,
1245                                                      SMALL_BUFFER_SIZE);
1246                                 if (sbq_desc->p.skb == NULL) {
1247                                         netif_err(qdev, probe, qdev->ndev,
1248                                                   "Couldn't get an skb.\n");
1249                                         rx_ring->sbq_clean_idx = clean_idx;
1250                                         return;
1251                                 }
1252                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253                                 map = pci_map_single(qdev->pdev,
1254                                                      sbq_desc->p.skb->data,
1255                                                      rx_ring->sbq_buf_size,
1256                                                      PCI_DMA_FROMDEVICE);
1257                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258                                         netif_err(qdev, ifup, qdev->ndev,
1259                                                   "PCI mapping failed.\n");
1260                                         rx_ring->sbq_clean_idx = clean_idx;
1261                                         dev_kfree_skb_any(sbq_desc->p.skb);
1262                                         sbq_desc->p.skb = NULL;
1263                                         return;
1264                                 }
1265                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266                                 dma_unmap_len_set(sbq_desc, maplen,
1267                                                   rx_ring->sbq_buf_size);
1268                                 *sbq_desc->addr = cpu_to_le64(map);
1269                         }
1270
1271                         clean_idx++;
1272                         if (clean_idx == rx_ring->sbq_len)
1273                                 clean_idx = 0;
1274                 }
1275                 rx_ring->sbq_clean_idx = clean_idx;
1276                 rx_ring->sbq_prod_idx += 16;
1277                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278                         rx_ring->sbq_prod_idx = 0;
1279                 rx_ring->sbq_free_cnt -= 16;
1280         }
1281
1282         if (start_idx != clean_idx) {
1283                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284                              "sbq: updating prod idx = %d.\n",
1285                              rx_ring->sbq_prod_idx);
1286                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287                                 rx_ring->sbq_prod_idx_db_reg);
1288         }
1289 }
1290
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292                                     struct rx_ring *rx_ring)
1293 {
1294         ql_update_sbq(qdev, rx_ring);
1295         ql_update_lbq(qdev, rx_ring);
1296 }
1297
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302                           struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304         int i;
1305         for (i = 0; i < mapped; i++) {
1306                 if (i == 0 || (i == 7 && mapped > 7)) {
1307                         /*
1308                          * Unmap the skb->data area, or the
1309                          * external sglist (AKA the Outbound
1310                          * Address List (OAL)).
1311                          * If it's the zeroeth element, then it's
1312                          * the skb->data area.  If it's the 7th
1313                          * element and there are more than 6 frags,
1314                          * then it's an OAL.
1315                          */
1316                         if (i == 7) {
1317                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1318                                              qdev->ndev,
1319                                              "unmapping OAL area.\n");
1320                         }
1321                         pci_unmap_single(qdev->pdev,
1322                                          dma_unmap_addr(&tx_ring_desc->map[i],
1323                                                         mapaddr),
1324                                          dma_unmap_len(&tx_ring_desc->map[i],
1325                                                        maplen),
1326                                          PCI_DMA_TODEVICE);
1327                 } else {
1328                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329                                      "unmapping frag %d.\n", i);
1330                         pci_unmap_page(qdev->pdev,
1331                                        dma_unmap_addr(&tx_ring_desc->map[i],
1332                                                       mapaddr),
1333                                        dma_unmap_len(&tx_ring_desc->map[i],
1334                                                      maplen), PCI_DMA_TODEVICE);
1335                 }
1336         }
1337
1338 }
1339
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
1343 static int ql_map_send(struct ql_adapter *qdev,
1344                        struct ob_mac_iocb_req *mac_iocb_ptr,
1345                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347         int len = skb_headlen(skb);
1348         dma_addr_t map;
1349         int frag_idx, err, map_idx = 0;
1350         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351         int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353         if (frag_cnt) {
1354                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355                              "frag_cnt = %d.\n", frag_cnt);
1356         }
1357         /*
1358          * Map the skb buffer first.
1359          */
1360         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362         err = pci_dma_mapping_error(qdev->pdev, map);
1363         if (err) {
1364                 netif_err(qdev, tx_queued, qdev->ndev,
1365                           "PCI mapping failed with error: %d\n", err);
1366
1367                 return NETDEV_TX_BUSY;
1368         }
1369
1370         tbd->len = cpu_to_le32(len);
1371         tbd->addr = cpu_to_le64(map);
1372         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374         map_idx++;
1375
1376         /*
1377          * This loop fills the remainder of the 8 address descriptors
1378          * in the IOCB.  If there are more than 7 fragments, then the
1379          * eighth address desc will point to an external list (OAL).
1380          * When this happens, the remainder of the frags will be stored
1381          * in this list.
1382          */
1383         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385                 tbd++;
1386                 if (frag_idx == 6 && frag_cnt > 7) {
1387                         /* Let's tack on an sglist.
1388                          * Our control block will now
1389                          * look like this:
1390                          * iocb->seg[0] = skb->data
1391                          * iocb->seg[1] = frag[0]
1392                          * iocb->seg[2] = frag[1]
1393                          * iocb->seg[3] = frag[2]
1394                          * iocb->seg[4] = frag[3]
1395                          * iocb->seg[5] = frag[4]
1396                          * iocb->seg[6] = frag[5]
1397                          * iocb->seg[7] = ptr to OAL (external sglist)
1398                          * oal->seg[0] = frag[6]
1399                          * oal->seg[1] = frag[7]
1400                          * oal->seg[2] = frag[8]
1401                          * oal->seg[3] = frag[9]
1402                          * oal->seg[4] = frag[10]
1403                          *      etc...
1404                          */
1405                         /* Tack on the OAL in the eighth segment of IOCB. */
1406                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407                                              sizeof(struct oal),
1408                                              PCI_DMA_TODEVICE);
1409                         err = pci_dma_mapping_error(qdev->pdev, map);
1410                         if (err) {
1411                                 netif_err(qdev, tx_queued, qdev->ndev,
1412                                           "PCI mapping outbound address list with error: %d\n",
1413                                           err);
1414                                 goto map_error;
1415                         }
1416
1417                         tbd->addr = cpu_to_le64(map);
1418                         /*
1419                          * The length is the number of fragments
1420                          * that remain to be mapped times the size
1421                          * of a single OAL entry (tx_buf_desc).
1422                          */
1423                         tbd->len =
1424                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1425                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1426                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427                                            map);
1428                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429                                           sizeof(struct oal));
1430                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431                         map_idx++;
1432                 }
1433
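                /* Map the frag itself.  If the OAL was just attached above,
                 * tbd now points at the first OAL entry, so this frag and
                 * the remaining ones land in the external list.
                 */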
1434                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435                                        DMA_TO_DEVICE);
1436
1437                 err = dma_mapping_error(&qdev->pdev->dev, map);
1438                 if (err) {
1439                         netif_err(qdev, tx_queued, qdev->ndev,
1440                                   "PCI mapping frags failed with error: %d.\n",
1441                                   err);
1442                         goto map_error;
1443                 }
1444
1445                 tbd->addr = cpu_to_le64(map);
1446                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449                                   skb_frag_size(frag));
1450
1451         }
1452         /* Save the number of segments we've mapped. */
1453         tx_ring_desc->map_cnt = map_idx;
1454         /* Terminate the last segment. */
1455         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456         return NETDEV_TX_OK;
1457
1458 map_error:
1459         /*
1460          * If the first frag mapping failed, then map_idx will be 1,
1461          * which unmaps only the skb->data area.  Otherwise we pass
1462          * in the number of segments that mapped successfully so
1463          * they can be unmapped.
1464          */
1465         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466         return NETDEV_TX_BUSY;
1467 }
1468
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471                                         struct rx_ring *rx_ring,
1472                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                         u32 length,
1474                                         u16 vlan_id)
1475 {
1476         struct sk_buff *skb;
1477         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478         struct napi_struct *napi = &rx_ring->napi;
1479
1480         napi->dev = qdev->ndev;
1481
1482         skb = napi_get_frags(napi);
1483         if (!skb) {
1484                 netif_err(qdev, drv, qdev->ndev,
1485                           "Couldn't get an skb, exiting.\n");
1486                 rx_ring->rx_dropped++;
1487                 put_page(lbq_desc->p.pg_chunk.page);
1488                 return;
1489         }
1490         prefetch(lbq_desc->p.pg_chunk.va);
1491         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492                              lbq_desc->p.pg_chunk.page,
1493                              lbq_desc->p.pg_chunk.offset,
1494                              length);
1495
1496         skb->len += length;
1497         skb->data_len += length;
1498         skb->truesize += length;
1499         skb_shinfo(skb)->nr_frags++;
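        /* The page chunk now belongs to the skb; the stack drops the page
         * reference when it frees the skb.
         */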
1500
1501         rx_ring->rx_packets++;
1502         rx_ring->rx_bytes += length;
1503         skb->ip_summed = CHECKSUM_UNNECESSARY;
1504         skb_record_rx_queue(skb, rx_ring->cq_id);
1505         if (vlan_id != 0xffff)
1506                 __vlan_hwaccel_put_tag(skb, vlan_id);
1507         napi_gro_frags(napi);
1508 }
1509
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512                                         struct rx_ring *rx_ring,
1513                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1514                                         u32 length,
1515                                         u16 vlan_id)
1516 {
1517         struct net_device *ndev = qdev->ndev;
1518         struct sk_buff *skb = NULL;
1519         void *addr;
1520         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521         struct napi_struct *napi = &rx_ring->napi;
1522
1523         skb = netdev_alloc_skb(ndev, length);
1524         if (!skb) {
1525                 netif_err(qdev, drv, qdev->ndev,
1526                           "Couldn't get an skb, need to unwind!\n");
1527                 rx_ring->rx_dropped++;
1528                 put_page(lbq_desc->p.pg_chunk.page);
1529                 return;
1530         }
1531
1532         addr = lbq_desc->p.pg_chunk.va;
1533         prefetch(addr);
1534
1535
1536         /* Frame error, so drop the packet. */
1537         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538                 netif_info(qdev, drv, qdev->ndev,
1539                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540                 rx_ring->rx_errors++;
1541                 goto err_out;
1542         }
1543
1544         /* The max framesize filter on this chip is set higher than
1545          * MTU since FCoE uses 2k frames.
1546          */
1547         if (length > ndev->mtu + ETH_HLEN) {
1548                 netif_err(qdev, drv, qdev->ndev,
1549                           "Frame too long, dropping.\n");
1550                 rx_ring->rx_dropped++;
1551                 goto err_out;
1552         }
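        /* Copy only the Ethernet header into the linear area so
         * eth_type_trans() can parse it; the payload stays in the page
         * chunk attached below as a frag.
         */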
1553         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555                      "%d bytes of headers and data in large. Chain page to new skb.\n",
1556                      length);
1557         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559                                 length-ETH_HLEN);
1560         skb->len += length-ETH_HLEN;
1561         skb->data_len += length-ETH_HLEN;
1562         skb->truesize += length-ETH_HLEN;
1563
1564         rx_ring->rx_packets++;
1565         rx_ring->rx_bytes += skb->len;
1566         skb->protocol = eth_type_trans(skb, ndev);
1567         skb_checksum_none_assert(skb);
1568
1569         if ((ndev->features & NETIF_F_RXCSUM) &&
1570                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571                 /* TCP frame. */
1572                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574                                      "TCP checksum done!\n");
1575                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1576                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578                         /* Unfragmented ipv4 UDP frame. */
1579                         struct iphdr *iph = (struct iphdr *) skb->data;
1580                         if (!(iph->frag_off &
1581                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1584                                              qdev->ndev,
1585                                              "UDP checksum done!\n");
1586                         }
1587                 }
1588         }
1589
1590         skb_record_rx_queue(skb, rx_ring->cq_id);
1591         if (vlan_id != 0xffff)
1592                 __vlan_hwaccel_put_tag(skb, vlan_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594                 napi_gro_receive(napi, skb);
1595         else
1596                 netif_receive_skb(skb);
1597         return;
1598 err_out:
1599         dev_kfree_skb_any(skb);
1600         put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605                                         struct rx_ring *rx_ring,
1606                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1607                                         u32 length,
1608                                         u16 vlan_id)
1609 {
1610         struct net_device *ndev = qdev->ndev;
1611         struct sk_buff *skb = NULL;
1612         struct sk_buff *new_skb = NULL;
1613         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615         skb = sbq_desc->p.skb;
1616         /* Allocate new_skb and copy */
1617         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618         if (new_skb == NULL) {
1619                 netif_err(qdev, probe, qdev->ndev,
1620                           "No skb available, drop the packet.\n");
1621                 rx_ring->rx_dropped++;
1622                 return;
1623         }
1624         skb_reserve(new_skb, NET_IP_ALIGN);
1625
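        /* Copy the frame into new_skb so the small-buffer skb can stay
         * mapped and be handed back to the hardware by the sbq refill
         * logic; only the copy goes up the stack.
         */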
1626         pci_dma_sync_single_for_cpu(qdev->pdev,
1627                                     dma_unmap_addr(sbq_desc, mapaddr),
1628                                     dma_unmap_len(sbq_desc, maplen),
1629                                     PCI_DMA_FROMDEVICE);
1630
1631         memcpy(skb_put(new_skb, length), skb->data, length);
1632
1633         pci_dma_sync_single_for_device(qdev->pdev,
1634                                        dma_unmap_addr(sbq_desc, mapaddr),
1635                                        dma_unmap_len(sbq_desc, maplen),
1636                                        PCI_DMA_FROMDEVICE);
1637         skb = new_skb;
1638
1639         /* Frame error, so drop the packet. */
1640         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1641                 netif_info(qdev, drv, qdev->ndev,
1642                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1643                 dev_kfree_skb_any(skb);
1644                 rx_ring->rx_errors++;
1645                 return;
1646         }
1647
1648         /* loopback self test for ethtool */
1649         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650                 ql_check_lb_frame(qdev, skb);
1651                 dev_kfree_skb_any(skb);
1652                 return;
1653         }
1654
1655         /* The max framesize filter on this chip is set higher than
1656          * MTU since FCoE uses 2k frames.
1657          */
1658         if (skb->len > ndev->mtu + ETH_HLEN) {
1659                 dev_kfree_skb_any(skb);
1660                 rx_ring->rx_dropped++;
1661                 return;
1662         }
1663
1664         prefetch(skb->data);
1665         skb->dev = ndev;
1666         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1667                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668                              "%s Multicast.\n",
1669                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1675         }
1676         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1677                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678                              "Promiscuous Packet.\n");
1679
1680         rx_ring->rx_packets++;
1681         rx_ring->rx_bytes += skb->len;
1682         skb->protocol = eth_type_trans(skb, ndev);
1683         skb_checksum_none_assert(skb);
1684
1685         /* If rx checksum is on, and there are no
1686          * csum or frame errors.
1687          */
1688         if ((ndev->features & NETIF_F_RXCSUM) &&
1689                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690                 /* TCP frame. */
1691                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1692                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693                                      "TCP checksum done!\n");
1694                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1695                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697                         /* Unfragmented ipv4 UDP frame. */
1698                         struct iphdr *iph = (struct iphdr *) skb->data;
1699                         if (!(iph->frag_off &
1700                                 ntohs(IP_MF|IP_OFFSET))) {
1701                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1703                                              qdev->ndev,
1704                                              "UDP checksum done!\n");
1705                         }
1706                 }
1707         }
1708
1709         skb_record_rx_queue(skb, rx_ring->cq_id);
1710         if (vlan_id != 0xffff)
1711                 __vlan_hwaccel_put_tag(skb, vlan_id);
1712         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1713                 napi_gro_receive(&rx_ring->napi, skb);
1714         else
1715                 netif_receive_skb(skb);
1716 }
1717
1718 static void ql_realign_skb(struct sk_buff *skb, int len)
1719 {
1720         void *temp_addr = skb->data;
1721
1722         /* Undo the skb_reserve(skb,32) we did before
1723          * giving to hardware, and realign data on
1724          * a 2-byte boundary.
1725          */
1726         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1727         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1728         skb_copy_to_linear_data(skb, temp_addr,
1729                 (unsigned int)len);
1730 }
1731
1732 /*
1733  * This function builds an skb for the given inbound
1734  * completion.  It will be rewritten for readability in the near
1735  * future, but for now it works well.
1736  */
1737 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1738                                        struct rx_ring *rx_ring,
1739                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1740 {
1741         struct bq_desc *lbq_desc;
1742         struct bq_desc *sbq_desc;
1743         struct sk_buff *skb = NULL;
1744         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1745         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1746
1747         /*
1748          * Handle the header buffer if present.
1749          */
1750         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1751             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1752                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1753                              "Header of %d bytes in small buffer.\n", hdr_len);
1754                 /*
1755                  * Headers fit nicely into a small buffer.
1756                  */
1757                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758                 pci_unmap_single(qdev->pdev,
1759                                 dma_unmap_addr(sbq_desc, mapaddr),
1760                                 dma_unmap_len(sbq_desc, maplen),
1761                                 PCI_DMA_FROMDEVICE);
1762                 skb = sbq_desc->p.skb;
1763                 ql_realign_skb(skb, hdr_len);
1764                 skb_put(skb, hdr_len);
1765                 sbq_desc->p.skb = NULL;
1766         }
1767
1768         /*
1769          * Handle the data buffer(s).
1770          */
1771         if (unlikely(!length)) {        /* Is there data too? */
1772                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1773                              "No Data buffer in this packet.\n");
1774                 return skb;
1775         }
1776
1777         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1778                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1779                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780                                      "Headers in small, data of %d bytes in small, combine them.\n",
1781                                      length);
1782                         /*
1783                          * Data is less than small buffer size so it's
1784                          * stuffed in a small buffer.
1785                          * For this case we append the data
1786                          * from the "data" small buffer to the "header" small
1787                          * buffer.
1788                          */
1789                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1790                         pci_dma_sync_single_for_cpu(qdev->pdev,
1791                                                     dma_unmap_addr
1792                                                     (sbq_desc, mapaddr),
1793                                                     dma_unmap_len
1794                                                     (sbq_desc, maplen),
1795                                                     PCI_DMA_FROMDEVICE);
1796                         memcpy(skb_put(skb, length),
1797                                sbq_desc->p.skb->data, length);
1798                         pci_dma_sync_single_for_device(qdev->pdev,
1799                                                        dma_unmap_addr
1800                                                        (sbq_desc,
1801                                                         mapaddr),
1802                                                        dma_unmap_len
1803                                                        (sbq_desc,
1804                                                         maplen),
1805                                                        PCI_DMA_FROMDEVICE);
1806                 } else {
1807                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1808                                      "%d bytes in a single small buffer.\n",
1809                                      length);
1810                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1811                         skb = sbq_desc->p.skb;
1812                         ql_realign_skb(skb, length);
1813                         skb_put(skb, length);
1814                         pci_unmap_single(qdev->pdev,
1815                                          dma_unmap_addr(sbq_desc,
1816                                                         mapaddr),
1817                                          dma_unmap_len(sbq_desc,
1818                                                        maplen),
1819                                          PCI_DMA_FROMDEVICE);
1820                         sbq_desc->p.skb = NULL;
1821                 }
1822         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1823                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1824                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1825                                      "Header in small, %d bytes in large. Chain large to small!\n",
1826                                      length);
1827                         /*
1828                          * The data is in a single large buffer.  We
1829                          * chain it to the header buffer's skb and let
1830                          * it rip.
1831                          */
1832                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1833                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1834                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1835                                      lbq_desc->p.pg_chunk.offset, length);
1836                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1837                                                 lbq_desc->p.pg_chunk.offset,
1838                                                 length);
1839                         skb->len += length;
1840                         skb->data_len += length;
1841                         skb->truesize += length;
1842                 } else {
1843                         /*
1844                          * The headers and data are in a single large buffer. We
1845                          * copy it to a new skb and let it go. This can happen with
1846                          * jumbo mtu on a non-TCP/UDP frame.
1847                          */
1848                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1849                         skb = netdev_alloc_skb(qdev->ndev, length);
1850                         if (skb == NULL) {
1851                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1852                                              "No skb available, drop the packet.\n");
1853                                 return NULL;
1854                         }
1855                         pci_unmap_page(qdev->pdev,
1856                                        dma_unmap_addr(lbq_desc,
1857                                                       mapaddr),
1858                                        dma_unmap_len(lbq_desc, maplen),
1859                                        PCI_DMA_FROMDEVICE);
1860                         skb_reserve(skb, NET_IP_ALIGN);
1861                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1862                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1863                                      length);
1864                         skb_fill_page_desc(skb, 0,
1865                                                 lbq_desc->p.pg_chunk.page,
1866                                                 lbq_desc->p.pg_chunk.offset,
1867                                                 length);
1868                         skb->len += length;
1869                         skb->data_len += length;
1870                         skb->truesize += length;
1871                         length -= length;
1872                         __pskb_pull_tail(skb,
1873                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1874                                 VLAN_ETH_HLEN : ETH_HLEN);
1875                 }
1876         } else {
1877                 /*
1878                  * The data is in a chain of large buffers
1879                  * pointed to by a small buffer.  We loop
1880                  * through and chain them to our small header
1881                  * buffer's skb.
1882                  * frags:  There are 18 max frags and our small
1883                  *         buffer will hold 32 of them. The thing is,
1884                  *         we'll use 3 max for our 9000 byte jumbo
1885                  *         frames.  If the MTU goes up we could
1886                  *         eventually be in trouble.
1887                  */
1888                 int size, i = 0;
1889                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1890                 pci_unmap_single(qdev->pdev,
1891                                  dma_unmap_addr(sbq_desc, mapaddr),
1892                                  dma_unmap_len(sbq_desc, maplen),
1893                                  PCI_DMA_FROMDEVICE);
1894                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1895                         /*
1896                          * This is a non-TCP/UDP IP frame, so
1897                          * the headers aren't split into a small
1898                          * buffer.  We have to use the small buffer
1899                          * that contains our sg list as our skb to
1900                          * send upstairs. Copy the sg list here to
1901                          * a local buffer and use it to find the
1902                          * pages to chain.
1903                          */
1904                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905                                      "%d bytes of headers & data in chain of large.\n",
1906                                      length);
1907                         skb = sbq_desc->p.skb;
1908                         sbq_desc->p.skb = NULL;
1909                         skb_reserve(skb, NET_IP_ALIGN);
1910                 }
1911                 while (length > 0) {
1912                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1913                         size = (length < rx_ring->lbq_buf_size) ? length :
1914                                 rx_ring->lbq_buf_size;
1915
1916                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917                                      "Adding page %d to skb for %d bytes.\n",
1918                                      i, size);
1919                         skb_fill_page_desc(skb, i,
1920                                                 lbq_desc->p.pg_chunk.page,
1921                                                 lbq_desc->p.pg_chunk.offset,
1922                                                 size);
1923                         skb->len += size;
1924                         skb->data_len += size;
1925                         skb->truesize += size;
1926                         length -= size;
1927                         i++;
1928                 }
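                /* Pull the link-layer header (plus the VLAN tag, if any)
                 * into the linear area so the stack can parse it.
                 */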
1929                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1930                                 VLAN_ETH_HLEN : ETH_HLEN);
1931         }
1932         return skb;
1933 }
1934
1935 /* Process an inbound completion from an rx ring. */
1936 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1937                                    struct rx_ring *rx_ring,
1938                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1939                                    u16 vlan_id)
1940 {
1941         struct net_device *ndev = qdev->ndev;
1942         struct sk_buff *skb = NULL;
1943
1944         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1945
1946         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1947         if (unlikely(!skb)) {
1948                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1949                              "No skb available, drop packet.\n");
1950                 rx_ring->rx_dropped++;
1951                 return;
1952         }
1953
1954         /* Frame error, so drop the packet. */
1955         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1956                 netif_info(qdev, drv, qdev->ndev,
1957                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1958                 dev_kfree_skb_any(skb);
1959                 rx_ring->rx_errors++;
1960                 return;
1961         }
1962
1963         /* The max framesize filter on this chip is set higher than
1964          * MTU since FCoE uses 2k frames.
1965          */
1966         if (skb->len > ndev->mtu + ETH_HLEN) {
1967                 dev_kfree_skb_any(skb);
1968                 rx_ring->rx_dropped++;
1969                 return;
1970         }
1971
1972         /* loopback self test for ethtool */
1973         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1974                 ql_check_lb_frame(qdev, skb);
1975                 dev_kfree_skb_any(skb);
1976                 return;
1977         }
1978
1979         prefetch(skb->data);
1980         skb->dev = ndev;
1981         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1982                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1983                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1985                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1987                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1989                 rx_ring->rx_multicast++;
1990         }
1991         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1992                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993                              "Promiscuous Packet.\n");
1994         }
1995
1996         skb->protocol = eth_type_trans(skb, ndev);
1997         skb_checksum_none_assert(skb);
1998
1999         /* If rx checksum is on, and there are no
2000          * csum or frame errors.
2001          */
2002         if ((ndev->features & NETIF_F_RXCSUM) &&
2003                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2004                 /* TCP frame. */
2005                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2006                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2007                                      "TCP checksum done!\n");
2008                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2009                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2010                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2011                         /* Unfragmented ipv4 UDP frame. */
2012                         struct iphdr *iph = (struct iphdr *) skb->data;
2013                         if (!(iph->frag_off &
2014                                 ntohs(IP_MF|IP_OFFSET))) {
2015                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2016                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2017                                              "UDP checksum done!\n");
2018                         }
2019                 }
2020         }
2021
2022         rx_ring->rx_packets++;
2023         rx_ring->rx_bytes += skb->len;
2024         skb_record_rx_queue(skb, rx_ring->cq_id);
2025         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2026                 __vlan_hwaccel_put_tag(skb, vlan_id);
2027         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2028                 napi_gro_receive(&rx_ring->napi, skb);
2029         else
2030                 netif_receive_skb(skb);
2031 }
2032
2033 /* Process an inbound completion from an rx ring. */
2034 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2035                                         struct rx_ring *rx_ring,
2036                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2037 {
2038         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2039         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2040                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2041                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2042
2043         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2044
2045         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2046                 /* The data and headers are split into
2047                  * separate buffers.
2048                  */
2049                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2050                                                 vlan_id);
2051         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2052                 /* The data fit in a single small buffer.
2053                  * Allocate a new skb, copy the data and
2054                  * return the buffer to the free pool.
2055                  */
2056                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2057                                                 length, vlan_id);
2058         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2059                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2060                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2061                 /* TCP packet in a page chunk that's been checksummed.
2062                  * Tack it on to our GRO skb and let it go.
2063                  */
2064                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2065                                                 length, vlan_id);
2066         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2067                 /* Non-TCP packet in a page chunk. Allocate an
2068                  * skb, tack it on frags, and send it up.
2069                  */
2070                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2071                                                 length, vlan_id);
2072         } else {
2073                 /* Non-TCP/UDP large frames that span multiple buffers
2074                  * can be processed correctly by the split frame logic.
2075                  */
2076                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2077                                                 vlan_id);
2078         }
2079
2080         return (unsigned long)length;
2081 }
2082
2083 /* Process an outbound completion from an rx ring. */
2084 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2085                                    struct ob_mac_iocb_rsp *mac_rsp)
2086 {
2087         struct tx_ring *tx_ring;
2088         struct tx_ring_desc *tx_ring_desc;
2089
2090         QL_DUMP_OB_MAC_RSP(mac_rsp);
2091         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2092         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2093         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2094         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2095         tx_ring->tx_packets++;
2096         dev_kfree_skb(tx_ring_desc->skb);
2097         tx_ring_desc->skb = NULL;
2098
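        /* Warn about any error status the hardware reported for this send. */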
2099         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2100                                         OB_MAC_IOCB_RSP_S |
2101                                         OB_MAC_IOCB_RSP_L |
2102                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2103                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2104                         netif_warn(qdev, tx_done, qdev->ndev,
2105                                    "Total descriptor length did not match transfer length.\n");
2106                 }
2107                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2108                         netif_warn(qdev, tx_done, qdev->ndev,
2109                                    "Frame too short to be valid, not sent.\n");
2110                 }
2111                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2112                         netif_warn(qdev, tx_done, qdev->ndev,
2113                                    "Frame too long, but sent anyway.\n");
2114                 }
2115                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2116                         netif_warn(qdev, tx_done, qdev->ndev,
2117                                    "PCI backplane error. Frame not sent.\n");
2118                 }
2119         }
2120         atomic_inc(&tx_ring->tx_count);
2121 }
2122
2123 /* Fire up a handler to reset the MPI processor. */
2124 void ql_queue_fw_error(struct ql_adapter *qdev)
2125 {
2126         ql_link_off(qdev);
2127         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2128 }
2129
2130 void ql_queue_asic_error(struct ql_adapter *qdev)
2131 {
2132         ql_link_off(qdev);
2133         ql_disable_interrupts(qdev);
2134         /* Clear adapter up bit to signal the recovery
2135          * process that it shouldn't kill the reset worker
2136          * thread
2137          */
2138         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2139         /* Set asic recovery bit to indicate reset process that we are
2140          * in fatal error recovery process rather than normal close
2141          */
2142         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2143         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2144 }
2145
2146 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2147                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2148 {
2149         switch (ib_ae_rsp->event) {
2150         case MGMT_ERR_EVENT:
2151                 netif_err(qdev, rx_err, qdev->ndev,
2152                           "Management Processor Fatal Error.\n");
2153                 ql_queue_fw_error(qdev);
2154                 return;
2155
2156         case CAM_LOOKUP_ERR_EVENT:
2157                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2158                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2159                 ql_queue_asic_error(qdev);
2160                 return;
2161
2162         case SOFT_ECC_ERROR_EVENT:
2163                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2164                 ql_queue_asic_error(qdev);
2165                 break;
2166
2167         case PCI_ERR_ANON_BUF_RD:
2168                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2169                                         "anonymous buffers from rx_ring %d.\n",
2170                                         ib_ae_rsp->q_id);
2171                 ql_queue_asic_error(qdev);
2172                 break;
2173
2174         default:
2175                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2176                           ib_ae_rsp->event);
2177                 ql_queue_asic_error(qdev);
2178                 break;
2179         }
2180 }
2181
2182 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2183 {
2184         struct ql_adapter *qdev = rx_ring->qdev;
2185         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2186         struct ob_mac_iocb_rsp *net_rsp = NULL;
2187         int count = 0;
2188
2189         struct tx_ring *tx_ring;
2190         /* While there are entries in the completion queue. */
2191         while (prod != rx_ring->cnsmr_idx) {
2192
2193                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2194                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2195                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2196
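                /* Order the read of the completion entry after the read
                 * of the producer index above.
                 */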
2197                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2198                 rmb();
2199                 switch (net_rsp->opcode) {
2200
2201                 case OPCODE_OB_MAC_TSO_IOCB:
2202                 case OPCODE_OB_MAC_IOCB:
2203                         ql_process_mac_tx_intr(qdev, net_rsp);
2204                         break;
2205                 default:
2206                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2208                                      net_rsp->opcode);
2209                 }
2210                 count++;
2211                 ql_update_cq(rx_ring);
2212                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2213         }
2214         if (!net_rsp)
2215                 return 0;
2216         ql_write_cq_idx(rx_ring);
2217         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2218         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2219                 if (atomic_read(&tx_ring->queue_stopped) &&
2220                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2221                         /*
2222                          * The queue got stopped because the tx_ring was full.
2223                          * Wake it up, because it's now at least 25% empty.
2224                          */
2225                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2226         }
2227
2228         return count;
2229 }
2230
2231 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2232 {
2233         struct ql_adapter *qdev = rx_ring->qdev;
2234         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2235         struct ql_net_rsp_iocb *net_rsp;
2236         int count = 0;
2237
2238         /* While there are entries in the completion queue. */
2239         while (prod != rx_ring->cnsmr_idx) {
2240
2241                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2242                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2243                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2244
2245                 net_rsp = rx_ring->curr_entry;
2246                 rmb();
2247                 switch (net_rsp->opcode) {
2248                 case OPCODE_IB_MAC_IOCB:
2249                         ql_process_mac_rx_intr(qdev, rx_ring,
2250                                                (struct ib_mac_iocb_rsp *)
2251                                                net_rsp);
2252                         break;
2253
2254                 case OPCODE_IB_AE_IOCB:
2255                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2256                                                 net_rsp);
2257                         break;
2258                 default:
2259                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2260                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2261                                      net_rsp->opcode);
2262                         break;
2263                 }
2264                 count++;
2265                 ql_update_cq(rx_ring);
2266                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2267                 if (count == budget)
2268                         break;
2269         }
2270         ql_update_buffer_queues(qdev, rx_ring);
2271         ql_write_cq_idx(rx_ring);
2272         return count;
2273 }
2274
2275 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2276 {
2277         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2278         struct ql_adapter *qdev = rx_ring->qdev;
2279         struct rx_ring *trx_ring;
2280         int i, work_done = 0;
2281         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2282
2283         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2284                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2285
2286         /* Service the TX rings first.  They start
2287          * right after the RSS rings. */
2288         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2289                 trx_ring = &qdev->rx_ring[i];
2290                 /* If this TX completion ring belongs to this vector and
2291                  * it's not empty then service it.
2292                  */
2293                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2294                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2295                                         trx_ring->cnsmr_idx)) {
2296                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2297                                      "%s: Servicing TX completion ring %d.\n",
2298                                      __func__, trx_ring->cq_id);
2299                         ql_clean_outbound_rx_ring(trx_ring);
2300                 }
2301         }
2302
2303         /*
2304          * Now service the RSS ring if it's active.
2305          */
2306         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2307                                         rx_ring->cnsmr_idx) {
2308                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309                              "%s: Servicing RX completion ring %d.\n",
2310                              __func__, rx_ring->cq_id);
2311                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2312         }
2313
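        /* If the ring was fully drained within the budget, leave polling
         * mode and re-enable this vector's completion interrupt.
         */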
2314         if (work_done < budget) {
2315                 napi_complete(napi);
2316                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2317         }
2318         return work_done;
2319 }
2320
2321 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2322 {
2323         struct ql_adapter *qdev = netdev_priv(ndev);
2324
2325         if (features & NETIF_F_HW_VLAN_RX) {
2326                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327                              "Turning on VLAN in NIC_RCV_CFG.\n");
2328                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2329                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2330         } else {
2331                 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2332                              "Turning off VLAN in NIC_RCV_CFG.\n");
2333                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2334         }
2335 }
2336
2337 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2338 {
2339         /*
2340          * Since there is no support for separate rx/tx vlan accel
2341          * enable/disable make sure tx flag is always in same state as rx.
2342          */
2343         if (features & NETIF_F_HW_VLAN_RX)
2344                 features |= NETIF_F_HW_VLAN_TX;
2345         else
2346                 features &= ~NETIF_F_HW_VLAN_TX;
2347
2348         return features;
2349 }
2350
2351 static int qlge_set_features(struct net_device *ndev, u32 features)
2352 {
2353         u32 changed = ndev->features ^ features;
2354
2355         if (changed & NETIF_F_HW_VLAN_RX)
2356                 qlge_vlan_mode(ndev, features);
2357
2358         return 0;
2359 }
2360
2361 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2362 {
2363         u32 enable_bit = MAC_ADDR_E;
2364
2365         if (ql_set_mac_addr_reg
2366             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2367                 netif_err(qdev, ifup, qdev->ndev,
2368                           "Failed to init vlan address.\n");
2369         }
2370 }
2371
2372 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2373 {
2374         struct ql_adapter *qdev = netdev_priv(ndev);
2375         int status;
2376
2377         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2378         if (status)
2379                 return;
2380
2381         __qlge_vlan_rx_add_vid(qdev, vid);
2382         set_bit(vid, qdev->active_vlans);
2383
2384         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2385 }
2386
2387 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2388 {
2389         u32 enable_bit = 0;
2390
2391         if (ql_set_mac_addr_reg
2392             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2393                 netif_err(qdev, ifup, qdev->ndev,
2394                           "Failed to clear vlan address.\n");
2395         }
2396 }
2397
2398 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2399 {
2400         struct ql_adapter *qdev = netdev_priv(ndev);
2401         int status;
2402
2403         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2404         if (status)
2405                 return;
2406
2407         __qlge_vlan_rx_kill_vid(qdev, vid);
2408         clear_bit(vid, qdev->active_vlans);
2409
2410         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2411 }
2412
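/* Re-program the hardware VLAN filter with every VLAN id still marked
 * active, e.g. after the adapter has been reset and reinitialized.
 */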
2413 static void qlge_restore_vlan(struct ql_adapter *qdev)
2414 {
2415         int status;
2416         u16 vid;
2417
2418         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2419         if (status)
2420                 return;
2421
2422         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2423                 __qlge_vlan_rx_add_vid(qdev, vid);
2424
2425         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2426 }
2427
2428 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2429 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2430 {
2431         struct rx_ring *rx_ring = dev_id;
2432         napi_schedule(&rx_ring->napi);
2433         return IRQ_HANDLED;
2434 }
2435
2436 /* This handles a fatal error, MPI activity, and the default
2437  * rx_ring in an MSI-X multiple vector environment.
2438  * In an MSI/Legacy environment it also processes the rest of
2439  * the rx_rings.
2440  */
2441 static irqreturn_t qlge_isr(int irq, void *dev_id)
2442 {
2443         struct rx_ring *rx_ring = dev_id;
2444         struct ql_adapter *qdev = rx_ring->qdev;
2445         struct intr_context *intr_context = &qdev->intr_context[0];
2446         u32 var;
2447         int work_done = 0;
2448
2449         spin_lock(&qdev->hw_lock);
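        /* While irq_cnt is nonzero this vector's completion interrupt is
         * masked, so an interrupt arriving now is assumed to come from
         * another device sharing the line.
         */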
2450         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2451                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2452                              "Shared Interrupt, Not ours!\n");
2453                 spin_unlock(&qdev->hw_lock);
2454                 return IRQ_NONE;
2455         }
2456         spin_unlock(&qdev->hw_lock);
2457
2458         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2459
2460         /*
2461          * Check for fatal error.
2462          */
2463         if (var & STS_FE) {
2464                 ql_queue_asic_error(qdev);
2465                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2466                 var = ql_read32(qdev, ERR_STS);
2467                 netdev_err(qdev->ndev, "Resetting chip. "
2468                                         "Error Status Register = 0x%x\n", var);
2469                 return IRQ_HANDLED;
2470         }
2471
2472         /*
2473          * Check MPI processor activity.
2474          */
2475         if ((var & STS_PI) &&
2476                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2477                 /*
2478                  * We've got an async event or mailbox completion.
2479                  * Handle it and clear the source of the interrupt.
2480                  */
2481                 netif_err(qdev, intr, qdev->ndev,
2482                           "Got MPI processor interrupt.\n");
2483                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2484                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2485                 queue_delayed_work_on(smp_processor_id(),
2486                                 qdev->workqueue, &qdev->mpi_work, 0);
2487                 work_done++;
2488         }
2489
2490         /*
2491          * Get the bit-mask that shows the active queues for this
2492          * pass.  Compare it to the queues that this irq services
2493          * and call napi if there's a match.
2494          */
2495         var = ql_read32(qdev, ISR1);
2496         if (var & intr_context->irq_mask) {
2497                 netif_info(qdev, intr, qdev->ndev,
2498                            "Waking handler for rx_ring[0].\n");
2499                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2500                 napi_schedule(&rx_ring->napi);
2501                 work_done++;
2502         }
2503         ql_enable_completion_interrupt(qdev, intr_context->intr);
2504         return work_done ? IRQ_HANDLED : IRQ_NONE;
2505 }
2506
2507 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2508 {
2509
2510         if (skb_is_gso(skb)) {
2511                 int err;
2512                 if (skb_header_cloned(skb)) {
2513                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2514                         if (err)
2515                                 return err;
2516                 }
2517
2518                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2520                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2521                 mac_iocb_ptr->total_hdrs_len =
2522                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2523                 mac_iocb_ptr->net_trans_offset =
2524                     cpu_to_le16(skb_network_offset(skb) |
2525                                 skb_transport_offset(skb)
2526                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2527                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2528                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
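                /* Seed the TCP checksum with the pseudo-header sum so the
                 * hardware can finish the checksum of each segment.
                 */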
2529                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2530                         struct iphdr *iph = ip_hdr(skb);
2531                         iph->check = 0;
2532                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2534                                                                  iph->daddr, 0,
2535                                                                  IPPROTO_TCP,
2536                                                                  0);
2537                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2538                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2539                         tcp_hdr(skb)->check =
2540                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2541                                              &ipv6_hdr(skb)->daddr,
2542                                              0, IPPROTO_TCP, 0);
2543                 }
2544                 return 1;
2545         }
2546         return 0;
2547 }
2548
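     /* Set up checksum offload for a CHECKSUM_PARTIAL, non-TSO IPv4
      * frame.  The TCP or UDP checksum field is seeded with the
      * pseudo-header sum so the hardware can insert the final value.
      */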
2549 static void ql_hw_csum_setup(struct sk_buff *skb,
2550                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2551 {
2552         int len;
2553         struct iphdr *iph = ip_hdr(skb);
2554         __sum16 *check;
2555         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2556         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2557         mac_iocb_ptr->net_trans_offset =
2558                 cpu_to_le16(skb_network_offset(skb) |
2559                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2560
2561         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2562         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2563         if (likely(iph->protocol == IPPROTO_TCP)) {
2564                 check = &(tcp_hdr(skb)->check);
2565                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2566                 mac_iocb_ptr->total_hdrs_len =
2567                     cpu_to_le16(skb_transport_offset(skb) +
2568                                 (tcp_hdr(skb)->doff << 2));
2569         } else {
2570                 check = &(udp_hdr(skb)->check);
2571                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2572                 mac_iocb_ptr->total_hdrs_len =
2573                     cpu_to_le16(skb_transport_offset(skb) +
2574                                 sizeof(struct udphdr));
2575         }
2576         *check = ~csum_tcpudp_magic(iph->saddr,
2577                                     iph->daddr, len, iph->protocol, 0);
2578 }
2579
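     /* Main transmit entry point.  Pads runt frames to ETH_ZLEN, stops
      * the subqueue when the ring is nearly full, builds the MAC IOCB
      * (VLAN tag, TSO or checksum offload as needed), maps the skb and
      * then rings the doorbell with the new producer index.
      */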
2580 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2581 {
2582         struct tx_ring_desc *tx_ring_desc;
2583         struct ob_mac_iocb_req *mac_iocb_ptr;
2584         struct ql_adapter *qdev = netdev_priv(ndev);
2585         int tso;
2586         struct tx_ring *tx_ring;
2587         u32 tx_ring_idx = (u32) skb->queue_mapping;
2588
2589         tx_ring = &qdev->tx_ring[tx_ring_idx];
2590
2591         if (skb_padto(skb, ETH_ZLEN))
2592                 return NETDEV_TX_OK;
2593
2594         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595                 netif_info(qdev, tx_queued, qdev->ndev,
2596                            "%s: shutting down tx queue %d due to lack of resources.\n",
2597                            __func__, tx_ring_idx);
2598                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2599                 atomic_inc(&tx_ring->queue_stopped);
2600                 tx_ring->tx_errors++;
2601                 return NETDEV_TX_BUSY;
2602         }
2603         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2604         mac_iocb_ptr = tx_ring_desc->queue_entry;
2605         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2606
2607         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2608         mac_iocb_ptr->tid = tx_ring_desc->index;
2609         /* We use the upper 32-bits to store the tx queue for this IO.
2610          * When we get the completion we can use it to establish the context.
2611          */
2612         mac_iocb_ptr->txq_idx = tx_ring_idx;
2613         tx_ring_desc->skb = skb;
2614
2615         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2616
2617         if (vlan_tx_tag_present(skb)) {
2618                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2619                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2620                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2621                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2622         }
2623         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2624         if (tso < 0) {
2625                 dev_kfree_skb_any(skb);
2626                 return NETDEV_TX_OK;
2627         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2628                 ql_hw_csum_setup(skb,
2629                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2630         }
2631         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2632                         NETDEV_TX_OK) {
2633                 netif_err(qdev, tx_queued, qdev->ndev,
2634                           "Could not map the segments.\n");
2635                 tx_ring->tx_errors++;
2636                 return NETDEV_TX_BUSY;
2637         }
2638         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2639         tx_ring->prod_idx++;
2640         if (tx_ring->prod_idx == tx_ring->wq_len)
2641                 tx_ring->prod_idx = 0;
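             /*
              * Make sure the IOCB is written to memory before the
              * doorbell write below tells the hardware to fetch it.
              */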
2642         wmb();
2643
2644         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2645         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2646                      "tx queued, slot %d, len %d\n",
2647                      tx_ring->prod_idx, skb->len);
2648
2649         atomic_dec(&tx_ring->tx_count);
2650         return NETDEV_TX_OK;
2651 }
2652
2653
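     /* Free the DMA-coherent pages holding the rx and tx shadow
      * register areas, if they were allocated.
      */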
2654 static void ql_free_shadow_space(struct ql_adapter *qdev)
2655 {
2656         if (qdev->rx_ring_shadow_reg_area) {
2657                 pci_free_consistent(qdev->pdev,
2658                                     PAGE_SIZE,
2659                                     qdev->rx_ring_shadow_reg_area,
2660                                     qdev->rx_ring_shadow_reg_dma);
2661                 qdev->rx_ring_shadow_reg_area = NULL;
2662         }
2663         if (qdev->tx_ring_shadow_reg_area) {
2664                 pci_free_consistent(qdev->pdev,
2665                                     PAGE_SIZE,
2666                                     qdev->tx_ring_shadow_reg_area,
2667                                     qdev->tx_ring_shadow_reg_dma);
2668                 qdev->tx_ring_shadow_reg_area = NULL;
2669         }
2670 }
2671
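     /* Allocate and zero one DMA-coherent page each for the rx and tx
      * shadow register areas.  Returns 0 on success or -ENOMEM, freeing
      * anything allocated here on failure.
      */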
2672 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2673 {
2674         qdev->rx_ring_shadow_reg_area =
2675             pci_alloc_consistent(qdev->pdev,
2676                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2677         if (qdev->rx_ring_shadow_reg_area == NULL) {
2678                 netif_err(qdev, ifup, qdev->ndev,
2679                           "Allocation of RX shadow space failed.\n");
2680                 return -ENOMEM;
2681         }
2682         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2683         qdev->tx_ring_shadow_reg_area =
2684             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2685                                  &qdev->tx_ring_shadow_reg_dma);
2686         if (qdev->tx_ring_shadow_reg_area == NULL) {
2687                 netif_err(qdev, ifup, qdev->ndev,
2688                           "Allocation of TX shadow space failed.\n");
2689                 goto err_wqp_sh_area;
2690         }
2691         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2692         return 0;
2693
2694 err_wqp_sh_area:
2695         pci_free_consistent(qdev->pdev,
2696                             PAGE_SIZE,
2697                             qdev->rx_ring_shadow_reg_area,
2698                             qdev->rx_ring_shadow_reg_dma);
             /* Don't leave a stale pointer for ql_free_shadow_space(). */
             qdev->rx_ring_shadow_reg_area = NULL;
2699         return -ENOMEM;
2700 }
2701
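     /* Reset the software state of a tx ring: pair each tx_ring_desc
      * with its IOCB slot in the work queue and mark the whole ring as
      * free.
      */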
2702 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2703 {
2704         struct tx_ring_desc *tx_ring_desc;
2705         int i;
2706         struct ob_mac_iocb_req *mac_iocb_ptr;
2707
2708         mac_iocb_ptr = tx_ring->wq_base;
2709         tx_ring_desc = tx_ring->q;
2710         for (i = 0; i < tx_ring->wq_len; i++) {
2711                 tx_ring_desc->index = i;
2712                 tx_ring_desc->skb = NULL;
2713                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2714                 mac_iocb_ptr++;
2715                 tx_ring_desc++;
2716         }
2717         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2718         atomic_set(&tx_ring->queue_stopped, 0);
2719 }
2720
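     /* Free the DMA work queue and the descriptor shadow array of one
      * tx ring.
      */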
2721 static void ql_free_tx_resources(struct ql_adapter *qdev,
2722                                  struct tx_ring *tx_ring)
2723 {
2724         if (tx_ring->wq_base) {
2725                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2726                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2727                 tx_ring->wq_base = NULL;
2728         }
2729         kfree(tx_ring->q);
2730         tx_ring->q = NULL;
2731 }
2732
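     /* Allocate the DMA work queue and the descriptor shadow array for
      * one tx ring.  The work queue base address must meet the
      * WQ_ADDR_ALIGN constraint.
      */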
2733 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2734                                  struct tx_ring *tx_ring)
2735 {
2736         tx_ring->wq_base =
2737             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2738                                  &tx_ring->wq_base_dma);
2739
2740         if ((tx_ring->wq_base == NULL) ||
2741             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2742                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2743                 return -ENOMEM;
2744         }
2745         tx_ring->q =
2746             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747         if (tx_ring->q == NULL)
2748                 goto err;
2749
2750         return 0;
2751 err:
2752         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753                             tx_ring->wq_base, tx_ring->wq_base_dma);
             /* Clear the stale pointer so a later ql_free_tx_resources()
              * does not free this area a second time.
              */
             tx_ring->wq_base = NULL;
2754         return -ENOMEM;