bnx2x: rename MF related fields
[pandora-kernel.git] drivers/net/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18
19 #include <linux/etherdevice.h>
20 #include <linux/ip.h>
21 #include <linux/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
25
26 #ifdef BCM_VLAN
27 #include <linux/if_vlan.h>
28 #endif
29
30 #include "bnx2x_init.h"
31
32 static int bnx2x_poll(struct napi_struct *napi, int budget);
33
34 /* Free the skb in the packet ring at position idx and
35  * return the index of the last bd freed.
36  */
37 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38                              u16 idx)
39 {
40         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
41         struct eth_tx_start_bd *tx_start_bd;
42         struct eth_tx_bd *tx_data_bd;
43         struct sk_buff *skb = tx_buf->skb;
44         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45         int nbd;
46
47         /* prefetch skb end pointer to speed up dev_kfree_skb() */
48         prefetch(&skb->end);
49
50         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
51            idx, tx_buf, skb);
52
53         /* unmap first bd */
54         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
55         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
56         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
57                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
58
59         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
60 #ifdef BNX2X_STOP_ON_ERROR
61         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
62                 BNX2X_ERR("BAD nbd!\n");
63                 bnx2x_panic();
64         }
65 #endif
66         new_cons = nbd + tx_buf->first_bd;
67
68         /* Get the next bd */
69         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71         /* Skip a parse bd... */
72         --nbd;
73         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
74
75         /* ...and the TSO split header bd since they have no mapping */
76         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
77                 --nbd;
78                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
79         }
80
81         /* now free frags */
82         while (nbd > 0) {
83
84                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
85                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
86                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
87                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
88                 if (--nbd)
89                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
90         }
91
92         /* release skb */
93         WARN_ON(!skb);
94         dev_kfree_skb(skb);
95         tx_buf->first_bd = 0;
96         tx_buf->skb = NULL;
97
98         return new_cons;
99 }
100
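/* bnx2x_tx_int - service Tx completions for one fastpath queue.
 * Walks the Tx ring from the software consumer to the hardware consumer,
 * frees the transmitted skbs and re-wakes the netdev Tx queue if it was
 * stopped and enough BDs have become available again.
 */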
101 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
102 {
103         struct bnx2x *bp = fp->bp;
104         struct netdev_queue *txq;
105         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
106
107 #ifdef BNX2X_STOP_ON_ERROR
108         if (unlikely(bp->panic))
109                 return -1;
110 #endif
111
112         txq = netdev_get_tx_queue(bp->dev, fp->index);
113         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
114         sw_cons = fp->tx_pkt_cons;
115
116         while (sw_cons != hw_cons) {
117                 u16 pkt_cons;
118
119                 pkt_cons = TX_BD(sw_cons);
120
121                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
122
123                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
124                    hw_cons, sw_cons, pkt_cons);
125
126 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
127                         rmb();
128                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
129                 }
130 */
131                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
132                 sw_cons++;
133         }
134
135         fp->tx_pkt_cons = sw_cons;
136         fp->tx_bd_cons = bd_cons;
137
138         /* Need to make the tx_bd_cons update visible to start_xmit()
139          * before checking for netif_tx_queue_stopped().  Without the
140          * memory barrier, there is a small possibility that
141          * start_xmit() will miss it and cause the queue to be stopped
142          * forever.
143          */
144         smp_mb();
145
146         /* TBD need a thresh? */
147         if (unlikely(netif_tx_queue_stopped(txq))) {
148                 /* Taking tx_lock() is needed to prevent re-enabling the queue
149                  * while it's empty. This could have happened if rx_action() gets
150                  * suspended in bnx2x_tx_int() after the condition before
151                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
152                  *
153                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
154                  * sends some packets consuming the whole queue again->
155                  * stops the queue
156                  */
157
158                 __netif_tx_lock(txq, smp_processor_id());
159
160                 if ((netif_tx_queue_stopped(txq)) &&
161                     (bp->state == BNX2X_STATE_OPEN) &&
162                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
163                         netif_tx_wake_queue(txq);
164
165                 __netif_tx_unlock(txq);
166         }
167         return 0;
168 }
169
170 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
171                                              u16 idx)
172 {
173         u16 last_max = fp->last_max_sge;
174
175         if (SUB_S16(idx, last_max) > 0)
176                 fp->last_max_sge = idx;
177 }
178
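/* bnx2x_update_sge_prod - advance the SGE producer after a TPA aggregation.
 * Marks the SGEs consumed by this CQE as used in the SGE mask and moves
 * rx_sge_prod past every mask element that has been fully consumed.
 */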
179 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
180                                   struct eth_fast_path_rx_cqe *fp_cqe)
181 {
182         struct bnx2x *bp = fp->bp;
183         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
184                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
185                       SGE_PAGE_SHIFT;
186         u16 last_max, last_elem, first_elem;
187         u16 delta = 0;
188         u16 i;
189
190         if (!sge_len)
191                 return;
192
193         /* First mark all used pages */
194         for (i = 0; i < sge_len; i++)
195                 SGE_MASK_CLEAR_BIT(fp,
196                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
197
198         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
199            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
200
201         /* Here we assume that the last SGE index is the biggest */
202         prefetch((void *)(fp->sge_mask));
203         bnx2x_update_last_max_sge(fp,
204                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
205
206         last_max = RX_SGE(fp->last_max_sge);
207         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
208         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
209
210         /* If ring is not full */
211         if (last_elem + 1 != first_elem)
212                 last_elem++;
213
214         /* Now update the prod */
215         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
216                 if (likely(fp->sge_mask[i]))
217                         break;
218
219                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
220                 delta += RX_SGE_MASK_ELEM_SZ;
221         }
222
223         if (delta > 0) {
224                 fp->rx_sge_prod += delta;
225                 /* clear page-end entries */
226                 bnx2x_clear_sge_mask_next_elems(fp);
227         }
228
229         DP(NETIF_MSG_RX_STATUS,
230            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
231            fp->last_max_sge, fp->rx_sge_prod);
232 }
233
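/* bnx2x_tpa_start - open a TPA aggregation on the given queue (bin).
 * The spare skb kept in the TPA pool is moved to the producer slot and
 * mapped, while the skb that starts the aggregation is parked in the pool
 * (still mapped) until the matching TPA_STOP arrives.
 */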
234 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
235                             struct sk_buff *skb, u16 cons, u16 prod)
236 {
237         struct bnx2x *bp = fp->bp;
238         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
239         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
240         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
241         dma_addr_t mapping;
242
243         /* move empty skb from pool to prod and map it */
244         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
245         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
246                                  bp->rx_buf_size, DMA_FROM_DEVICE);
247         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
248
249         /* move partial skb from cons to pool (don't unmap yet) */
250         fp->tpa_pool[queue] = *cons_rx_buf;
251
252         /* mark bin state as start - print error if current state != stop */
253         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
254                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
255
256         fp->tpa_state[queue] = BNX2X_TPA_START;
257
258         /* point prod_bd to new skb */
259         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
260         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
261
262 #ifdef BNX2X_STOP_ON_ERROR
263         fp->tpa_queue_used |= (1 << queue);
264 #ifdef _ASM_GENERIC_INT_L64_H
265         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
266 #else
267         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
268 #endif
269            fp->tpa_queue_used);
270 #endif
271 }
272
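/* bnx2x_fill_frag_skb - attach the SGE pages listed in the CQE to the
 * aggregated skb as page fragments. Every page handed to the stack is
 * replaced in the SGE ring; on allocation failure the packet is dropped.
 */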
273 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
274                                struct sk_buff *skb,
275                                struct eth_fast_path_rx_cqe *fp_cqe,
276                                u16 cqe_idx)
277 {
278         struct sw_rx_page *rx_pg, old_rx_pg;
279         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
280         u32 i, frag_len, frag_size, pages;
281         int err;
282         int j;
283
284         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
285         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
286
287         /* This is needed in order to enable forwarding support */
288         if (frag_size)
289                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
290                                                max(frag_size, (u32)len_on_bd));
291
292 #ifdef BNX2X_STOP_ON_ERROR
293         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
294                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
295                           pages, cqe_idx);
296                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
297                           fp_cqe->pkt_len, len_on_bd);
298                 bnx2x_panic();
299                 return -EINVAL;
300         }
301 #endif
302
303         /* Run through the SGL and compose the fragmented skb */
304         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
305                 u16 sge_idx =
306                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
307
308                 /* FW gives the indices of the SGE as if the ring is an array
309                    (meaning that "next" element will consume 2 indices) */
310                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
311                 rx_pg = &fp->rx_page_ring[sge_idx];
312                 old_rx_pg = *rx_pg;
313
314                 /* If we fail to allocate a substitute page, we simply stop
315                    where we are and drop the whole packet */
316                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
317                 if (unlikely(err)) {
318                         fp->eth_q_stats.rx_skb_alloc_failed++;
319                         return err;
320                 }
321
322                 /* Unmap the page as we are going to pass it to the stack */
323                 dma_unmap_page(&bp->pdev->dev,
324                                dma_unmap_addr(&old_rx_pg, mapping),
325                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
326
327                 /* Add one frag and update the appropriate fields in the skb */
328                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
329
330                 skb->data_len += frag_len;
331                 skb->truesize += frag_len;
332                 skb->len += frag_len;
333
334                 frag_size -= frag_len;
335         }
336
337         return 0;
338 }
339
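/* bnx2x_tpa_stop - close a TPA aggregation: fix up the IP checksum of the
 * linear part, attach the SGE fragments and pass the skb up (through the
 * VLAN/GRO helpers), then place a freshly allocated skb back in the bin.
 * If no replacement skb can be allocated the aggregated packet is dropped.
 */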
340 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
341                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
342                            u16 cqe_idx)
343 {
344         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
345         struct sk_buff *skb = rx_buf->skb;
346         /* alloc new skb */
347         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
348
349         /* Unmap skb in the pool anyway, as we are going to change
350            pool entry status to BNX2X_TPA_STOP even if new skb allocation
351            fails. */
352         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
353                          bp->rx_buf_size, DMA_FROM_DEVICE);
354
355         if (likely(new_skb)) {
356                 /* fix ip xsum and give it to the stack */
357                 /* (no need to map the new skb) */
358 #ifdef BCM_VLAN
359                 int is_vlan_cqe =
360                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
361                          PARSING_FLAGS_VLAN);
362                 int is_not_hwaccel_vlan_cqe =
363                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
364 #endif
365
366                 prefetch(skb);
367                 prefetch(((char *)(skb)) + 128);
368
369 #ifdef BNX2X_STOP_ON_ERROR
370                 if (pad + len > bp->rx_buf_size) {
371                         BNX2X_ERR("skb_put is about to fail...  "
372                                   "pad %d  len %d  rx_buf_size %d\n",
373                                   pad, len, bp->rx_buf_size);
374                         bnx2x_panic();
375                         return;
376                 }
377 #endif
378
379                 skb_reserve(skb, pad);
380                 skb_put(skb, len);
381
382                 skb->protocol = eth_type_trans(skb, bp->dev);
383                 skb->ip_summed = CHECKSUM_UNNECESSARY;
384
385                 {
386                         struct iphdr *iph;
387
388                         iph = (struct iphdr *)skb->data;
389 #ifdef BCM_VLAN
390                         /* If there is no Rx VLAN offloading -
391                            take the VLAN tag into account */
392                         if (unlikely(is_not_hwaccel_vlan_cqe))
393                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
394 #endif
395                         iph->check = 0;
396                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
397                 }
398
399                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
400                                          &cqe->fast_path_cqe, cqe_idx)) {
401 #ifdef BCM_VLAN
402                         if ((bp->vlgrp != NULL) &&
403                                 (le16_to_cpu(cqe->fast_path_cqe.
404                                 pars_flags.flags) & PARSING_FLAGS_VLAN))
405                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
406                                                  le16_to_cpu(cqe->fast_path_cqe.
407                                                              vlan_tag), skb);
408                         else
409 #endif
410                                 napi_gro_receive(&fp->napi, skb);
411                 } else {
412                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
413                            " - dropping packet!\n");
414                         dev_kfree_skb(skb);
415                 }
416
417
418                 /* put new skb in bin */
419                 fp->tpa_pool[queue].skb = new_skb;
420
421         } else {
422                 /* else drop the packet and keep the buffer in the bin */
423                 DP(NETIF_MSG_RX_STATUS,
424                    "Failed to allocate new skb - dropping packet!\n");
425                 fp->eth_q_stats.rx_skb_alloc_failed++;
426         }
427
428         fp->tpa_state[queue] = BNX2X_TPA_STOP;
429 }
430
431 /* Set Toeplitz hash value in the skb using the value from the
432  * CQE (calculated by HW).
433  */
434 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
435                                         struct sk_buff *skb)
436 {
437         /* Set Toeplitz hash from CQE */
438         if ((bp->dev->features & NETIF_F_RXHASH) &&
439             (cqe->fast_path_cqe.status_flags &
440              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
441                 skb->rxhash =
442                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
443 }
444
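/* bnx2x_rx_int - Rx completion processing for one fastpath queue, bounded
 * by the NAPI budget. Dispatches slowpath CQEs, handles TPA start/stop
 * events and regular packets, and finally publishes the new Rx producers.
 */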
445 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
446 {
447         struct bnx2x *bp = fp->bp;
448         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
449         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
450         int rx_pkt = 0;
451
452 #ifdef BNX2X_STOP_ON_ERROR
453         if (unlikely(bp->panic))
454                 return 0;
455 #endif
456
457         /* The CQ "next element" is the same size as a regular element,
458            that's why it's ok here */
459         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
460         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
461                 hw_comp_cons++;
462
463         bd_cons = fp->rx_bd_cons;
464         bd_prod = fp->rx_bd_prod;
465         bd_prod_fw = bd_prod;
466         sw_comp_cons = fp->rx_comp_cons;
467         sw_comp_prod = fp->rx_comp_prod;
468
469         /* Memory barrier necessary as speculative reads of the rx
470          * buffer can be ahead of the index in the status block
471          */
472         rmb();
473
474         DP(NETIF_MSG_RX_STATUS,
475            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
476            fp->index, hw_comp_cons, sw_comp_cons);
477
478         while (sw_comp_cons != hw_comp_cons) {
479                 struct sw_rx_bd *rx_buf = NULL;
480                 struct sk_buff *skb;
481                 union eth_rx_cqe *cqe;
482                 u8 cqe_fp_flags;
483                 u16 len, pad;
484
485                 comp_ring_cons = RCQ_BD(sw_comp_cons);
486                 bd_prod = RX_BD(bd_prod);
487                 bd_cons = RX_BD(bd_cons);
488
489                 /* Prefetch the page containing the BD descriptor
490                    at the producer's index. It will be needed when a new
491                    skb is allocated */
492                 prefetch((void *)(PAGE_ALIGN((unsigned long)
493                                              (&fp->rx_desc_ring[bd_prod])) -
494                                   PAGE_SIZE + 1));
495
496                 cqe = &fp->rx_comp_ring[comp_ring_cons];
497                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
498
499                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
500                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
501                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
502                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
503                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
504                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
505
506                 /* is this a slowpath msg? */
507                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
508                         bnx2x_sp_event(fp, cqe);
509                         goto next_cqe;
510
511                 /* this is an rx packet */
512                 } else {
513                         rx_buf = &fp->rx_buf_ring[bd_cons];
514                         skb = rx_buf->skb;
515                         prefetch(skb);
516                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
517                         pad = cqe->fast_path_cqe.placement_offset;
518
519                         /* If CQE is marked both TPA_START and TPA_END
520                            it is a non-TPA CQE */
521                         if ((!fp->disable_tpa) &&
522                             (TPA_TYPE(cqe_fp_flags) !=
523                                         (TPA_TYPE_START | TPA_TYPE_END))) {
524                                 u16 queue = cqe->fast_path_cqe.queue_index;
525
526                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
527                                         DP(NETIF_MSG_RX_STATUS,
528                                            "calling tpa_start on queue %d\n",
529                                            queue);
530
531                                         bnx2x_tpa_start(fp, queue, skb,
532                                                         bd_cons, bd_prod);
533
534                                         /* Set Toeplitz hash for an LRO skb */
535                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
536
537                                         goto next_rx;
538                                 }
539
540                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
541                                         DP(NETIF_MSG_RX_STATUS,
542                                            "calling tpa_stop on queue %d\n",
543                                            queue);
544
545                                         if (!BNX2X_RX_SUM_FIX(cqe))
546                                                 BNX2X_ERR("STOP on non-TCP "
547                                                           "data\n");
548
549                                         /* This is the size of the linear data
550                                            on this skb */
551                                         len = le16_to_cpu(cqe->fast_path_cqe.
552                                                                 len_on_bd);
553                                         bnx2x_tpa_stop(bp, fp, queue, pad,
554                                                     len, cqe, comp_ring_cons);
555 #ifdef BNX2X_STOP_ON_ERROR
556                                         if (bp->panic)
557                                                 return 0;
558 #endif
559
560                                         bnx2x_update_sge_prod(fp,
561                                                         &cqe->fast_path_cqe);
562                                         goto next_cqe;
563                                 }
564                         }
565
566                         dma_sync_single_for_device(&bp->pdev->dev,
567                                         dma_unmap_addr(rx_buf, mapping),
568                                                    pad + RX_COPY_THRESH,
569                                                    DMA_FROM_DEVICE);
570                         prefetch(((char *)(skb)) + 128);
571
572                         /* is this an error packet? */
573                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
574                                 DP(NETIF_MSG_RX_ERR,
575                                    "ERROR  flags %x  rx packet %u\n",
576                                    cqe_fp_flags, sw_comp_cons);
577                                 fp->eth_q_stats.rx_err_discard_pkt++;
578                                 goto reuse_rx;
579                         }
580
581                         /* Since we don't have a jumbo ring
582                          * copy small packets if mtu > 1500
583                          */
584                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
585                             (len <= RX_COPY_THRESH)) {
586                                 struct sk_buff *new_skb;
587
588                                 new_skb = netdev_alloc_skb(bp->dev,
589                                                            len + pad);
590                                 if (new_skb == NULL) {
591                                         DP(NETIF_MSG_RX_ERR,
592                                            "ERROR  packet dropped "
593                                            "because of alloc failure\n");
594                                         fp->eth_q_stats.rx_skb_alloc_failed++;
595                                         goto reuse_rx;
596                                 }
597
598                                 /* aligned copy */
599                                 skb_copy_from_linear_data_offset(skb, pad,
600                                                     new_skb->data + pad, len);
601                                 skb_reserve(new_skb, pad);
602                                 skb_put(new_skb, len);
603
604                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
605
606                                 skb = new_skb;
607
608                         } else
609                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
610                                 dma_unmap_single(&bp->pdev->dev,
611                                         dma_unmap_addr(rx_buf, mapping),
612                                                  bp->rx_buf_size,
613                                                  DMA_FROM_DEVICE);
614                                 skb_reserve(skb, pad);
615                                 skb_put(skb, len);
616
617                         } else {
618                                 DP(NETIF_MSG_RX_ERR,
619                                    "ERROR  packet dropped because "
620                                    "of alloc failure\n");
621                                 fp->eth_q_stats.rx_skb_alloc_failed++;
622 reuse_rx:
623                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
624                                 goto next_rx;
625                         }
626
627                         skb->protocol = eth_type_trans(skb, bp->dev);
628
629                         /* Set Toeplitz hash for a non-LRO skb */
630                         bnx2x_set_skb_rxhash(bp, cqe, skb);
631
632                         skb_checksum_none_assert(skb);
633                         if (bp->rx_csum) {
634                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
635                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
636                                 else
637                                         fp->eth_q_stats.hw_csum_err++;
638                         }
639                 }
640
641                 skb_record_rx_queue(skb, fp->index);
642
643 #ifdef BCM_VLAN
644                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
645                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
646                      PARSING_FLAGS_VLAN))
647                         vlan_gro_receive(&fp->napi, bp->vlgrp,
648                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
649                 else
650 #endif
651                         napi_gro_receive(&fp->napi, skb);
652
653
654 next_rx:
655                 rx_buf->skb = NULL;
656
657                 bd_cons = NEXT_RX_IDX(bd_cons);
658                 bd_prod = NEXT_RX_IDX(bd_prod);
659                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
660                 rx_pkt++;
661 next_cqe:
662                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
663                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
664
665                 if (rx_pkt == budget)
666                         break;
667         } /* while */
668
669         fp->rx_bd_cons = bd_cons;
670         fp->rx_bd_prod = bd_prod_fw;
671         fp->rx_comp_cons = sw_comp_cons;
672         fp->rx_comp_prod = sw_comp_prod;
673
674         /* Update producers */
675         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
676                              fp->rx_sge_prod);
677
678         fp->rx_pkt += rx_pkt;
679         fp->rx_calls++;
680
681         return rx_pkt;
682 }
683
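/* MSI-X fastpath interrupt handler: acknowledge the status block with
 * further interrupts disabled and hand the actual Rx/Tx work to NAPI.
 */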
684 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
685 {
686         struct bnx2x_fastpath *fp = fp_cookie;
687         struct bnx2x *bp = fp->bp;
688
689         /* Return here if interrupt is disabled */
690         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
691                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
692                 return IRQ_HANDLED;
693         }
694
695         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
696                          "[fp %d fw_sb %d igu_sb %d]\n",
697            fp->index, fp->fw_sb_id, fp->igu_sb_id);
698         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
699
700 #ifdef BNX2X_STOP_ON_ERROR
701         if (unlikely(bp->panic))
702                 return IRQ_HANDLED;
703 #endif
704
705         /* Handle Rx and Tx according to MSI-X vector */
706         prefetch(fp->rx_cons_sb);
707         prefetch(fp->tx_cons_sb);
708         prefetch(&fp->sb_running_index[SM_RX_ID]);
709         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
710
711         return IRQ_HANDLED;
712 }
713
714
715 /* HW Lock for shared dual port PHYs */
716 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
717 {
718         mutex_lock(&bp->port.phy_mutex);
719
720         if (bp->port.need_hw_lock)
721                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
722 }
723
724 void bnx2x_release_phy_lock(struct bnx2x *bp)
725 {
726         if (bp->port.need_hw_lock)
727                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
728
729         mutex_unlock(&bp->port.phy_mutex);
730 }
731
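/* Log the current link state; in multi-function mode the reported speed is
 * capped by the per-function maximum bandwidth taken from mf_config.
 */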
732 void bnx2x_link_report(struct bnx2x *bp)
733 {
734         if (bp->flags & MF_FUNC_DIS) {
735                 netif_carrier_off(bp->dev);
736                 netdev_err(bp->dev, "NIC Link is Down\n");
737                 return;
738         }
739
740         if (bp->link_vars.link_up) {
741                 u16 line_speed;
742
743                 if (bp->state == BNX2X_STATE_OPEN)
744                         netif_carrier_on(bp->dev);
745                 netdev_info(bp->dev, "NIC Link is Up, ");
746
747                 line_speed = bp->link_vars.line_speed;
748                 if (IS_MF(bp)) {
749                         u16 vn_max_rate;
750
751                         vn_max_rate =
752                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
753                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
754                         if (vn_max_rate < line_speed)
755                                 line_speed = vn_max_rate;
756                 }
757                 pr_cont("%d Mbps ", line_speed);
758
759                 if (bp->link_vars.duplex == DUPLEX_FULL)
760                         pr_cont("full duplex");
761                 else
762                         pr_cont("half duplex");
763
764                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
765                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
766                                 pr_cont(", receive ");
767                                 if (bp->link_vars.flow_ctrl &
768                                     BNX2X_FLOW_CTRL_TX)
769                                         pr_cont("& transmit ");
770                         } else {
771                                 pr_cont(", transmit ");
772                         }
773                         pr_cont("flow control ON");
774                 }
775                 pr_cont("\n");
776
777         } else { /* link_down */
778                 netif_carrier_off(bp->dev);
779                 netdev_err(bp->dev, "NIC Link is Down\n");
780         }
781 }
782
783 /* Returns the number of actually allocated BDs */
784 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
785                                       int rx_ring_size)
786 {
787         struct bnx2x *bp = fp->bp;
788         u16 ring_prod, cqe_ring_prod;
789         int i;
790
791         fp->rx_comp_cons = 0;
792         cqe_ring_prod = ring_prod = 0;
793         for (i = 0; i < rx_ring_size; i++) {
794                 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
795                         BNX2X_ERR("was only able to allocate "
796                                   "%d rx skbs on queue[%d]\n", i, fp->index);
797                         fp->eth_q_stats.rx_skb_alloc_failed++;
798                         break;
799                 }
800                 ring_prod = NEXT_RX_IDX(ring_prod);
801                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
802                 WARN_ON(ring_prod <= i);
803         }
804
805         fp->rx_bd_prod = ring_prod;
806         /* Limit the CQE producer by the CQE ring size */
807         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
808                                cqe_ring_prod);
809         fp->rx_pkt = fp->rx_calls = 0;
810
811         return i;
812 }
813
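/* Fill the Rx BD ring (requesting at least MIN_RX_AVAIL entries) and
 * publish the initial Rx producers to the chip.
 */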
814 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
815 {
816         struct bnx2x *bp = fp->bp;
817         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
818                                               MAX_RX_AVAIL/bp->num_queues;
819
820         rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
821
822         bnx2x_alloc_rx_bds(fp, rx_ring_size);
823
824         /* Warning!
825          * This will generate an interrupt (to the TSTORM);
826          * it must only be done after the chip is initialized.
827          */
828         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
829                              fp->rx_sge_prod);
830 }
831
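/* Allocate and initialize the Rx BD, CQ and SGE rings for every queue,
 * including the per-queue TPA pool; TPA is disabled on a queue if its
 * pool or SGE allocation fails.
 */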
832 void bnx2x_init_rx_rings(struct bnx2x *bp)
833 {
834         int func = BP_FUNC(bp);
835         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
836                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
837         u16 ring_prod;
838         int i, j;
839
840         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
841                 BNX2X_FW_IP_HDR_ALIGN_PAD;
842
843         DP(NETIF_MSG_IFUP,
844            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
845
846         for_each_queue(bp, j) {
847                 struct bnx2x_fastpath *fp = &bp->fp[j];
848
849                 if (!fp->disable_tpa) {
850                         for (i = 0; i < max_agg_queues; i++) {
851                                 fp->tpa_pool[i].skb =
852                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
853                                 if (!fp->tpa_pool[i].skb) {
854                                         BNX2X_ERR("Failed to allocate TPA "
855                                                   "skb pool for queue[%d] - "
856                                                   "disabling TPA on this "
857                                                   "queue!\n", j);
858                                         bnx2x_free_tpa_pool(bp, fp, i);
859                                         fp->disable_tpa = 1;
860                                         break;
861                                 }
862                                 dma_unmap_addr_set((struct sw_rx_bd *)
863                                                         &fp->tpa_pool[i],
864                                                    mapping, 0);
865                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
866                         }
867
868                         /* "next page" elements initialization */
869                         bnx2x_set_next_page_sgl(fp);
870
871                         /* set SGEs bit mask */
872                         bnx2x_init_sge_ring_bit_mask(fp);
873
874                         /* Allocate SGEs and initialize the ring elements */
875                         for (i = 0, ring_prod = 0;
876                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
877
878                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
879                                         BNX2X_ERR("was only able to allocate "
880                                                   "%d rx sges\n", i);
881                                         BNX2X_ERR("disabling TPA for"
882                                                   " queue[%d]\n", j);
883                                         /* Cleanup already allocated elements */
884                                         bnx2x_free_rx_sge_range(bp,
885                                                                 fp, ring_prod);
886                                         bnx2x_free_tpa_pool(bp,
887                                                             fp, max_agg_queues);
888                                         fp->disable_tpa = 1;
889                                         ring_prod = 0;
890                                         break;
891                                 }
892                                 ring_prod = NEXT_SGE_IDX(ring_prod);
893                         }
894
895                         fp->rx_sge_prod = ring_prod;
896                 }
897         }
898
899         for_each_queue(bp, j) {
900                 struct bnx2x_fastpath *fp = &bp->fp[j];
901
902                 fp->rx_bd_cons = 0;
903
904                 bnx2x_set_next_page_rx_bd(fp);
905
906                 /* CQ ring */
907                 bnx2x_set_next_page_rx_cq(fp);
908
909                 /* Allocate BDs and initialize BD ring */
910                 bnx2x_alloc_rx_bd_ring(fp);
911
912                 if (j != 0)
913                         continue;
914
915                 REG_WR(bp, BAR_USTRORM_INTMEM +
916                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
917                        U64_LO(fp->rx_comp_mapping));
918                 REG_WR(bp, BAR_USTRORM_INTMEM +
919                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
920                        U64_HI(fp->rx_comp_mapping));
921         }
922
923 }
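
/* Free every skb still pending on the Tx rings (unload and error paths). */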
924 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
925 {
926         int i;
927
928         for_each_queue(bp, i) {
929                 struct bnx2x_fastpath *fp = &bp->fp[i];
930
931                 u16 bd_cons = fp->tx_bd_cons;
932                 u16 sw_prod = fp->tx_pkt_prod;
933                 u16 sw_cons = fp->tx_pkt_cons;
934
935                 while (sw_cons != sw_prod) {
936                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
937                         sw_cons++;
938                 }
939         }
940 }
941
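/* Unmap and free all Rx skbs and, where TPA is enabled, the TPA pool. */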
942 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
943 {
944         int i, j;
945
946         for_each_queue(bp, j) {
947                 struct bnx2x_fastpath *fp = &bp->fp[j];
948
949                 for (i = 0; i < NUM_RX_BD; i++) {
950                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
951                         struct sk_buff *skb = rx_buf->skb;
952
953                         if (skb == NULL)
954                                 continue;
955
956                         dma_unmap_single(&bp->pdev->dev,
957                                          dma_unmap_addr(rx_buf, mapping),
958                                          bp->rx_buf_size, DMA_FROM_DEVICE);
959
960                         rx_buf->skb = NULL;
961                         dev_kfree_skb(skb);
962                 }
963                 if (!fp->disable_tpa)
964                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
965                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
966                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
967         }
968 }
969
970 void bnx2x_free_skbs(struct bnx2x *bp)
971 {
972         bnx2x_free_tx_skbs(bp);
973         bnx2x_free_rx_skbs(bp);
974 }
975
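/* Release the slowpath MSI-X vector and every fastpath vector; the CNIC
 * vector (when compiled in) is skipped via 'offset'.
 */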
976 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
977 {
978         int i, offset = 1;
979
980         free_irq(bp->msix_table[0].vector, bp->dev);
981         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
982            bp->msix_table[0].vector);
983
984 #ifdef BCM_CNIC
985         offset++;
986 #endif
987         for_each_queue(bp, i) {
988                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
989                    "state %x\n", i, bp->msix_table[i + offset].vector,
990                    bnx2x_fp(bp, i, state));
991
992                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
993         }
994 }
995
996 void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
997 {
998         if (bp->flags & USING_MSIX_FLAG) {
999                 if (!disable_only)
1000                         bnx2x_free_msix_irqs(bp);
1001                 pci_disable_msix(bp->pdev);
1002                 bp->flags &= ~USING_MSIX_FLAG;
1003
1004         } else if (bp->flags & USING_MSI_FLAG) {
1005                 if (!disable_only)
1006                         free_irq(bp->pdev->irq, bp->dev);
1007                 pci_disable_msi(bp->pdev);
1008                 bp->flags &= ~USING_MSI_FLAG;
1009
1010         } else if (!disable_only)
1011                 free_irq(bp->pdev->irq, bp->dev);
1012 }
1013
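/* Request one MSI-X vector for the slowpath (plus one for CNIC when built
 * in) and one per fastpath queue. If fewer vectors are granted, retry with
 * what is available and shrink bp->num_queues to match.
 */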
1014 static int bnx2x_enable_msix(struct bnx2x *bp)
1015 {
1016         int i, rc, offset = 1;
1017         int igu_vec = 0;
1018
1019         bp->msix_table[0].entry = igu_vec;
1020         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1021
1022 #ifdef BCM_CNIC
1023         igu_vec = BP_L_ID(bp) + offset;
1024         bp->msix_table[1].entry = igu_vec;
1025         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1026         offset++;
1027 #endif
1028         for_each_queue(bp, i) {
1029                 igu_vec = BP_L_ID(bp) + offset + i;
1030                 bp->msix_table[i + offset].entry = igu_vec;
1031                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1032                    "(fastpath #%u)\n", i + offset, igu_vec, i);
1033         }
1034
1035         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1036                              BNX2X_NUM_QUEUES(bp) + offset);
1037
1038         /*
1039          * reconfigure number of tx/rx queues according to available
1040          * MSI-X vectors
1041          */
1042         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1043                 /* vectors available for FP */
1044                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1045
1046                 DP(NETIF_MSG_IFUP,
1047                    "Trying to use less MSI-X vectors: %d\n", rc);
1048
1049                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1050
1051                 if (rc) {
1052                         DP(NETIF_MSG_IFUP,
1053                            "MSI-X is not attainable  rc %d\n", rc);
1054                         return rc;
1055                 }
1056
1057                 bp->num_queues = min(bp->num_queues, fp_vec);
1058
1059                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1060                                   bp->num_queues);
1061         } else if (rc) {
1062                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1063                 return rc;
1064         }
1065
1066         bp->flags |= USING_MSIX_FLAG;
1067
1068         return 0;
1069 }
1070
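/* Attach the slowpath and per-queue fastpath handlers to the MSI-X vectors
 * obtained in bnx2x_enable_msix().
 */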
1071 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1072 {
1073         int i, rc, offset = 1;
1074
1075         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1076                          bp->dev->name, bp->dev);
1077         if (rc) {
1078                 BNX2X_ERR("request sp irq failed\n");
1079                 return -EBUSY;
1080         }
1081
1082 #ifdef BCM_CNIC
1083         offset++;
1084 #endif
1085         for_each_queue(bp, i) {
1086                 struct bnx2x_fastpath *fp = &bp->fp[i];
1087                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1088                          bp->dev->name, i);
1089
1090                 rc = request_irq(bp->msix_table[i + offset].vector,
1091                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1092                 if (rc) {
1093                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1094                         bnx2x_free_msix_irqs(bp);
1095                         return -EBUSY;
1096                 }
1097
1098                 fp->state = BNX2X_FP_STATE_IRQ;
1099         }
1100
1101         i = BNX2X_NUM_QUEUES(bp);
1102         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1103                " ... fp[%d] %d\n",
1104                bp->msix_table[0].vector,
1105                0, bp->msix_table[offset].vector,
1106                i - 1, bp->msix_table[offset + i - 1].vector);
1107
1108         return 0;
1109 }
1110
1111 static int bnx2x_enable_msi(struct bnx2x *bp)
1112 {
1113         int rc;
1114
1115         rc = pci_enable_msi(bp->pdev);
1116         if (rc) {
1117                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1118                 return -1;
1119         }
1120         bp->flags |= USING_MSI_FLAG;
1121
1122         return 0;
1123 }
1124
1125 static int bnx2x_req_irq(struct bnx2x *bp)
1126 {
1127         unsigned long flags;
1128         int rc;
1129
1130         if (bp->flags & USING_MSI_FLAG)
1131                 flags = 0;
1132         else
1133                 flags = IRQF_SHARED;
1134
1135         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1136                          bp->dev->name, bp->dev);
1137         if (!rc)
1138                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1139
1140         return rc;
1141 }
1142
1143 static void bnx2x_napi_enable(struct bnx2x *bp)
1144 {
1145         int i;
1146
1147         for_each_queue(bp, i)
1148                 napi_enable(&bnx2x_fp(bp, i, napi));
1149 }
1150
1151 static void bnx2x_napi_disable(struct bnx2x *bp)
1152 {
1153         int i;
1154
1155         for_each_queue(bp, i)
1156                 napi_disable(&bnx2x_fp(bp, i, napi));
1157 }
1158
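/* Re-enable NAPI, HW interrupts and the Tx queues once the interrupt
 * disable count (intr_sem) drops back to zero and the device is running.
 */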
1159 void bnx2x_netif_start(struct bnx2x *bp)
1160 {
1161         int intr_sem;
1162
1163         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1164         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1165
1166         if (intr_sem) {
1167                 if (netif_running(bp->dev)) {
1168                         bnx2x_napi_enable(bp);
1169                         bnx2x_int_enable(bp);
1170                         if (bp->state == BNX2X_STATE_OPEN)
1171                                 netif_tx_wake_all_queues(bp->dev);
1172                 }
1173         }
1174 }
1175
1176 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1177 {
1178         bnx2x_int_disable_sync(bp, disable_hw);
1179         bnx2x_napi_disable(bp);
1180         netif_tx_disable(bp->dev);
1181 }
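
/* Pick the number of queues and the interrupt mode: a single queue for
 * INTx/MSI, multiple queues with MSI-X, falling back to one queue (and
 * MSI or INTx) when MSI-X cannot be enabled.
 */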
1182 static int bnx2x_set_num_queues(struct bnx2x *bp)
1183 {
1184         int rc = 0;
1185
1186         switch (bp->int_mode) {
1187         case INT_MODE_MSI:
1188                 bnx2x_enable_msi(bp);
1189                 /* falling through... */
1190         case INT_MODE_INTx:
1191                 bp->num_queues = 1;
1192                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1193                 break;
1194         default:
1195                 /* Set number of queues according to bp->multi_mode value */
1196                 bnx2x_set_num_queues_msix(bp);
1197
1198                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1199                    bp->num_queues);
1200
1201                 /* if we can't use MSI-X we only need one fp,
1202                  * so try to enable MSI-X with the requested number of fp's
1203                  * and fall back to MSI or legacy INTx with one fp
1204                  */
1205                 rc = bnx2x_enable_msix(bp);
1206                 if (rc) {
1207                         /* failed to enable MSI-X */
1208                         bp->num_queues = 1;
1209
1210                         /* Fall back to INTx if we failed to enable MSI-X due to
1211                          * lack of memory (in bnx2x_set_num_queues()) */
1212                         if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1213                                 bnx2x_enable_msi(bp);
1214                 }
1215
1216                 break;
1217         }
1218         netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1219         return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1220 }
1221
1222 static void bnx2x_release_firmware(struct bnx2x *bp)
1223 {
1224         kfree(bp->init_ops_offsets);
1225         kfree(bp->init_ops);
1226         kfree(bp->init_data);
1227         release_firmware(bp->firmware);
1228 }
1229
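/* bnx2x_nic_load - bring the NIC up: load the firmware tables, allocate
 * memory and IRQs, run the MCP LOAD handshake, initialize the HW and FW,
 * configure the clients and start the Rx/Tx path.
 */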
1230 /* must be called with rtnl_lock */
1231 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1232 {
1233         u32 load_code;
1234         int i, rc;
1235
1236         /* Set init arrays */
1237         rc = bnx2x_init_firmware(bp);
1238         if (rc) {
1239                 BNX2X_ERR("Error loading firmware\n");
1240                 return rc;
1241         }
1242
1243 #ifdef BNX2X_STOP_ON_ERROR
1244         if (unlikely(bp->panic))
1245                 return -EPERM;
1246 #endif
1247
1248         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1249
1250         rc = bnx2x_set_num_queues(bp);
1251         if (rc)
1252                 return rc;
1253
1254         /* must be called before memory allocation and HW init */
1255         bnx2x_ilt_set_info(bp);
1256
1257         if (bnx2x_alloc_mem(bp)) {
1258                 bnx2x_free_irq(bp, true);
1259                 return -ENOMEM;
1260         }
1261
1262         for_each_queue(bp, i)
1263                 bnx2x_fp(bp, i, disable_tpa) =
1264                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1265
1266         for_each_queue(bp, i)
1267                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1268                                bnx2x_poll, 128);
1269
1270         bnx2x_napi_enable(bp);
1271
1272         if (bp->flags & USING_MSIX_FLAG) {
1273                 rc = bnx2x_req_msix_irqs(bp);
1274                 if (rc) {
1275                         bnx2x_free_irq(bp, true);
1276                         goto load_error1;
1277                 }
1278         } else {
1279                 bnx2x_ack_int(bp);
1280                 rc = bnx2x_req_irq(bp);
1281                 if (rc) {
1282                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1283                         bnx2x_free_irq(bp, true);
1284                         goto load_error1;
1285                 }
1286                 if (bp->flags & USING_MSI_FLAG) {
1287                         bp->dev->irq = bp->pdev->irq;
1288                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
1289                                     bp->pdev->irq);
1290                 }
1291         }
1292
1293         /* Send the LOAD_REQUEST command to the MCP.
1294            It returns the type of LOAD command: if this is the first port
1295            to be initialized, the common blocks should be initialized as
1296            well; otherwise they should not.
1297         */
1298         if (!BP_NOMCP(bp)) {
1299                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1300                 if (!load_code) {
1301                         BNX2X_ERR("MCP response failure, aborting\n");
1302                         rc = -EBUSY;
1303                         goto load_error2;
1304                 }
1305                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1306                         rc = -EBUSY; /* other port in diagnostic mode */
1307                         goto load_error2;
1308                 }
1309
1310         } else {
1311                 int port = BP_PORT(bp);
1312
1313                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
1314                    load_count[0], load_count[1], load_count[2]);
1315                 load_count[0]++;
1316                 load_count[1 + port]++;
1317                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
1318                    load_count[0], load_count[1], load_count[2]);
1319                 if (load_count[0] == 1)
1320                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1321                 else if (load_count[1 + port] == 1)
1322                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1323                 else
1324                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1325         }
1326
1327         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1328             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1329                 bp->port.pmf = 1;
1330         else
1331                 bp->port.pmf = 0;
1332         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1333
1334         /* Initialize HW */
1335         rc = bnx2x_init_hw(bp, load_code);
1336         if (rc) {
1337                 BNX2X_ERR("HW init failed, aborting\n");
1338                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1339                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1340                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1341                 goto load_error2;
1342         }
1343
1344         if (rc) {
1345                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1346                 goto load_error2;
1347         }
1348
1349         /* Setup NIC internals and enable interrupts */
1350         bnx2x_nic_init(bp, load_code);
1351
1352         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1353             (bp->common.shmem2_base))
1354                 SHMEM2_WR(bp, dcc_support,
1355                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1356                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1357
1358         /* Send LOAD_DONE command to MCP */
1359         if (!BP_NOMCP(bp)) {
1360                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1361                 if (!load_code) {
1362                         BNX2X_ERR("MCP response failure, aborting\n");
1363                         rc = -EBUSY;
1364                         goto load_error3;
1365                 }
1366         }
1367
1368         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1369
1370         rc = bnx2x_func_start(bp);
1371         if (rc) {
1372                 BNX2X_ERR("Function start failed!\n");
1373 #ifndef BNX2X_STOP_ON_ERROR
1374                 goto load_error3;
1375 #else
1376                 bp->panic = 1;
1377                 return -EBUSY;
1378 #endif
1379         }
1380
1381         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1382         if (rc) {
1383                 BNX2X_ERR("Setup leading failed!\n");
1384 #ifndef BNX2X_STOP_ON_ERROR
1385                 goto load_error3;
1386 #else
1387                 bp->panic = 1;
1388                 return -EBUSY;
1389 #endif
1390         }
1391
1392         if (CHIP_IS_E1H(bp))
1393                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1394                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1395                         bp->flags |= MF_FUNC_DIS;
1396                 }
1397
1398 #ifdef BCM_CNIC
1399         /* Enable Timer scan */
1400         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1401 #endif
1402         for_each_nondefault_queue(bp, i) {
1403                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1404                 if (rc)
1405 #ifdef BCM_CNIC
1406                         goto load_error4;
1407 #else
1408                         goto load_error3;
1409 #endif
1410         }
1411
1412         /* Now that the clients are configured we are ready to work */
1413         bp->state = BNX2X_STATE_OPEN;
1414
1415         bnx2x_set_eth_mac(bp, 1);
1416
1417 #ifdef BCM_CNIC
1418         /* Set iSCSI L2 MAC */
1419         mutex_lock(&bp->cnic_mutex);
1420         if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1421                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1422                 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1423                 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
1424                               BNX2X_VF_ID_INVALID, false,
1425                               CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
1426         }
1427         mutex_unlock(&bp->cnic_mutex);
1428 #endif
1429
1430         if (bp->port.pmf)
1431                 bnx2x_initial_phy_init(bp, load_mode);
1432
1433         /* Start fast path */
1434         switch (load_mode) {
1435         case LOAD_NORMAL:
1436                 /* Tx queues should only be re-enabled */
1437                 netif_tx_wake_all_queues(bp->dev);
1438                 /* Initialize the receive filter. */
1439                 bnx2x_set_rx_mode(bp->dev);
1440                 break;
1441
1442         case LOAD_OPEN:
1443                 netif_tx_start_all_queues(bp->dev);
1444                 smp_mb__after_clear_bit();
1445                 /* Initialize the receive filter. */
1446                 bnx2x_set_rx_mode(bp->dev);
1447                 break;
1448
1449         case LOAD_DIAG:
1450                 /* Initialize the receive filter. */
1451                 bnx2x_set_rx_mode(bp->dev);
1452                 bp->state = BNX2X_STATE_DIAG;
1453                 break;
1454
1455         default:
1456                 break;
1457         }
1458
1459         if (!bp->port.pmf)
1460                 bnx2x__link_status_update(bp);
1461
1462         /* start the timer */
1463         mod_timer(&bp->timer, jiffies + bp->current_interval);
1464
1465 #ifdef BCM_CNIC
1466         bnx2x_setup_cnic_irq_info(bp);
1467         if (bp->state == BNX2X_STATE_OPEN)
1468                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1469 #endif
1470         bnx2x_inc_load_cnt(bp);
1471
1472         bnx2x_release_firmware(bp);
1473
1474         return 0;
1475
1476 #ifdef BCM_CNIC
1477 load_error4:
1478         /* Disable Timer scan */
1479         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1480 #endif
1481 load_error3:
1482         bnx2x_int_disable_sync(bp, 1);
1483         if (!BP_NOMCP(bp)) {
1484                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1485                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1486         }
1487         bp->port.pmf = 0;
1488         /* Free SKBs, SGEs, TPA pool and driver internals */
1489         bnx2x_free_skbs(bp);
1490         for_each_queue(bp, i)
1491                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1492 load_error2:
1493         /* Release IRQs */
1494         bnx2x_free_irq(bp, false);
1495 load_error1:
1496         bnx2x_napi_disable(bp);
1497         for_each_queue(bp, i)
1498                 netif_napi_del(&bnx2x_fp(bp, i, napi));
1499         bnx2x_free_mem(bp);
1500
1501         bnx2x_release_firmware(bp);
1502
1503         return rc;
1504 }
1505
1506 /* must be called with rtnl_lock */
1507 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1508 {
1509         int i;
1510
1511         if (bp->state == BNX2X_STATE_CLOSED) {
1512                 /* Interface has been removed - nothing to recover */
1513                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1514                 bp->is_leader = 0;
1515                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1516                 smp_wmb();
1517
1518                 return -EINVAL;
1519         }
1520
1521 #ifdef BCM_CNIC
1522         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1523 #endif
1524         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1525
1526         /* Set "drop all" */
1527         bp->rx_mode = BNX2X_RX_MODE_NONE;
1528         bnx2x_set_storm_rx_mode(bp);
1529
1530         del_timer_sync(&bp->timer);
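             /* the pulse timer is stopped; mark the driver pulse as
              * "always alive" for the MCP */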
1531         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1532                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1533         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1534
1535
1536         /* Cleanup the chip if needed */
1537         if (unload_mode != UNLOAD_RECOVERY)
1538                 bnx2x_chip_cleanup(bp, unload_mode);
1539         else {
1540                 /* Disable HW interrupts, NAPI and Tx */
1541                 bnx2x_netif_stop(bp, 1);
1542
1543                 /* Release IRQs */
1544                 bnx2x_free_irq(bp, false);
1545         }
1546
1547         bp->port.pmf = 0;
1548
1549         /* Free SKBs, SGEs, TPA pool and driver internals */
1550         bnx2x_free_skbs(bp);
1551         for_each_queue(bp, i)
1552                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1553         for_each_queue(bp, i)
1554                 netif_napi_del(&bnx2x_fp(bp, i, napi));
1555         bnx2x_free_mem(bp);
1556
1557         bp->state = BNX2X_STATE_CLOSED;
1558
1559         /* The last driver to unload must disable "close the gate" if
1560          * there is no parity attention or "process kill" pending.
1561          */
1562         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1563             bnx2x_reset_is_done(bp))
1564                 bnx2x_disable_close_the_gate(bp);
1565
1566         /* Reset the MCP mailbox sequence if a recovery is in progress */
1567         if (unload_mode == UNLOAD_RECOVERY)
1568                 bp->fw_seq = 0;
1569
1570         return 0;
1571 }
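
     /* move the device between D0 and D3hot via the PCI PM control register */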
1572 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1573 {
1574         u16 pmcsr;
1575
1576         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1577
1578         switch (state) {
1579         case PCI_D0:
1580                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1581                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1582                                        PCI_PM_CTRL_PME_STATUS));
1583
1584                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1585                         /* delay required during transition out of D3hot */
1586                         msleep(20);
1587                 break;
1588
1589         case PCI_D3hot:
1590                 /* If there are other clients above, don't
1591                    shut down the power */
1592                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1593                         return 0;
1594                 /* Don't shut down the power for emulation and FPGA */
1595                 if (CHIP_REV_IS_SLOW(bp))
1596                         return 0;
1597
1598                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
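                     /* PowerState field value 3 == D3hot */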
1599                 pmcsr |= 3;
1600
1601                 if (bp->wol)
1602                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1603
1604                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1605                                       pmcsr);
1606
1607                 /* No more memory access after this point until
1608                  * device is brought back to D0.
1609                  */
1610                 break;
1611
1612         default:
1613                 return -EINVAL;
1614         }
1615         return 0;
1616 }
1617
1620 /*
1621  * net_device service functions
1622  */
1623
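     /* NAPI poll handler: service Tx completions and up to "budget" Rx
      * packets on one fastpath queue; re-arm the status block interrupt
      * via the IGU only when no Rx/Tx work is left.
      */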
1624 static int bnx2x_poll(struct napi_struct *napi, int budget)
1625 {
1626         int work_done = 0;
1627         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1628                                                  napi);
1629         struct bnx2x *bp = fp->bp;
1630
1631         while (1) {
1632 #ifdef BNX2X_STOP_ON_ERROR
1633                 if (unlikely(bp->panic)) {
1634                         napi_complete(napi);
1635                         return 0;
1636                 }
1637 #endif
1638
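                     /* Tx completion work is not limited by the NAPI budget;
                      * only Rx work counts against it */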
1639                 if (bnx2x_has_tx_work(fp))
1640                         bnx2x_tx_int(fp);
1641
1642                 if (bnx2x_has_rx_work(fp)) {
1643                         work_done += bnx2x_rx_int(fp, budget - work_done);
1644
1645                         /* must not complete if we consumed full budget */
1646                         if (work_done >= budget)
1647                                 break;
1648                 }
1649
1650                 /* Fall out from the NAPI loop if needed */
1651                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1652                         bnx2x_update_fpsb_idx(fp);
1653                         /* bnx2x_has_rx_work() reads the status block, so
1654                          * we must ensure that the status block indices
1655                          * have actually been read (bnx2x_update_fpsb_idx)
1656                          * before this check (bnx2x_has_rx_work); otherwise
1657                          * we could write a "newer" status block value to
1658                          * the IGU: if a DMA happened right after
1659                          * bnx2x_has_rx_work and there were no rmb, the
1660                          * read in bnx2x_update_fpsb_idx could be postponed
1661                          * to just before bnx2x_ack_sb. In that case there
1662                          * would never be another interrupt until the next
1663                          * status block update, even though there is still
1664                          * unhandled work.
1665                          */
1666                         rmb();
1667
1668                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1669                                 napi_complete(napi);
1670                                 /* Re-enable interrupts */
1671                                 DP(NETIF_MSG_HW,
1672                                    "Update index to %d\n", fp->fp_hc_idx);
1673                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1674                                              le16_to_cpu(fp->fp_hc_idx),
1675                                              IGU_INT_ENABLE, 1);
1676                                 break;
1677                         }
1678                 }
1679         }
1680
1681         return work_done;
1682 }
1683
1684
1685 /* We split the first BD into a headers BD and a data BD
1686  * to ease the pain of our fellow microcode engineers;
1687  * we use one mapping for both BDs.
1688  * So far this has only been observed to happen
1689  * in Other Operating Systems(TM)
1690  */
1691 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1692                                    struct bnx2x_fastpath *fp,
1693                                    struct sw_tx_bd *tx_buf,
1694                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1695                                    u16 bd_prod, int nbd)
1696 {
1697         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1698         struct eth_tx_bd *d_tx_bd;
1699         dma_addr_t mapping;
1700         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1701
1702         /* first fix first BD */
1703         h_tx_bd->nbd = cpu_to_le16(nbd);
1704         h_tx_bd->nbytes = cpu_to_le16(hlen);
1705
1706         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1707            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1708            h_tx_bd->addr_lo, h_tx_bd->nbd);
1709
1710         /* now get a new data BD
1711          * (after the pbd) and fill it */
1712         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1713         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1714
1715         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1716                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1717
1718         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1719         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1720         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1721
1722         /* this marks the BD as one that has no individual mapping */
1723         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1724
1725         DP(NETIF_MSG_TX_QUEUED,
1726            "TSO split data size is %d (%x:%x)\n",
1727            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1728
1729         /* update tx_bd */
1730         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1731
1732         return bd_prod;
1733 }
1734
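     /* The packet's partial checksum may start "fix" bytes away from the
      * transport header (HW bug workaround): subtract the checksum of the
      * extra leading bytes (fix > 0) or add the missing ones (fix < 0),
      * then byte-swap the result for the parsing BD.
      */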
1735 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1736 {
1737         if (fix > 0)
1738                 csum = (u16) ~csum_fold(csum_sub(csum,
1739                                 csum_partial(t_header - fix, fix, 0)));
1740
1741         else if (fix < 0)
1742                 csum = (u16) ~csum_fold(csum_add(csum,
1743                                 csum_partial(t_header, -fix, 0)));
1744
1745         return swab16(csum);
1746 }
1747
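     /* map the skb's checksum-offload and GSO state to the XMIT_* flags
      * used by the transmit path
      */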
1748 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1749 {
1750         u32 rc;
1751
1752         if (skb->ip_summed != CHECKSUM_PARTIAL)
1753                 rc = XMIT_PLAIN;
1754
1755         else {
1756                 if (skb->protocol == htons(ETH_P_IPV6)) {
1757                         rc = XMIT_CSUM_V6;
1758                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1759                                 rc |= XMIT_CSUM_TCP;
1760
1761                 } else {
1762                         rc = XMIT_CSUM_V4;
1763                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1764                                 rc |= XMIT_CSUM_TCP;
1765                 }
1766         }
1767
1768         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1769                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1770
1771         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1772                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1773
1774         return rc;
1775 }
1776
1777 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1778 /* check if the packet requires linearization (packet is too fragmented);
1779    no need to check fragmentation if the page size > 8K (there will be no
1780    violation of FW restrictions) */
1781 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1782                              u32 xmit_type)
1783 {
1784         int to_copy = 0;
1785         int hlen = 0;
1786         int first_bd_sz = 0;
1787
1788         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1789         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1790
1791                 if (xmit_type & XMIT_GSO) {
1792                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1793                         /* Check if LSO packet needs to be copied:
1794                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1795                         int wnd_size = MAX_FETCH_BD - 3;
1796                         /* Number of windows to check */
1797                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1798                         int wnd_idx = 0;
1799                         int frag_idx = 0;
1800                         u32 wnd_sum = 0;
1801
1802                         /* Headers length */
1803                         hlen = (int)(skb_transport_header(skb) - skb->data) +
1804                                 tcp_hdrlen(skb);
1805
1806                         /* Amount of data (w/o headers) on linear part of SKB */
1807                         first_bd_sz = skb_headlen(skb) - hlen;
1808
1809                         wnd_sum  = first_bd_sz;
1810
1811                         /* Calculate the first sum - it's special */
1812                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1813                                 wnd_sum +=
1814                                         skb_shinfo(skb)->frags[frag_idx].size;
1815
1816                         /* If there was data in the linear part - check it */
1817                         if (first_bd_sz > 0) {
1818                                 if (unlikely(wnd_sum < lso_mss)) {
1819                                         to_copy = 1;
1820                                         goto exit_lbl;
1821                                 }
1822
1823                                 wnd_sum -= first_bd_sz;
1824                         }
1825
1826                         /* Others are easier: run through the frag list and
1827                            check all windows */
1828                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1829                                 wnd_sum +=
1830                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1831
1832                                 if (unlikely(wnd_sum < lso_mss)) {
1833                                         to_copy = 1;
1834                                         break;
1835                                 }
1836                                 wnd_sum -=
1837                                         skb_shinfo(skb)->frags[wnd_idx].size;
1838                         }
1839                 } else {
1840                         /* a non-LSO packet that is too fragmented must
1841                            always be linearized */
1842                         to_copy = 1;
1843                 }
1844         }
1845
1846 exit_lbl:
1847         if (unlikely(to_copy))
1848                 DP(NETIF_MSG_TX_QUEUED,
1849                    "Linearization IS REQUIRED for %s packet. "
1850                    "num_frags %d  hlen %d  first_bd_sz %d\n",
1851                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1852                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1853
1854         return to_copy;
1855 }
1856 #endif
1857
1858 /* called with netif_tx_lock
1859  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1860  * netif_wake_queue()
1861  */
1862 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1863 {
1864         struct bnx2x *bp = netdev_priv(dev);
1865         struct bnx2x_fastpath *fp;
1866         struct netdev_queue *txq;
1867         struct sw_tx_bd *tx_buf;
1868         struct eth_tx_start_bd *tx_start_bd;
1869         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1870         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1871         u16 pkt_prod, bd_prod;
1872         int nbd, fp_index;
1873         dma_addr_t mapping;
1874         u32 xmit_type = bnx2x_xmit_type(bp, skb);
1875         int i;
1876         u8 hlen = 0;
1877         __le16 pkt_size = 0;
1878         struct ethhdr *eth;
1879         u8 mac_type = UNICAST_ADDRESS;
1880
1881 #ifdef BNX2X_STOP_ON_ERROR
1882         if (unlikely(bp->panic))
1883                 return NETDEV_TX_BUSY;
1884 #endif
1885
1886         fp_index = skb_get_queue_mapping(skb);
1887         txq = netdev_get_tx_queue(dev, fp_index);
1888
1889         fp = &bp->fp[fp_index];
1890
1891         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1892                 fp->eth_q_stats.driver_xoff++;
1893                 netif_tx_stop_queue(txq);
1894                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1895                 return NETDEV_TX_BUSY;
1896         }
1897
1898         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
1899            "  gso type %x  xmit_type %x\n",
1900            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1901            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1902
1903         eth = (struct ethhdr *)skb->data;
1904
1905         /* set flag according to packet type (UNICAST_ADDRESS is default) */
1906         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1907                 if (is_broadcast_ether_addr(eth->h_dest))
1908                         mac_type = BROADCAST_ADDRESS;
1909                 else
1910                         mac_type = MULTICAST_ADDRESS;
1911         }
1912
1913 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1914         /* First, check if we need to linearize the skb (due to FW
1915            restrictions). No need to check fragmentation if page size > 8K
1916            (there will be no violation to FW restrictions) */
1917         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1918                 /* Statistics of linearization */
1919                 bp->lin_cnt++;
1920                 if (skb_linearize(skb) != 0) {
1921                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1922                            "silently dropping this SKB\n");
1923                         dev_kfree_skb_any(skb);
1924                         return NETDEV_TX_OK;
1925                 }
1926         }
1927 #endif
1928
1929         /*
1930          * Please read carefully. First we use one BD which we mark as the
1931          * start BD, then we have a parsing info BD (used for TSO or xsum),
1932          * and only then we have the rest of the TSO BDs.
1933          * (don't forget to mark the last one as last,
1934          * and to unmap only AFTER you write to the BD ...)
1935          * And above all, all PBD sizes are in words - NOT DWORDS!
1936          */
1937
1938         pkt_prod = fp->tx_pkt_prod++;
1939         bd_prod = TX_BD(fp->tx_bd_prod);
1940
1941         /* get a tx_buf and first BD */
1942         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1943         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1944
1945         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1946         SET_FLAG(tx_start_bd->general_data,
1947                   ETH_TX_START_BD_ETH_ADDR_TYPE,
1948                   mac_type);
1949         /* header nbd */
1950         SET_FLAG(tx_start_bd->general_data,
1951                   ETH_TX_START_BD_HDR_NBDS,
1952                   1);
1953
1954         /* remember the first BD of the packet */
1955         tx_buf->first_bd = fp->tx_bd_prod;
1956         tx_buf->skb = skb;
1957         tx_buf->flags = 0;
1958
1959         DP(NETIF_MSG_TX_QUEUED,
1960            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
1961            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1962
1963 #ifdef BCM_VLAN
1964         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1965             (bp->flags & HW_VLAN_TX_FLAG)) {
1966                 tx_start_bd->vlan_or_ethertype =
1967                     cpu_to_le16(vlan_tx_tag_get(skb));
1968                 tx_start_bd->bd_flags.as_bitfield |=
1969                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
1970         } else
1971 #endif
1972                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1973
1974         /* turn on parsing and get a BD */
1975         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1976
1977         if (xmit_type & XMIT_CSUM) {
1978                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1979
1980                 if (xmit_type & XMIT_CSUM_V4)
1981                         tx_start_bd->bd_flags.as_bitfield |=
1982                                                 ETH_TX_BD_FLAGS_IP_CSUM;
1983                 else
1984                         tx_start_bd->bd_flags.as_bitfield |=
1985                                                 ETH_TX_BD_FLAGS_IPV6;
1986
1987                 if (!(xmit_type & XMIT_CSUM_TCP))
1988                         tx_start_bd->bd_flags.as_bitfield |=
1989                                                 ETH_TX_BD_FLAGS_IS_UDP;
1990         }
1991         pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
1992         memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1993         /* Set PBD in checksum offload case */
1994         if (xmit_type & XMIT_CSUM) {
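                     /* lengths in the parsing BD are in 16-bit words */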
1995                 hlen = (skb_network_header(skb) - skb->data) / 2;
1996
1997                 /* for now NS flag is not used in Linux */
1998                 pbd_e1x->global_data =
1999                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2000                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2001
2002                 pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
2003                                 skb_network_header(skb)) / 2;
2004
2005                 hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;
2006
2007                 pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
2008                 hlen = hlen*2;
2009
2010                 if (xmit_type & XMIT_CSUM_TCP) {
2011                         pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2012
2013                 } else {
2014                         s8 fix = SKB_CS_OFF(skb); /* signed! */
2015
2016                         DP(NETIF_MSG_TX_QUEUED,
2017                            "hlen %d  fix %d  csum before fix %x\n",
2018                            le16_to_cpu(pbd_e1x->total_hlen_w),
2019                            fix, SKB_CS(skb));
2020
2021                         /* HW bug: fixup the CSUM */
2022                         pbd_e1x->tcp_pseudo_csum =
2023                                 bnx2x_csum_fix(skb_transport_header(skb),
2024                                                SKB_CS(skb), fix);
2025
2026                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2027                            pbd_e1x->tcp_pseudo_csum);
2028                 }
2029         }
2030
2031         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2032                                  skb_headlen(skb), DMA_TO_DEVICE);
2033
2034         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2035         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2036         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2037         tx_start_bd->nbd = cpu_to_le16(nbd);
2038         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2039         pkt_size = tx_start_bd->nbytes;
2040
2041         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2042            "  nbytes %d  flags %x  vlan %x\n",
2043            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2044            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2045            tx_start_bd->bd_flags.as_bitfield,
2046            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2047
2048         if (xmit_type & XMIT_GSO) {
2049
2050                 DP(NETIF_MSG_TX_QUEUED,
2051                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2052                    skb->len, hlen, skb_headlen(skb),
2053                    skb_shinfo(skb)->gso_size);
2054
2055                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2056
2057                 if (unlikely(skb_headlen(skb) > hlen))
2058                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2059                                                  hlen, bd_prod, ++nbd);
2060
2061                 pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2062                 pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2063                 pbd_e1x->tcp_flags = pbd_tcp_flags(skb);
2064
2065                 if (xmit_type & XMIT_GSO_V4) {
2066                         pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
2067                         pbd_e1x->tcp_pseudo_csum =
2068                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2069                                                           ip_hdr(skb)->daddr,
2070                                                           0, IPPROTO_TCP, 0));
2071
2072                 } else
2073                         pbd_e1x->tcp_pseudo_csum =
2074                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2075                                                         &ipv6_hdr(skb)->daddr,
2076                                                         0, IPPROTO_TCP, 0));
2077
2078                 pbd_e1x->global_data |=
2079                                 ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2080         }
2081         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2082
2083         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2084                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2085
2086                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2087                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2088                 if (total_pkt_bd == NULL)
2089                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2090
2091                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2092                                        frag->page_offset,
2093                                        frag->size, DMA_TO_DEVICE);
2094
2095                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2096                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2097                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2098                 le16_add_cpu(&pkt_size, frag->size);
2099
2100                 DP(NETIF_MSG_TX_QUEUED,
2101                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2102                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2103                    le16_to_cpu(tx_data_bd->nbytes));
2104         }
2105
2106         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2107
2108         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2109
2110         /* before ringing the Tx doorbell, also count the "next page" BD
2111          * if the packet's BDs wrapped over or ended on a page boundary
2112          */
2113         if (TX_BD_POFF(bd_prod) < nbd)
2114                 nbd++;
2115
2116         if (total_pkt_bd != NULL)
2117                 total_pkt_bd->total_pkt_bytes = pkt_size;
2118
2119         if (pbd_e1x)
2120                 DP(NETIF_MSG_TX_QUEUED,
2121                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2122                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2123                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2124                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2125                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2126                     le16_to_cpu(pbd_e1x->total_hlen_w));
2127
2128         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2129
2130         /*
2131          * Make sure that the BD data is updated before updating the producer
2132          * since FW might read the BD right after the producer is updated.
2133          * This is only applicable for weak-ordered memory model archs such
2134          * as IA-64. The following barrier is also mandatory since the FW
2135          * assumes that packets always have BDs.
2136          */
2137         wmb();
2138
2139         fp->tx_db.data.prod += nbd;
2140         barrier();
2141         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2142
2143         mmiowb();
2144
2145         fp->tx_bd_prod += nbd;
2146
2147         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2148                 netif_tx_stop_queue(txq);
2149
2150                 /* the paired memory barrier is in bnx2x_tx_int(); we must
2151                  * keep the ordering of set_bit() in netif_tx_stop_queue()
2152                  * and the read of fp->tx_bd_cons */
2153                 smp_mb();
2154
2155                 fp->eth_q_stats.driver_xoff++;
2156                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2157                         netif_tx_wake_queue(txq);
2158         }
2159         fp->tx_pkt++;
2160
2161         return NETDEV_TX_OK;
2162 }
2163 /* called with rtnl_lock */
2164 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2165 {
2166         struct sockaddr *addr = p;
2167         struct bnx2x *bp = netdev_priv(dev);
2168
2169         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2170                 return -EINVAL;
2171
2172         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2173         if (netif_running(dev))
2174                 bnx2x_set_eth_mac(bp, 1);
2175
2176         return 0;
2177 }
2178
2179 void bnx2x_free_mem_bp(struct bnx2x *bp)
2180 {
2181         kfree(bp->fp);
2182         kfree(bp->msix_table);
2183         kfree(bp->ilt);
2184 }
2185
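     /* allocate the driver-private structures: the fastpath array and the
      * MSI-X table (both sized by the L2 CID count) and the ILT descriptor
      */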
2186 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2187 {
2188         struct bnx2x_fastpath *fp;
2189         struct msix_entry *tbl;
2190         struct bnx2x_ilt *ilt;
2191
2192         /* fp array */
2193         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2194         if (!fp)
2195                 goto alloc_err;
2196         bp->fp = fp;
2197
2198         /* msix table */
2199         tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2200                                   GFP_KERNEL);
2201         if (!tbl)
2202                 goto alloc_err;
2203         bp->msix_table = tbl;
2204
2205         /* ilt */
2206         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2207         if (!ilt)
2208                 goto alloc_err;
2209         bp->ilt = ilt;
2210
2211         return 0;
2212 alloc_err:
2213         bnx2x_free_mem_bp(bp);
2214         return -ENOMEM;
2216 }
2217
2218 /* called with rtnl_lock */
2219 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2220 {
2221         struct bnx2x *bp = netdev_priv(dev);
2222         int rc = 0;
2223
2224         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2225                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2226                 return -EAGAIN;
2227         }
2228
2229         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2230             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2231                 return -EINVAL;
2232
2233         /* This does not race with packet allocation
2234          * because the actual alloc size is
2235          * only updated as part of load
2236          */
2237         dev->mtu = new_mtu;
2238
2239         if (netif_running(dev)) {
2240                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2241                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2242         }
2243
2244         return rc;
2245 }
2246
2247 void bnx2x_tx_timeout(struct net_device *dev)
2248 {
2249         struct bnx2x *bp = netdev_priv(dev);
2250
2251 #ifdef BNX2X_STOP_ON_ERROR
2252         if (!bp->panic)
2253                 bnx2x_panic();
2254 #endif
2255         /* This allows the netif to be shut down gracefully before resetting */
2256         schedule_delayed_work(&bp->reset_task, 0);
2257 }
2258
2259 #ifdef BCM_VLAN
2260 /* called with rtnl_lock */
2261 void bnx2x_vlan_rx_register(struct net_device *dev,
2262                                    struct vlan_group *vlgrp)
2263 {
2264         struct bnx2x *bp = netdev_priv(dev);
2265
2266         bp->vlgrp = vlgrp;
2267 }
2268 #endif
2269
2270 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2271 {
2272         struct net_device *dev = pci_get_drvdata(pdev);
2273         struct bnx2x *bp;
2274
2275         if (!dev) {
2276                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2277                 return -ENODEV;
2278         }
2279         bp = netdev_priv(dev);
2280
2281         rtnl_lock();
2282
2283         pci_save_state(pdev);
2284
2285         if (!netif_running(dev)) {
2286                 rtnl_unlock();
2287                 return 0;
2288         }
2289
2290         netif_device_detach(dev);
2291
2292         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2293
2294         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2295
2296         rtnl_unlock();
2297
2298         return 0;
2299 }
2300
2301 int bnx2x_resume(struct pci_dev *pdev)
2302 {
2303         struct net_device *dev = pci_get_drvdata(pdev);
2304         struct bnx2x *bp;
2305         int rc;
2306
2307         if (!dev) {
2308                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2309                 return -ENODEV;
2310         }
2311         bp = netdev_priv(dev);
2312
2313         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2314                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2315                 return -EAGAIN;
2316         }
2317
2318         rtnl_lock();
2319
2320         pci_restore_state(pdev);
2321
2322         if (!netif_running(dev)) {
2323                 rtnl_unlock();
2324                 return 0;
2325         }
2326
2327         bnx2x_set_power_state(bp, PCI_D0);
2328         netif_device_attach(dev);
2329
2330         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2331
2332         rtnl_unlock();
2333
2334         return rc;
2335 }