1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/ip.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
25
26 #include "bnx2x_init.h"
27
28 static int bnx2x_setup_irqs(struct bnx2x *bp);
29
30 /* free skb in the packet ring at pos idx
31  * return idx of last bd freed
32  */
33 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34                              u16 idx)
35 {
36         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37         struct eth_tx_start_bd *tx_start_bd;
38         struct eth_tx_bd *tx_data_bd;
39         struct sk_buff *skb = tx_buf->skb;
40         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41         int nbd;
42
43         /* prefetch skb end pointer to speed up dev_kfree_skb() */
44         prefetch(&skb->end);
45
46         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
47            idx, tx_buf, skb);
48
49         /* unmap first bd */
50         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
54
55         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56 #ifdef BNX2X_STOP_ON_ERROR
57         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58                 BNX2X_ERR("BAD nbd!\n");
59                 bnx2x_panic();
60         }
61 #endif
62         new_cons = nbd + tx_buf->first_bd;
63
64         /* Get the next bd */
65         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67         /* Skip a parse bd... */
68         --nbd;
69         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71         /* ...and the TSO split header bd since they have no mapping */
72         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73                 --nbd;
74                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75         }
76
77         /* now free frags */
78         while (nbd > 0) {
79
80                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84                 if (--nbd)
85                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86         }
87
88         /* release skb */
89         WARN_ON(!skb);
90         dev_kfree_skb(skb);
91         tx_buf->first_bd = 0;
92         tx_buf->skb = NULL;
93
94         return new_cons;
95 }
96
97 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98 {
99         struct bnx2x *bp = fp->bp;
100         struct netdev_queue *txq;
101         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103 #ifdef BNX2X_STOP_ON_ERROR
104         if (unlikely(bp->panic))
105                 return -1;
106 #endif
107
108         txq = netdev_get_tx_queue(bp->dev, fp->index);
109         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110         sw_cons = fp->tx_pkt_cons;
111
112         while (sw_cons != hw_cons) {
113                 u16 pkt_cons;
114
115                 pkt_cons = TX_BD(sw_cons);
116
117                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
118                                       " pkt_cons %u\n",
119                    fp->index, hw_cons, sw_cons, pkt_cons);
120
121                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122                 sw_cons++;
123         }
124
125         fp->tx_pkt_cons = sw_cons;
126         fp->tx_bd_cons = bd_cons;
127
128         /* Need to make the tx_bd_cons update visible to start_xmit()
129          * before checking for netif_tx_queue_stopped().  Without the
130          * memory barrier, there is a small possibility that
131          * start_xmit() will miss it and cause the queue to be stopped
132          * forever.
133          */
134         smp_mb();
135
136         if (unlikely(netif_tx_queue_stopped(txq))) {
137                 /* Taking tx_lock() is needed to prevent re-enabling the queue
138                  * while it's empty. This could happen if rx_action() gets
139                  * suspended in bnx2x_tx_int() after the condition before
140                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141                  *
142                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
143                  * sends some packets consuming the whole queue again->
144                  * stops the queue
145                  */
146
147                 __netif_tx_lock(txq, smp_processor_id());
148
149                 if ((netif_tx_queue_stopped(txq)) &&
150                     (bp->state == BNX2X_STATE_OPEN) &&
151                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152                         netif_tx_wake_queue(txq);
153
154                 __netif_tx_unlock(txq);
155         }
156         return 0;
157 }
158
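/* Advance fp->last_max_sge only when the new index is ahead of the current
 * one; SUB_S16() appears to be a signed 16-bit subtraction, so ring
 * wrap-around is handled correctly.
 */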
159 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160                                              u16 idx)
161 {
162         u16 last_max = fp->last_max_sge;
163
164         if (SUB_S16(idx, last_max) > 0)
165                 fp->last_max_sge = idx;
166 }
167
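/* Clear the SGE-mask bits for the pages consumed by this aggregation, then
 * advance the SGE producer past every mask element that became fully
 * consumed, re-arming those elements for reuse.
 */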
168 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169                                   struct eth_fast_path_rx_cqe *fp_cqe)
170 {
171         struct bnx2x *bp = fp->bp;
172         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
174                       SGE_PAGE_SHIFT;
175         u16 last_max, last_elem, first_elem;
176         u16 delta = 0;
177         u16 i;
178
179         if (!sge_len)
180                 return;
181
182         /* First mark all used pages */
183         for (i = 0; i < sge_len; i++)
184                 SGE_MASK_CLEAR_BIT(fp,
185                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
186
187         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
188            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
189
190         /* Here we assume that the last SGE index is the biggest */
191         prefetch((void *)(fp->sge_mask));
192         bnx2x_update_last_max_sge(fp,
193                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
194
195         last_max = RX_SGE(fp->last_max_sge);
196         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199         /* If ring is not full */
200         if (last_elem + 1 != first_elem)
201                 last_elem++;
202
203         /* Now update the prod */
204         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205                 if (likely(fp->sge_mask[i]))
206                         break;
207
208                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209                 delta += RX_SGE_MASK_ELEM_SZ;
210         }
211
212         if (delta > 0) {
213                 fp->rx_sge_prod += delta;
214                 /* clear page-end entries */
215                 bnx2x_clear_sge_mask_next_elems(fp);
216         }
217
218         DP(NETIF_MSG_RX_STATUS,
219            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
220            fp->last_max_sge, fp->rx_sge_prod);
221 }
222
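/* Start a TPA aggregation on @queue: put the spare skb kept in the TPA bin
 * onto the producer BD (so the RX ring stays full) and park the skb holding
 * the start of the aggregation (from the consumer BD) in the bin instead.
 */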
223 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224                             struct sk_buff *skb, u16 cons, u16 prod)
225 {
226         struct bnx2x *bp = fp->bp;
227         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230         dma_addr_t mapping;
231
232         /* move empty skb from pool to prod and map it */
233         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235                                  bp->rx_buf_size, DMA_FROM_DEVICE);
236         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238         /* move partial skb from cons to pool (don't unmap yet) */
239         fp->tpa_pool[queue] = *cons_rx_buf;
240
241         /* mark bin state as start - print error if current state != stop */
242         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245         fp->tpa_state[queue] = BNX2X_TPA_START;
246
247         /* point prod_bd to new skb */
248         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251 #ifdef BNX2X_STOP_ON_ERROR
252         fp->tpa_queue_used |= (1 << queue);
253 #ifdef _ASM_GENERIC_INT_L64_H
254         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255 #else
256         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257 #endif
258            fp->tpa_queue_used);
259 #endif
260 }
261
262 /* Timestamp option length allowed for TPA aggregation:
263  *
264  *              nop nop kind length echo val
265  */
266 #define TPA_TSTAMP_OPT_LEN      12
267 /**
268  * Calculate the approximate value of the MSS for this
269  * aggregation using its first packet.
270  *
271  * @param bp
272  * @param parsing_flags Parsing flags from the START CQE
273  * @param len_on_bd Total length of the first packet for the
274  *                   aggregation.
275  */
276 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277                                     u16 len_on_bd)
278 {
279         /* A TPA aggregation won't have IP options or TCP options
280          * other than the timestamp.
281          */
282         u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
283
284
285         /* Check if there was a TCP timestamp; if there is one it will
286          * always be 12 bytes long: nop nop kind length echo val.
287          *
288          * Otherwise FW would close the aggregation.
289          */
290         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
291                 hdrs_len += TPA_TSTAMP_OPT_LEN;
292
293         return len_on_bd - hdrs_len;
294 }
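/* Example for bnx2x_set_lro_mss(): a 1514-byte first frame carrying TCP
 * timestamps gives hdrs_len = 14 + 20 + 20 + 12 = 66, i.e. an estimated
 * MSS of 1448 bytes.
 */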
295
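/* Attach the SGE pages listed in the CQE's SGL to @skb as page fragments
 * and set gso_size from the estimated MSS so that the aggregated skb can
 * still be segmented later (e.g. when forwarded).
 */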
296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
297                                struct sk_buff *skb,
298                                struct eth_fast_path_rx_cqe *fp_cqe,
299                                u16 cqe_idx, u16 parsing_flags)
300 {
301         struct sw_rx_page *rx_pg, old_rx_pg;
302         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
303         u32 i, frag_len, frag_size, pages;
304         int err;
305         int j;
306
307         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
308         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
309
310         /* This is needed in order to enable forwarding support */
311         if (frag_size)
312                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
313                                                               len_on_bd);
314
315 #ifdef BNX2X_STOP_ON_ERROR
316         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
317                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
318                           pages, cqe_idx);
319                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
320                           fp_cqe->pkt_len, len_on_bd);
321                 bnx2x_panic();
322                 return -EINVAL;
323         }
324 #endif
325
326         /* Run through the SGL and compose the fragmented skb */
327         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
328                 u16 sge_idx =
329                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
330
331                 /* FW gives the indices of the SGE as if the ring is an array
332                    (meaning that "next" element will consume 2 indices) */
333                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
334                 rx_pg = &fp->rx_page_ring[sge_idx];
335                 old_rx_pg = *rx_pg;
336
337                 /* If we fail to allocate a substitute page, we simply stop
338                    where we are and drop the whole packet */
339                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
340                 if (unlikely(err)) {
341                         fp->eth_q_stats.rx_skb_alloc_failed++;
342                         return err;
343                 }
344
345                 /* Unmap the page as we're going to pass it to the stack */
346                 dma_unmap_page(&bp->pdev->dev,
347                                dma_unmap_addr(&old_rx_pg, mapping),
348                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
349
350                 /* Add one frag and update the appropriate fields in the skb */
351                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
352
353                 skb->data_len += frag_len;
354                 skb->truesize += frag_len;
355                 skb->len += frag_len;
356
357                 frag_size -= frag_len;
358         }
359
360         return 0;
361 }
362
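/* Close a TPA aggregation: try to refill the TPA bin with a new skb, fix up
 * the IP checksum of the aggregated skb, attach the SGE pages as fragments
 * and hand the result to the stack. On allocation failure the packet is
 * dropped and the old skb stays in the bin.
 */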
363 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
364                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
365                            u16 cqe_idx)
366 {
367         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
368         struct sk_buff *skb = rx_buf->skb;
369         /* alloc new skb */
370         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
371
372         /* Unmap skb in the pool anyway, as we are going to change
373            pool entry status to BNX2X_TPA_STOP even if new skb allocation
374            fails. */
375         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
376                          bp->rx_buf_size, DMA_FROM_DEVICE);
377
378         if (likely(new_skb)) {
379                 /* fix ip xsum and give it to the stack */
380                 /* (no need to map the new skb) */
381                 u16 parsing_flags =
382                         le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
383
384                 prefetch(skb);
385                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
386
387 #ifdef BNX2X_STOP_ON_ERROR
388                 if (pad + len > bp->rx_buf_size) {
389                         BNX2X_ERR("skb_put is about to fail...  "
390                                   "pad %d  len %d  rx_buf_size %d\n",
391                                   pad, len, bp->rx_buf_size);
392                         bnx2x_panic();
393                         return;
394                 }
395 #endif
396
397                 skb_reserve(skb, pad);
398                 skb_put(skb, len);
399
400                 skb->protocol = eth_type_trans(skb, bp->dev);
401                 skb->ip_summed = CHECKSUM_UNNECESSARY;
402
403                 {
404                         struct iphdr *iph;
405
406                         iph = (struct iphdr *)skb->data;
407                         iph->check = 0;
408                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
409                 }
410
411                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
412                                          &cqe->fast_path_cqe, cqe_idx,
413                                          parsing_flags)) {
414                         if (parsing_flags & PARSING_FLAGS_VLAN)
415                                 __vlan_hwaccel_put_tag(skb,
416                                                  le16_to_cpu(cqe->fast_path_cqe.
417                                                              vlan_tag));
418                         napi_gro_receive(&fp->napi, skb);
419                 } else {
420                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
421                            " - dropping packet!\n");
422                         dev_kfree_skb(skb);
423                 }
424
425
426                 /* put new skb in bin */
427                 fp->tpa_pool[queue].skb = new_skb;
428
429         } else {
430                 /* else drop the packet and keep the buffer in the bin */
431                 DP(NETIF_MSG_RX_STATUS,
432                    "Failed to allocate new skb - dropping packet!\n");
433                 fp->eth_q_stats.rx_skb_alloc_failed++;
434         }
435
436         fp->tpa_state[queue] = BNX2X_TPA_STOP;
437 }
438
439 /* Set Toeplitz hash value in the skb using the value from the
440  * CQE (calculated by HW).
441  */
442 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
443                                         struct sk_buff *skb)
444 {
445         /* Set Toeplitz hash from CQE */
446         if ((bp->dev->features & NETIF_F_RXHASH) &&
447             (cqe->fast_path_cqe.status_flags &
448              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
449                 skb->rxhash =
450                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
451 }
452
453 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
454 {
455         struct bnx2x *bp = fp->bp;
456         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
457         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
458         int rx_pkt = 0;
459
460 #ifdef BNX2X_STOP_ON_ERROR
461         if (unlikely(bp->panic))
462                 return 0;
463 #endif
464
465         /* The CQ "next element" has the same size as a regular element,
466            that's why it's ok here */
467         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
468         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
469                 hw_comp_cons++;
470
471         bd_cons = fp->rx_bd_cons;
472         bd_prod = fp->rx_bd_prod;
473         bd_prod_fw = bd_prod;
474         sw_comp_cons = fp->rx_comp_cons;
475         sw_comp_prod = fp->rx_comp_prod;
476
477         /* Memory barrier necessary as speculative reads of the rx
478          * buffer can be ahead of the index in the status block
479          */
480         rmb();
481
482         DP(NETIF_MSG_RX_STATUS,
483            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
484            fp->index, hw_comp_cons, sw_comp_cons);
485
486         while (sw_comp_cons != hw_comp_cons) {
487                 struct sw_rx_bd *rx_buf = NULL;
488                 struct sk_buff *skb;
489                 union eth_rx_cqe *cqe;
490                 u8 cqe_fp_flags;
491                 u16 len, pad;
492
493                 comp_ring_cons = RCQ_BD(sw_comp_cons);
494                 bd_prod = RX_BD(bd_prod);
495                 bd_cons = RX_BD(bd_cons);
496
497                 /* Prefetch the page containing the BD descriptor
498                    at the producer's index. It will be needed when a new skb is
499                    allocated */
500                 prefetch((void *)(PAGE_ALIGN((unsigned long)
501                                              (&fp->rx_desc_ring[bd_prod])) -
502                                   PAGE_SIZE + 1));
503
504                 cqe = &fp->rx_comp_ring[comp_ring_cons];
505                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
506
507                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
508                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
509                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
510                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
511                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
512                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
513
514                 /* is this a slowpath msg? */
515                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
516                         bnx2x_sp_event(fp, cqe);
517                         goto next_cqe;
518
519                 /* this is an rx packet */
520                 } else {
521                         rx_buf = &fp->rx_buf_ring[bd_cons];
522                         skb = rx_buf->skb;
523                         prefetch(skb);
524                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
525                         pad = cqe->fast_path_cqe.placement_offset;
526
527                         /* - If CQE is marked both TPA_START and TPA_END it is
528                          *   a non-TPA CQE.
529                          * - An FP CQE will always have the TPA_START and/or
530                          *   TPA_STOP flags set.
531                          */
532                         if ((!fp->disable_tpa) &&
533                             (TPA_TYPE(cqe_fp_flags) !=
534                                         (TPA_TYPE_START | TPA_TYPE_END))) {
535                                 u16 queue = cqe->fast_path_cqe.queue_index;
536
537                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
538                                         DP(NETIF_MSG_RX_STATUS,
539                                            "calling tpa_start on queue %d\n",
540                                            queue);
541
542                                         bnx2x_tpa_start(fp, queue, skb,
543                                                         bd_cons, bd_prod);
544
545                                         /* Set Toeplitz hash for an LRO skb */
546                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
547
548                                         goto next_rx;
549                                 } else { /* TPA_STOP */
550                                         DP(NETIF_MSG_RX_STATUS,
551                                            "calling tpa_stop on queue %d\n",
552                                            queue);
553
554                                         if (!BNX2X_RX_SUM_FIX(cqe))
555                                                 BNX2X_ERR("STOP on non-TCP "
556                                                           "data\n");
557
558                                         /* This is the size of the linear data
559                                            on this skb */
560                                         len = le16_to_cpu(cqe->fast_path_cqe.
561                                                                 len_on_bd);
562                                         bnx2x_tpa_stop(bp, fp, queue, pad,
563                                                     len, cqe, comp_ring_cons);
564 #ifdef BNX2X_STOP_ON_ERROR
565                                         if (bp->panic)
566                                                 return 0;
567 #endif
568
569                                         bnx2x_update_sge_prod(fp,
570                                                         &cqe->fast_path_cqe);
571                                         goto next_cqe;
572                                 }
573                         }
574
575                         dma_sync_single_for_device(&bp->pdev->dev,
576                                         dma_unmap_addr(rx_buf, mapping),
577                                                    pad + RX_COPY_THRESH,
578                                                    DMA_FROM_DEVICE);
579                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
580
581                         /* is this an error packet? */
582                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
583                                 DP(NETIF_MSG_RX_ERR,
584                                    "ERROR  flags %x  rx packet %u\n",
585                                    cqe_fp_flags, sw_comp_cons);
586                                 fp->eth_q_stats.rx_err_discard_pkt++;
587                                 goto reuse_rx;
588                         }
589
590                         /* Since we don't have a jumbo ring,
591                          * copy small packets if mtu > 1500
592                          */
593                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
594                             (len <= RX_COPY_THRESH)) {
595                                 struct sk_buff *new_skb;
596
597                                 new_skb = netdev_alloc_skb(bp->dev,
598                                                            len + pad);
599                                 if (new_skb == NULL) {
600                                         DP(NETIF_MSG_RX_ERR,
601                                            "ERROR  packet dropped "
602                                            "because of alloc failure\n");
603                                         fp->eth_q_stats.rx_skb_alloc_failed++;
604                                         goto reuse_rx;
605                                 }
606
607                                 /* aligned copy */
608                                 skb_copy_from_linear_data_offset(skb, pad,
609                                                     new_skb->data + pad, len);
610                                 skb_reserve(new_skb, pad);
611                                 skb_put(new_skb, len);
612
613                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
614
615                                 skb = new_skb;
616
617                         } else
618                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
619                                 dma_unmap_single(&bp->pdev->dev,
620                                         dma_unmap_addr(rx_buf, mapping),
621                                                  bp->rx_buf_size,
622                                                  DMA_FROM_DEVICE);
623                                 skb_reserve(skb, pad);
624                                 skb_put(skb, len);
625
626                         } else {
627                                 DP(NETIF_MSG_RX_ERR,
628                                    "ERROR  packet dropped because "
629                                    "of alloc failure\n");
630                                 fp->eth_q_stats.rx_skb_alloc_failed++;
631 reuse_rx:
632                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
633                                 goto next_rx;
634                         }
635
636                         skb->protocol = eth_type_trans(skb, bp->dev);
637
638                         /* Set Toeplitz hash for a non-LRO skb */
639                         bnx2x_set_skb_rxhash(bp, cqe, skb);
640
641                         skb_checksum_none_assert(skb);
642
643                         if (bp->rx_csum) {
644                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
645                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
646                                 else
647                                         fp->eth_q_stats.hw_csum_err++;
648                         }
649                 }
650
651                 skb_record_rx_queue(skb, fp->index);
652
653                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
654                      PARSING_FLAGS_VLAN)
655                         __vlan_hwaccel_put_tag(skb,
656                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
657                 napi_gro_receive(&fp->napi, skb);
658
659
660 next_rx:
661                 rx_buf->skb = NULL;
662
663                 bd_cons = NEXT_RX_IDX(bd_cons);
664                 bd_prod = NEXT_RX_IDX(bd_prod);
665                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
666                 rx_pkt++;
667 next_cqe:
668                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
669                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
670
671                 if (rx_pkt == budget)
672                         break;
673         } /* while */
674
675         fp->rx_bd_cons = bd_cons;
676         fp->rx_bd_prod = bd_prod_fw;
677         fp->rx_comp_cons = sw_comp_cons;
678         fp->rx_comp_prod = sw_comp_prod;
679
680         /* Update producers */
681         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
682                              fp->rx_sge_prod);
683
684         fp->rx_pkt += rx_pkt;
685         fp->rx_calls++;
686
687         return rx_pkt;
688 }
689
690 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
691 {
692         struct bnx2x_fastpath *fp = fp_cookie;
693         struct bnx2x *bp = fp->bp;
694
695         /* Return here if interrupt is disabled */
696         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
697                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
698                 return IRQ_HANDLED;
699         }
700
701         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
702                          "[fp %d fw_sd %d igusb %d]\n",
703            fp->index, fp->fw_sb_id, fp->igu_sb_id);
704         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
705
706 #ifdef BNX2X_STOP_ON_ERROR
707         if (unlikely(bp->panic))
708                 return IRQ_HANDLED;
709 #endif
710
711         /* Handle Rx and Tx according to MSI-X vector */
712         prefetch(fp->rx_cons_sb);
713         prefetch(fp->tx_cons_sb);
714         prefetch(&fp->sb_running_index[SM_RX_ID]);
715         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
716
717         return IRQ_HANDLED;
718 }
719
720 /* HW Lock for shared dual port PHYs */
721 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
722 {
723         mutex_lock(&bp->port.phy_mutex);
724
725         if (bp->port.need_hw_lock)
726                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
727 }
728
729 void bnx2x_release_phy_lock(struct bnx2x *bp)
730 {
731         if (bp->port.need_hw_lock)
732                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
733
734         mutex_unlock(&bp->port.phy_mutex);
735 }
736
737 /* calculates MF speed according to current line speed and MF configuration */
738 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
739 {
740         u16 line_speed = bp->link_vars.line_speed;
741         if (IS_MF(bp)) {
742                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
743                                                    bp->mf_config[BP_VN(bp)]);
744
745                 /* Calculate the current MAX line speed limit for the MF
746                  * devices
747                  */
748                 if (IS_MF_SI(bp))
749                         line_speed = (line_speed * maxCfg) / 100;
750                 else { /* SD mode */
751                         u16 vn_max_rate = maxCfg * 100;
752
753                         if (vn_max_rate < line_speed)
754                                 line_speed = vn_max_rate;
755                 }
756         }
757
758         return line_speed;
759 }
760
761 void bnx2x_link_report(struct bnx2x *bp)
762 {
763         if (bp->flags & MF_FUNC_DIS) {
764                 netif_carrier_off(bp->dev);
765                 netdev_err(bp->dev, "NIC Link is Down\n");
766                 return;
767         }
768
769         if (bp->link_vars.link_up) {
770                 u16 line_speed;
771
772                 if (bp->state == BNX2X_STATE_OPEN)
773                         netif_carrier_on(bp->dev);
774                 netdev_info(bp->dev, "NIC Link is Up, ");
775
776                 line_speed = bnx2x_get_mf_speed(bp);
777
778                 pr_cont("%d Mbps ", line_speed);
779
780                 if (bp->link_vars.duplex == DUPLEX_FULL)
781                         pr_cont("full duplex");
782                 else
783                         pr_cont("half duplex");
784
785                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
786                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
787                                 pr_cont(", receive ");
788                                 if (bp->link_vars.flow_ctrl &
789                                     BNX2X_FLOW_CTRL_TX)
790                                         pr_cont("& transmit ");
791                         } else {
792                                 pr_cont(", transmit ");
793                         }
794                         pr_cont("flow control ON");
795                 }
796                 pr_cont("\n");
797
798         } else { /* link_down */
799                 netif_carrier_off(bp->dev);
800                 netdev_err(bp->dev, "NIC Link is Down\n");
801         }
802 }
803
804 /* Returns the number of actually allocated BDs */
805 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
806                                       int rx_ring_size)
807 {
808         struct bnx2x *bp = fp->bp;
809         u16 ring_prod, cqe_ring_prod;
810         int i;
811
812         fp->rx_comp_cons = 0;
813         cqe_ring_prod = ring_prod = 0;
814         for (i = 0; i < rx_ring_size; i++) {
815                 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
816                         BNX2X_ERR("was only able to allocate "
817                                   "%d rx skbs on queue[%d]\n", i, fp->index);
818                         fp->eth_q_stats.rx_skb_alloc_failed++;
819                         break;
820                 }
821                 ring_prod = NEXT_RX_IDX(ring_prod);
822                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
823                 WARN_ON(ring_prod <= i);
824         }
825
826         fp->rx_bd_prod = ring_prod;
827         /* Limit the CQE producer by the CQE ring size */
828         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
829                                cqe_ring_prod);
830         fp->rx_pkt = fp->rx_calls = 0;
831
832         return i;
833 }
834
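/* Fill the RX BD ring: use the configured bp->rx_ring_size if set, otherwise
 * split MAX_RX_AVAIL across the RX queues, but never go below MIN_RX_AVAIL
 * per queue.
 */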
835 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
836 {
837         struct bnx2x *bp = fp->bp;
838         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
839                                               MAX_RX_AVAIL/bp->num_queues;
840
841         rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
842
843         bnx2x_alloc_rx_bds(fp, rx_ring_size);
844
845         /* Warning!
846          * This will generate an interrupt (to the TSTORM);
847          * it must only be done after the chip is initialized.
848          */
849         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
850                              fp->rx_sge_prod);
851 }
852
853 void bnx2x_init_rx_rings(struct bnx2x *bp)
854 {
855         int func = BP_FUNC(bp);
856         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
857                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
858         u16 ring_prod;
859         int i, j;
860
861         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
862                 IP_HEADER_ALIGNMENT_PADDING;
863
864         DP(NETIF_MSG_IFUP,
865            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
866
867         for_each_rx_queue(bp, j) {
868                 struct bnx2x_fastpath *fp = &bp->fp[j];
869
870                 if (!fp->disable_tpa) {
871                         for (i = 0; i < max_agg_queues; i++) {
872                                 fp->tpa_pool[i].skb =
873                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
874                                 if (!fp->tpa_pool[i].skb) {
875                                         BNX2X_ERR("Failed to allocate TPA "
876                                                   "skb pool for queue[%d] - "
877                                                   "disabling TPA on this "
878                                                   "queue!\n", j);
879                                         bnx2x_free_tpa_pool(bp, fp, i);
880                                         fp->disable_tpa = 1;
881                                         break;
882                                 }
883                                 dma_unmap_addr_set((struct sw_rx_bd *)
884                                                         &bp->fp->tpa_pool[i],
885                                                    mapping, 0);
886                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
887                         }
888
889                         /* "next page" elements initialization */
890                         bnx2x_set_next_page_sgl(fp);
891
892                         /* set SGEs bit mask */
893                         bnx2x_init_sge_ring_bit_mask(fp);
894
895                         /* Allocate SGEs and initialize the ring elements */
896                         for (i = 0, ring_prod = 0;
897                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
898
899                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
900                                         BNX2X_ERR("was only able to allocate "
901                                                   "%d rx sges\n", i);
902                                         BNX2X_ERR("disabling TPA for"
903                                                   " queue[%d]\n", j);
904                                         /* Cleanup already allocated elements */
905                                         bnx2x_free_rx_sge_range(bp,
906                                                                 fp, ring_prod);
907                                         bnx2x_free_tpa_pool(bp,
908                                                             fp, max_agg_queues);
909                                         fp->disable_tpa = 1;
910                                         ring_prod = 0;
911                                         break;
912                                 }
913                                 ring_prod = NEXT_SGE_IDX(ring_prod);
914                         }
915
916                         fp->rx_sge_prod = ring_prod;
917                 }
918         }
919
920         for_each_rx_queue(bp, j) {
921                 struct bnx2x_fastpath *fp = &bp->fp[j];
922
923                 fp->rx_bd_cons = 0;
924
925                 bnx2x_set_next_page_rx_bd(fp);
926
927                 /* CQ ring */
928                 bnx2x_set_next_page_rx_cq(fp);
929
930                 /* Allocate BDs and initialize BD ring */
931                 bnx2x_alloc_rx_bd_ring(fp);
932
933                 if (j != 0)
934                         continue;
935
936                 if (!CHIP_IS_E2(bp)) {
937                         REG_WR(bp, BAR_USTRORM_INTMEM +
938                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
939                                U64_LO(fp->rx_comp_mapping));
940                         REG_WR(bp, BAR_USTRORM_INTMEM +
941                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
942                                U64_HI(fp->rx_comp_mapping));
943                 }
944         }
945 }
946
947 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
948 {
949         int i;
950
951         for_each_tx_queue(bp, i) {
952                 struct bnx2x_fastpath *fp = &bp->fp[i];
953
954                 u16 bd_cons = fp->tx_bd_cons;
955                 u16 sw_prod = fp->tx_pkt_prod;
956                 u16 sw_cons = fp->tx_pkt_cons;
957
958                 while (sw_cons != sw_prod) {
959                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
960                         sw_cons++;
961                 }
962         }
963 }
964
965 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
966 {
967         int i, j;
968
969         for_each_rx_queue(bp, j) {
970                 struct bnx2x_fastpath *fp = &bp->fp[j];
971
972                 for (i = 0; i < NUM_RX_BD; i++) {
973                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
974                         struct sk_buff *skb = rx_buf->skb;
975
976                         if (skb == NULL)
977                                 continue;
978
979                         dma_unmap_single(&bp->pdev->dev,
980                                          dma_unmap_addr(rx_buf, mapping),
981                                          bp->rx_buf_size, DMA_FROM_DEVICE);
982
983                         rx_buf->skb = NULL;
984                         dev_kfree_skb(skb);
985                 }
986                 if (!fp->disable_tpa)
987                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
988                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
989                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
990         }
991 }
992
993 void bnx2x_free_skbs(struct bnx2x *bp)
994 {
995         bnx2x_free_tx_skbs(bp);
996         bnx2x_free_rx_skbs(bp);
997 }
998
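/* Update the MAX bandwidth field of this function's MF configuration and,
 * if the value actually changed, report it to the MCP via the SET_MF_BW
 * firmware command.
 */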
999 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1000 {
1001         /* load old values */
1002         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1003
1004         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1005                 /* leave all but MAX value */
1006                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1007
1008                 /* set new MAX value */
1009                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1010                                 & FUNC_MF_CFG_MAX_BW_MASK;
1011
1012                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1013         }
1014 }
1015
1016 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1017 {
1018         int i, offset = 1;
1019
1020         free_irq(bp->msix_table[0].vector, bp->dev);
1021         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1022            bp->msix_table[0].vector);
1023
1024 #ifdef BCM_CNIC
1025         offset++;
1026 #endif
1027         for_each_eth_queue(bp, i) {
1028                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
1029                    "state %x\n", i, bp->msix_table[i + offset].vector,
1030                    bnx2x_fp(bp, i, state));
1031
1032                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1033         }
1034 }
1035
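/* Release whatever interrupt resources were requested: either the whole
 * MSI-X vector set, or the single MSI/INTx vector. Both of the latter use
 * bp->pdev->irq, hence the identical branches below.
 */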
1036 void bnx2x_free_irq(struct bnx2x *bp)
1037 {
1038         if (bp->flags & USING_MSIX_FLAG)
1039                 bnx2x_free_msix_irqs(bp);
1040         else if (bp->flags & USING_MSI_FLAG)
1041                 free_irq(bp->pdev->irq, bp->dev);
1042         else
1043                 free_irq(bp->pdev->irq, bp->dev);
1044 }
1045
1046 int bnx2x_enable_msix(struct bnx2x *bp)
1047 {
1048         int msix_vec = 0, i, rc, req_cnt;
1049
1050         bp->msix_table[msix_vec].entry = msix_vec;
1051         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1052            bp->msix_table[0].entry);
1053         msix_vec++;
1054
1055 #ifdef BCM_CNIC
1056         bp->msix_table[msix_vec].entry = msix_vec;
1057         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1058            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1059         msix_vec++;
1060 #endif
1061         for_each_eth_queue(bp, i) {
1062                 bp->msix_table[msix_vec].entry = msix_vec;
1063                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1064                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1065                 msix_vec++;
1066         }
1067
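        /* One vector for the slowpath, one per ETH queue, plus whatever
         * CNIC_CONTEXT_USE accounts for (presumably 1 with BCM_CNIC and 0
         * otherwise).
         */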
1068         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1069
1070         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1071
1072         /*
1073          * reconfigure number of tx/rx queues according to available
1074          * MSI-X vectors
1075          */
1076         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1077                 /* how many fewer vectors will we have? */
1078                 int diff = req_cnt - rc;
1079
1080                 DP(NETIF_MSG_IFUP,
1081                    "Trying to use less MSI-X vectors: %d\n", rc);
1082
1083                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1084
1085                 if (rc) {
1086                         DP(NETIF_MSG_IFUP,
1087                            "MSI-X is not attainable  rc %d\n", rc);
1088                         return rc;
1089                 }
1090                 /*
1091                  * decrease number of queues by number of unallocated entries
1092                  */
1093                 bp->num_queues -= diff;
1094
1095                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1096                                   bp->num_queues);
1097         } else if (rc) {
1098                 /* fall back to INTx if not enough memory */
1099                 if (rc == -ENOMEM)
1100                         bp->flags |= DISABLE_MSI_FLAG;
1101                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1102                 return rc;
1103         }
1104
1105         bp->flags |= USING_MSIX_FLAG;
1106
1107         return 0;
1108 }
1109
1110 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1111 {
1112         int i, rc, offset = 1;
1113
1114         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1115                          bp->dev->name, bp->dev);
1116         if (rc) {
1117                 BNX2X_ERR("request sp irq failed\n");
1118                 return -EBUSY;
1119         }
1120
1121 #ifdef BCM_CNIC
1122         offset++;
1123 #endif
1124         for_each_eth_queue(bp, i) {
1125                 struct bnx2x_fastpath *fp = &bp->fp[i];
1126                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1127                          bp->dev->name, i);
1128
1129                 rc = request_irq(bp->msix_table[offset].vector,
1130                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1131                 if (rc) {
1132                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1133                         bnx2x_free_msix_irqs(bp);
1134                         return -EBUSY;
1135                 }
1136
1137                 offset++;
1138                 fp->state = BNX2X_FP_STATE_IRQ;
1139         }
1140
1141         i = BNX2X_NUM_ETH_QUEUES(bp);
1142         offset = 1 + CNIC_CONTEXT_USE;
1143         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1144                " ... fp[%d] %d\n",
1145                bp->msix_table[0].vector,
1146                0, bp->msix_table[offset].vector,
1147                i - 1, bp->msix_table[offset + i - 1].vector);
1148
1149         return 0;
1150 }
1151
1152 int bnx2x_enable_msi(struct bnx2x *bp)
1153 {
1154         int rc;
1155
1156         rc = pci_enable_msi(bp->pdev);
1157         if (rc) {
1158                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1159                 return -1;
1160         }
1161         bp->flags |= USING_MSI_FLAG;
1162
1163         return 0;
1164 }
1165
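/* Request the single interrupt used in the non-MSI-X case. An MSI vector is
 * exclusive to the device, so IRQF_SHARED is only needed for legacy INTx,
 * which may be shared with other devices.
 */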
1166 static int bnx2x_req_irq(struct bnx2x *bp)
1167 {
1168         unsigned long flags;
1169         int rc;
1170
1171         if (bp->flags & USING_MSI_FLAG)
1172                 flags = 0;
1173         else
1174                 flags = IRQF_SHARED;
1175
1176         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1177                          bp->dev->name, bp->dev);
1178         if (!rc)
1179                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1180
1181         return rc;
1182 }
1183
1184 static void bnx2x_napi_enable(struct bnx2x *bp)
1185 {
1186         int i;
1187
1188         for_each_napi_queue(bp, i)
1189                 napi_enable(&bnx2x_fp(bp, i, napi));
1190 }
1191
1192 static void bnx2x_napi_disable(struct bnx2x *bp)
1193 {
1194         int i;
1195
1196         for_each_napi_queue(bp, i)
1197                 napi_disable(&bnx2x_fp(bp, i, napi));
1198 }
1199
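/* Re-enable NAPI, interrupts and the TX queues, but only when bp->intr_sem
 * drops to zero, i.e. when this enable balances the last disable.
 */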
1200 void bnx2x_netif_start(struct bnx2x *bp)
1201 {
1202         int intr_sem;
1203
1204         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1205         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1206
1207         if (intr_sem) {
1208                 if (netif_running(bp->dev)) {
1209                         bnx2x_napi_enable(bp);
1210                         bnx2x_int_enable(bp);
1211                         if (bp->state == BNX2X_STATE_OPEN)
1212                                 netif_tx_wake_all_queues(bp->dev);
1213                 }
1214         }
1215 }
1216
1217 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1218 {
1219         bnx2x_int_disable_sync(bp, disable_hw);
1220         bnx2x_napi_disable(bp);
1221         netif_tx_disable(bp->dev);
1222 }
1223
1224 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1225 {
1226 #ifdef BCM_CNIC
1227         struct bnx2x *bp = netdev_priv(dev);
1228         if (NO_FCOE(bp))
1229                 return skb_tx_hash(dev, skb);
1230         else {
1231                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1232                 u16 ether_type = ntohs(hdr->h_proto);
1233
1234                 /* Skip VLAN tag if present */
1235                 if (ether_type == ETH_P_8021Q) {
1236                         struct vlan_ethhdr *vhdr =
1237                                 (struct vlan_ethhdr *)skb->data;
1238
1239                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1240                 }
1241
1242                 /* If ethertype is FCoE or FIP - use FCoE ring */
1243                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1244                         return bnx2x_fcoe(bp, index);
1245         }
1246 #endif
1247         /* Select a non-FCoE queue:  if FCoE is enabled, exclude the FCoE L2 ring
1248          */
1249         return __skb_tx_hash(dev, skb,
1250                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1251 }
1252
1253 void bnx2x_set_num_queues(struct bnx2x *bp)
1254 {
1255         switch (bp->multi_mode) {
1256         case ETH_RSS_MODE_DISABLED:
1257                 bp->num_queues = 1;
1258                 break;
1259         case ETH_RSS_MODE_REGULAR:
1260                 bp->num_queues = bnx2x_calc_num_queues(bp);
1261                 break;
1262
1263         default:
1264                 bp->num_queues = 1;
1265                 break;
1266         }
1267
1268         /* Add special queues */
1269         bp->num_queues += NONE_ETH_CONTEXT_USE;
1270 }
1271
1272 #ifdef BCM_CNIC
1273 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1274 {
1275         if (!NO_FCOE(bp)) {
1276                 if (!IS_MF_SD(bp))
1277                         bnx2x_set_fip_eth_mac_addr(bp, 1);
1278                 bnx2x_set_all_enode_macs(bp, 1);
1279                 bp->flags |= FCOE_MACS_SET;
1280         }
1281 }
1282 #endif
1283
1284 static void bnx2x_release_firmware(struct bnx2x *bp)
1285 {
1286         kfree(bp->init_ops_offsets);
1287         kfree(bp->init_ops);
1288         kfree(bp->init_data);
1289         release_firmware(bp->firmware);
1290 }
1291
1292 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1293 {
1294         int rc, num = bp->num_queues;
1295
1296 #ifdef BCM_CNIC
1297         if (NO_FCOE(bp))
1298                 num -= FCOE_CONTEXT_USE;
1299
1300 #endif
1301         netif_set_real_num_tx_queues(bp->dev, num);
1302         rc = netif_set_real_num_rx_queues(bp->dev, num);
1303         return rc;
1304 }
1305
1306 /* must be called with rtnl_lock */
1307 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1308 {
1309         u32 load_code;
1310         int i, rc;
1311
1312         /* Set init arrays */
1313         rc = bnx2x_init_firmware(bp);
1314         if (rc) {
1315                 BNX2X_ERR("Error loading firmware\n");
1316                 return rc;
1317         }
1318
1319 #ifdef BNX2X_STOP_ON_ERROR
1320         if (unlikely(bp->panic))
1321                 return -EPERM;
1322 #endif
1323
1324         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1325
1326         /* must be called before memory allocation and HW init */
1327         bnx2x_ilt_set_info(bp);
1328
1329         if (bnx2x_alloc_mem(bp))
1330                 return -ENOMEM;
1331
1332         rc = bnx2x_set_real_num_queues(bp);
1333         if (rc) {
1334                 BNX2X_ERR("Unable to set real_num_queues\n");
1335                 goto load_error0;
1336         }
1337
1338         for_each_queue(bp, i)
1339                 bnx2x_fp(bp, i, disable_tpa) =
1340                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1341
1342 #ifdef BCM_CNIC
1343         /* We don't want TPA on FCoE L2 ring */
1344         bnx2x_fcoe(bp, disable_tpa) = 1;
1345 #endif
1346         bnx2x_napi_enable(bp);
1347
1348         /* Send LOAD_REQUEST command to the MCP.
1349            The MCP responds with the type of LOAD command:
1350            if this is the first port to be initialized,
1351            the common blocks should be initialized, otherwise not.
1352         */
1353         if (!BP_NOMCP(bp)) {
1354                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1355                 if (!load_code) {
1356                         BNX2X_ERR("MCP response failure, aborting\n");
1357                         rc = -EBUSY;
1358                         goto load_error1;
1359                 }
1360                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1361                         rc = -EBUSY; /* other port in diagnostic mode */
1362                         goto load_error1;
1363                 }
1364
1365         } else {
1366                 int path = BP_PATH(bp);
1367                 int port = BP_PORT(bp);
1368
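                /* No MCP: emulate its bookkeeping with the driver's own load
                 * counters - index 0 counts all functions loaded on this
                 * path, index 1 + port counts them per port - and derive
                 * the LOAD response type from those counters.
                 */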
1369                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1370                    path, load_count[path][0], load_count[path][1],
1371                    load_count[path][2]);
1372                 load_count[path][0]++;
1373                 load_count[path][1 + port]++;
1374                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1375                    path, load_count[path][0], load_count[path][1],
1376                    load_count[path][2]);
1377                 if (load_count[path][0] == 1)
1378                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1379                 else if (load_count[path][1 + port] == 1)
1380                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1381                 else
1382                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1383         }
1384
1385         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1386             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1387             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1388                 bp->port.pmf = 1;
1389         else
1390                 bp->port.pmf = 0;
1391         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1392
1393         /* Initialize HW */
1394         rc = bnx2x_init_hw(bp, load_code);
1395         if (rc) {
1396                 BNX2X_ERR("HW init failed, aborting\n");
1397                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1398                 goto load_error2;
1399         }
1400
1401         /* Connect to IRQs */
1402         rc = bnx2x_setup_irqs(bp);
1403         if (rc) {
1404                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1405                 goto load_error2;
1406         }
1407
1408         /* Setup NIC internals and enable interrupts */
1409         bnx2x_nic_init(bp, load_code);
1410
1411         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1412             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1413             (bp->common.shmem2_base))
1414                 SHMEM2_WR(bp, dcc_support,
1415                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1416                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1417
1418         /* Send LOAD_DONE command to MCP */
1419         if (!BP_NOMCP(bp)) {
1420                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1421                 if (!load_code) {
1422                         BNX2X_ERR("MCP response failure, aborting\n");
1423                         rc = -EBUSY;
1424                         goto load_error3;
1425                 }
1426         }
1427
1428         bnx2x_dcbx_init(bp);
1429
1430         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1431
1432         rc = bnx2x_func_start(bp);
1433         if (rc) {
1434                 BNX2X_ERR("Function start failed!\n");
1435 #ifndef BNX2X_STOP_ON_ERROR
1436                 goto load_error3;
1437 #else
1438                 bp->panic = 1;
1439                 return -EBUSY;
1440 #endif
1441         }
1442
1443         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1444         if (rc) {
1445                 BNX2X_ERR("Setup leading failed!\n");
1446 #ifndef BNX2X_STOP_ON_ERROR
1447                 goto load_error3;
1448 #else
1449                 bp->panic = 1;
1450                 return -EBUSY;
1451 #endif
1452         }
1453
1454         if (!CHIP_IS_E1(bp) &&
1455             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1456                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1457                 bp->flags |= MF_FUNC_DIS;
1458         }
1459
1460 #ifdef BCM_CNIC
1461         /* Enable Timer scan */
1462         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1463 #endif
1464
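             /* Bring up the remaining (non-leading) client queues; the leading
              * client was set up above.
              */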
1465         for_each_nondefault_queue(bp, i) {
1466                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1467                 if (rc)
1468 #ifdef BCM_CNIC
1469                         goto load_error4;
1470 #else
1471                         goto load_error3;
1472 #endif
1473         }
1474
1475         /* Now when Clients are configured we are ready to work */
1476         bp->state = BNX2X_STATE_OPEN;
1477
1478 #ifdef BCM_CNIC
1479         bnx2x_set_fcoe_eth_macs(bp);
1480 #endif
1481
1482         bnx2x_set_eth_mac(bp, 1);
1483
1484         if (bp->pending_max) {
1485                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1486                 bp->pending_max = 0;
1487         }
1488
1489         if (bp->port.pmf)
1490                 bnx2x_initial_phy_init(bp, load_mode);
1491
1492         /* Start fast path */
1493         switch (load_mode) {
1494         case LOAD_NORMAL:
1495                 /* Tx queues only need to be re-enabled */
1496                 netif_tx_wake_all_queues(bp->dev);
1497                 /* Initialize the receive filter. */
1498                 bnx2x_set_rx_mode(bp->dev);
1499                 break;
1500
1501         case LOAD_OPEN:
1502                 netif_tx_start_all_queues(bp->dev);
1503                 smp_mb__after_clear_bit();
1504                 /* Initialize the receive filter. */
1505                 bnx2x_set_rx_mode(bp->dev);
1506                 break;
1507
1508         case LOAD_DIAG:
1509                 /* Initialize the receive filter. */
1510                 bnx2x_set_rx_mode(bp->dev);
1511                 bp->state = BNX2X_STATE_DIAG;
1512                 break;
1513
1514         default:
1515                 break;
1516         }
1517
1518         if (!bp->port.pmf)
1519                 bnx2x__link_status_update(bp);
1520
1521         /* start the timer */
1522         mod_timer(&bp->timer, jiffies + bp->current_interval);
1523
1524 #ifdef BCM_CNIC
1525         bnx2x_setup_cnic_irq_info(bp);
1526         if (bp->state == BNX2X_STATE_OPEN)
1527                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1528 #endif
1529         bnx2x_inc_load_cnt(bp);
1530
1531         bnx2x_release_firmware(bp);
1532
1533         return 0;
1534
1535 #ifdef BCM_CNIC
1536 load_error4:
1537         /* Disable Timer scan */
1538         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1539 #endif
1540 load_error3:
1541         bnx2x_int_disable_sync(bp, 1);
1542
1543         /* Free SKBs, SGEs, TPA pool and driver internals */
1544         bnx2x_free_skbs(bp);
1545         for_each_rx_queue(bp, i)
1546                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1547
1548         /* Release IRQs */
1549         bnx2x_free_irq(bp);
1550 load_error2:
1551         if (!BP_NOMCP(bp)) {
1552                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1553                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1554         }
1555
1556         bp->port.pmf = 0;
1557 load_error1:
1558         bnx2x_napi_disable(bp);
1559 load_error0:
1560         bnx2x_free_mem(bp);
1561
1562         bnx2x_release_firmware(bp);
1563
1564         return rc;
1565 }
1566
1567 /* must be called with rtnl_lock */
1568 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1569 {
1570         int i;
1571
1572         if (bp->state == BNX2X_STATE_CLOSED) {
1573                 /* Interface has been removed - nothing to recover */
1574                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1575                 bp->is_leader = 0;
1576                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1577                 smp_wmb();
1578
1579                 return -EINVAL;
1580         }
1581
1582 #ifdef BCM_CNIC
1583         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1584 #endif
1585         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1586
1587         /* Set "drop all" */
1588         bp->rx_mode = BNX2X_RX_MODE_NONE;
1589         bnx2x_set_storm_rx_mode(bp);
1590
1591         /* Stop Tx */
1592         bnx2x_tx_disable(bp);
1593
1594         del_timer_sync(&bp->timer);
1595
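             /* The periodic timer has been stopped; mark the driver pulse as
              * "always alive" so the MCP does not treat the missing pulses as
              * a driver failure while the unload completes.
              */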
1596         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1597                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1598
1599         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1600
1601         /* Cleanup the chip if needed */
1602         if (unload_mode != UNLOAD_RECOVERY)
1603                 bnx2x_chip_cleanup(bp, unload_mode);
1604         else {
1605                 /* Disable HW interrupts, NAPI and Tx */
1606                 bnx2x_netif_stop(bp, 1);
1607
1608                 /* Release IRQs */
1609                 bnx2x_free_irq(bp);
1610         }
1611
1612         bp->port.pmf = 0;
1613
1614         /* Free SKBs, SGEs, TPA pool and driver internals */
1615         bnx2x_free_skbs(bp);
1616         for_each_rx_queue(bp, i)
1617                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1618
1619         bnx2x_free_mem(bp);
1620
1621         bp->state = BNX2X_STATE_CLOSED;
1622
1623         /* The last driver to unload must disable the "close the gate"
1624          * functionality if there is no parity attention or "process kill" pending.
1625          */
1626         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1627             bnx2x_reset_is_done(bp))
1628                 bnx2x_disable_close_the_gate(bp);
1629
1630         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1631         if (unload_mode == UNLOAD_RECOVERY)
1632                 bp->fw_seq = 0;
1633
1634         return 0;
1635 }
1636
1637 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1638 {
1639         u16 pmcsr;
1640
1641         /* If there is no power capability, silently succeed */
1642         if (!bp->pm_cap) {
1643                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1644                 return 0;
1645         }
1646
1647         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1648
1649         switch (state) {
1650         case PCI_D0:
1651                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1652                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1653                                        PCI_PM_CTRL_PME_STATUS));
1654
1655                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1656                         /* delay required during transition out of D3hot */
1657                         msleep(20);
1658                 break;
1659
1660         case PCI_D3hot:
1661                 /* If there are other active clients (enable_cnt > 1),
1662                    don't shut down the power */
1663                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1664                         return 0;
1665                 /* Don't shut down the power for emulation and FPGA */
1666                 if (CHIP_REV_IS_SLOW(bp))
1667                         return 0;
1668
1669                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1670                 pmcsr |= 3;             /* D3hot */
1671
1672                 if (bp->wol)
1673                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1674
1675                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1676                                       pmcsr);
1677
1678                 /* No more memory access after this point until
1679                  * the device is brought back to D0.
1680                  */
1681                 break;
1682
1683         default:
1684                 return -EINVAL;
1685         }
1686         return 0;
1687 }
1688
1689 /*
1690  * net_device service functions
1691  */
1692 int bnx2x_poll(struct napi_struct *napi, int budget)
1693 {
1694         int work_done = 0;
1695         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1696                                                  napi);
1697         struct bnx2x *bp = fp->bp;
1698
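             /* Service Tx completions and Rx work until either the budget is
              * exhausted or no work is left and the fastpath status block
              * interrupt is re-enabled.
              */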
1699         while (1) {
1700 #ifdef BNX2X_STOP_ON_ERROR
1701                 if (unlikely(bp->panic)) {
1702                         napi_complete(napi);
1703                         return 0;
1704                 }
1705 #endif
1706
1707                 if (bnx2x_has_tx_work(fp))
1708                         bnx2x_tx_int(fp);
1709
1710                 if (bnx2x_has_rx_work(fp)) {
1711                         work_done += bnx2x_rx_int(fp, budget - work_done);
1712
1713                         /* must not complete if we consumed full budget */
1714                         if (work_done >= budget)
1715                                 break;
1716                 }
1717
1718                 /* Fall out from the NAPI loop if needed */
1719                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1720 #ifdef BCM_CNIC
1721                         /* No need to update SB for FCoE L2 ring as long as
1722                          * it's connected to the default SB and the SB
1723                          * has been updated when NAPI was scheduled.
1724                          */
1725                         if (IS_FCOE_FP(fp)) {
1726                                 napi_complete(napi);
1727                                 break;
1728                         }
1729 #endif
1730
1731                         bnx2x_update_fpsb_idx(fp);
1732                         /* bnx2x_has_rx_work() reads the status block,
1733                          * thus we need to ensure that status block indices
1734                          * have been actually read (bnx2x_update_fpsb_idx)
1735                          * prior to this check (bnx2x_has_rx_work) so that
1736                          * we won't write the "newer" value of the status block
1737                          * to IGU (if there was a DMA right after
1738                          * bnx2x_has_rx_work and if there is no rmb, the memory
1739                          * reading (bnx2x_update_fpsb_idx) may be postponed
1740                          * to right before bnx2x_ack_sb). In this case there
1741                          * will never be another interrupt until there is
1742                          * another update of the status block, while there
1743                          * is still unhandled work.
1744                          */
1745                         rmb();
1746
1747                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1748                                 napi_complete(napi);
1749                                 /* Re-enable interrupts */
1750                                 DP(NETIF_MSG_HW,
1751                                    "Update index to %d\n", fp->fp_hc_idx);
1752                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1753                                              le16_to_cpu(fp->fp_hc_idx),
1754                                              IGU_INT_ENABLE, 1);
1755                                 break;
1756                         }
1757                 }
1758         }
1759
1760         return work_done;
1761 }
1762
1763 /* we split the first BD into headers and data BDs
1764  * to ease the pain of our fellow microcode engineers
1765  * we use one mapping for both BDs
1766  * So far this has only been observed to happen
1767  * in Other Operating Systems(TM)
1768  */
1769 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1770                                    struct bnx2x_fastpath *fp,
1771                                    struct sw_tx_bd *tx_buf,
1772                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1773                                    u16 bd_prod, int nbd)
1774 {
1775         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1776         struct eth_tx_bd *d_tx_bd;
1777         dma_addr_t mapping;
1778         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1779
1780         /* first fix first BD */
1781         h_tx_bd->nbd = cpu_to_le16(nbd);
1782         h_tx_bd->nbytes = cpu_to_le16(hlen);
1783
1784         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1785            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1786            h_tx_bd->addr_lo, h_tx_bd->nbd);
1787
1788         /* now get a new data BD
1789          * (after the pbd) and fill it */
1790         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1791         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1792
1793         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1794                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1795
1796         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1797         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1798         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1799
1800         /* this marks the BD as one that has no individual mapping */
1801         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1802
1803         DP(NETIF_MSG_TX_QUEUED,
1804            "TSO split data size is %d (%x:%x)\n",
1805            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1806
1807         /* update tx_bd */
1808         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1809
1810         return bd_prod;
1811 }
1812
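     /* Adjust a checksum whose start offset differs from the transport header
      * by 'fix' bytes: subtract the partial checksum of the bytes just before
      * the transport header (fix > 0) or add the partial checksum of the first
      * bytes after it (fix < 0), then fold and byte-swap the result for the
      * parsing BD.
      */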
1813 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1814 {
1815         if (fix > 0)
1816                 csum = (u16) ~csum_fold(csum_sub(csum,
1817                                 csum_partial(t_header - fix, fix, 0)));
1818
1819         else if (fix < 0)
1820                 csum = (u16) ~csum_fold(csum_add(csum,
1821                                 csum_partial(t_header, -fix, 0)));
1822
1823         return swab16(csum);
1824 }
1825
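     /* Classify the skb for transmission: build a bitmask of XMIT_* flags
      * describing the checksum offload type (plain, IPv4 or IPv6, TCP or not)
      * and the GSO type for LSO skbs.
      */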
1826 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1827 {
1828         u32 rc;
1829
1830         if (skb->ip_summed != CHECKSUM_PARTIAL)
1831                 rc = XMIT_PLAIN;
1832
1833         else {
1834                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1835                         rc = XMIT_CSUM_V6;
1836                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1837                                 rc |= XMIT_CSUM_TCP;
1838
1839                 } else {
1840                         rc = XMIT_CSUM_V4;
1841                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1842                                 rc |= XMIT_CSUM_TCP;
1843                 }
1844         }
1845
1846         if (skb_is_gso_v6(skb))
1847                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1848         else if (skb_is_gso(skb))
1849                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1850
1851         return rc;
1852 }
1853
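     /* The FW can only fetch a limited number of BDs per packet window, so a
      * heavily fragmented LSO skb (where a full MSS may not fit within
      * MAX_FETCH_BD - 3 fragments) has to be linearized first; the helper
      * below detects that case.
      */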
1854 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1855 /* check if the packet requires linearization (packet is too fragmented);
1856    no need to check fragmentation if page size > 8K (there will be no
1857    violation of FW restrictions) */
1858 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1859                              u32 xmit_type)
1860 {
1861         int to_copy = 0;
1862         int hlen = 0;
1863         int first_bd_sz = 0;
1864
1865         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1866         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1867
1868                 if (xmit_type & XMIT_GSO) {
1869                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1870                         /* Check if LSO packet needs to be copied:
1871                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1872                         int wnd_size = MAX_FETCH_BD - 3;
1873                         /* Number of windows to check */
1874                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1875                         int wnd_idx = 0;
1876                         int frag_idx = 0;
1877                         u32 wnd_sum = 0;
1878
1879                         /* Headers length */
1880                         hlen = (int)(skb_transport_header(skb) - skb->data) +
1881                                 tcp_hdrlen(skb);
1882
1883                         /* Amount of data (w/o headers) in the linear part of the SKB */
1884                         first_bd_sz = skb_headlen(skb) - hlen;
1885
1886                         wnd_sum  = first_bd_sz;
1887
1888                         /* Calculate the first sum - it's special */
1889                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1890                                 wnd_sum +=
1891                                         skb_shinfo(skb)->frags[frag_idx].size;
1892
1893                         /* If there was data in the linear part of the skb - check it */
1894                         if (first_bd_sz > 0) {
1895                                 if (unlikely(wnd_sum < lso_mss)) {
1896                                         to_copy = 1;
1897                                         goto exit_lbl;
1898                                 }
1899
1900                                 wnd_sum -= first_bd_sz;
1901                         }
1902
1903                         /* Others are easier: run through the frag list and
1904                            check all windows */
1905                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1906                                 wnd_sum +=
1907                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1908
1909                                 if (unlikely(wnd_sum < lso_mss)) {
1910                                         to_copy = 1;
1911                                         break;
1912                                 }
1913                                 wnd_sum -=
1914                                         skb_shinfo(skb)->frags[wnd_idx].size;
1915                         }
1916                 } else {
1917                         /* in the non-LSO case a too fragmented packet
1918                            should always be linearized */
1919                         to_copy = 1;
1920                 }
1921         }
1922
1923 exit_lbl:
1924         if (unlikely(to_copy))
1925                 DP(NETIF_MSG_TX_QUEUED,
1926                    "Linearization IS REQUIRED for %s packet. "
1927                    "num_frags %d  hlen %d  first_bd_sz %d\n",
1928                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1929                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1930
1931         return to_copy;
1932 }
1933 #endif
1934
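     /* Fill the GSO fields of the E2 parsing data: the MSS and, when needed,
      * the IPv6-with-extension-header indication.
      */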
1935 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1936                                         u32 xmit_type)
1937 {
1938         *parsing_data |= (skb_shinfo(skb)->gso_size <<
1939                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1940                               ETH_TX_PARSE_BD_E2_LSO_MSS;
1941         if ((xmit_type & XMIT_GSO_V6) &&
1942             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1943                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1944 }
1945
1946 /**
1947  * Update PBD in GSO case.
1948  *
1949  * @param skb
1951  * @param pbd
1952  * @param xmit_type
1953  */
1954 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1955                                      struct eth_tx_parse_bd_e1x *pbd,
1956                                      u32 xmit_type)
1957 {
1958         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1959         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1960         pbd->tcp_flags = pbd_tcp_flags(skb);
1961
1962         if (xmit_type & XMIT_GSO_V4) {
1963                 pbd->ip_id = swab16(ip_hdr(skb)->id);
1964                 pbd->tcp_pseudo_csum =
1965                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1966                                                   ip_hdr(skb)->daddr,
1967                                                   0, IPPROTO_TCP, 0));
1968
1969         } else
1970                 pbd->tcp_pseudo_csum =
1971                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1972                                                 &ipv6_hdr(skb)->daddr,
1973                                                 0, IPPROTO_TCP, 0));
1974
1975         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1976 }
1977
1978 /**
1979  * Update the E2 parsing data in the checksum offload case.
1980  * @param bp
1981  * @param skb
1982  * @param parsing_data
1983  * @param xmit_type
1984  *
1985  * @return header len
1986  */
1987 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1988         u32 *parsing_data, u32 xmit_type)
1989 {
1990         *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1991                 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1992                 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
1993
1994         *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1995                 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1996                 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
1997
1998         return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1999 }
2000
2001 /**
2002  * Update PBD in the checksum offload case.
2003  * @param bp
2004  * @param skb
2005  * @param pbd
2006  * @param xmit_type
2007  *
2008  * @return Header length
2009  */
2010 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2011         struct eth_tx_parse_bd_e1x *pbd,
2012         u32 xmit_type)
2013 {
2014         u8 hlen = (skb_network_header(skb) - skb->data) / 2;
2015
2016         /* for now NS flag is not used in Linux */
2017         pbd->global_data =
2018                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2019                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2020
2021         pbd->ip_hlen_w = (skb_transport_header(skb) -
2022                         skb_network_header(skb)) / 2;
2023
2024         hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
2025
2026         pbd->total_hlen_w = cpu_to_le16(hlen);
2027         hlen = hlen * 2;        /* convert back from 16-bit words to bytes */
2028
2029         if (xmit_type & XMIT_CSUM_TCP) {
2030                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2031
2032         } else {
2033                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2034
2035                 DP(NETIF_MSG_TX_QUEUED,
2036                    "hlen %d  fix %d  csum before fix %x\n",
2037                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2038
2039                 /* HW bug: fixup the CSUM */
2040                 pbd->tcp_pseudo_csum =
2041                         bnx2x_csum_fix(skb_transport_header(skb),
2042                                        SKB_CS(skb), fix);
2043
2044                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2045                    pbd->tcp_pseudo_csum);
2046         }
2047
2048         return hlen;
2049 }
2050
2051 /* called with netif_tx_lock
2052  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2053  * netif_wake_queue()
2054  */
2055 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2056 {
2057         struct bnx2x *bp = netdev_priv(dev);
2058         struct bnx2x_fastpath *fp;
2059         struct netdev_queue *txq;
2060         struct sw_tx_bd *tx_buf;
2061         struct eth_tx_start_bd *tx_start_bd;
2062         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2063         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2064         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2065         u32 pbd_e2_parsing_data = 0;
2066         u16 pkt_prod, bd_prod;
2067         int nbd, fp_index;
2068         dma_addr_t mapping;
2069         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2070         int i;
2071         u8 hlen = 0;
2072         __le16 pkt_size = 0;
2073         struct ethhdr *eth;
2074         u8 mac_type = UNICAST_ADDRESS;
2075
2076 #ifdef BNX2X_STOP_ON_ERROR
2077         if (unlikely(bp->panic))
2078                 return NETDEV_TX_BUSY;
2079 #endif
2080
2081         fp_index = skb_get_queue_mapping(skb);
2082         txq = netdev_get_tx_queue(dev, fp_index);
2083
2084         fp = &bp->fp[fp_index];
2085
2086         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2087                 fp->eth_q_stats.driver_xoff++;
2088                 netif_tx_stop_queue(txq);
2089                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2090                 return NETDEV_TX_BUSY;
2091         }
2092
2093         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2094                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2095            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2096            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2097
2098         eth = (struct ethhdr *)skb->data;
2099
2100         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2101         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2102                 if (is_broadcast_ether_addr(eth->h_dest))
2103                         mac_type = BROADCAST_ADDRESS;
2104                 else
2105                         mac_type = MULTICAST_ADDRESS;
2106         }
2107
2108 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2109         /* First, check if we need to linearize the skb (due to FW
2110            restrictions). No need to check fragmentation if page size > 8K
2111            (there will be no violation of FW restrictions) */
2112         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2113                 /* Statistics of linearization */
2114                 bp->lin_cnt++;
2115                 if (skb_linearize(skb) != 0) {
2116                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2117                            "silently dropping this SKB\n");
2118                         dev_kfree_skb_any(skb);
2119                         return NETDEV_TX_OK;
2120                 }
2121         }
2122 #endif
2123
2124         /*
2125         Please read carefully. First we use one BD which we mark as start,
2126         then we have a parsing info BD (used for TSO or xsum),
2127         and only then we have the rest of the TSO BDs.
2128         (don't forget to mark the last one as last,
2129         and to unmap only AFTER you write to the BD ...)
2130         And above all, all pbd sizes are in words - NOT DWORDS!
2131         */
2132
2133         pkt_prod = fp->tx_pkt_prod++;
2134         bd_prod = TX_BD(fp->tx_bd_prod);
2135
2136         /* get a tx_buf and first BD */
2137         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2138         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2139
2140         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2141         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2142                  mac_type);
2143
2144         /* header nbd */
2145         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2146
2147         /* remember the first BD of the packet */
2148         tx_buf->first_bd = fp->tx_bd_prod;
2149         tx_buf->skb = skb;
2150         tx_buf->flags = 0;
2151
2152         DP(NETIF_MSG_TX_QUEUED,
2153            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2154            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2155
2156         if (vlan_tx_tag_present(skb)) {
2157                 tx_start_bd->vlan_or_ethertype =
2158                     cpu_to_le16(vlan_tx_tag_get(skb));
2159                 tx_start_bd->bd_flags.as_bitfield |=
2160                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2161         } else
2162                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2163
2164         /* turn on parsing and get a BD */
2165         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2166
2167         if (xmit_type & XMIT_CSUM) {
2168                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2169
2170                 if (xmit_type & XMIT_CSUM_V4)
2171                         tx_start_bd->bd_flags.as_bitfield |=
2172                                                 ETH_TX_BD_FLAGS_IP_CSUM;
2173                 else
2174                         tx_start_bd->bd_flags.as_bitfield |=
2175                                                 ETH_TX_BD_FLAGS_IPV6;
2176
2177                 if (!(xmit_type & XMIT_CSUM_TCP))
2178                         tx_start_bd->bd_flags.as_bitfield |=
2179                                                 ETH_TX_BD_FLAGS_IS_UDP;
2180         }
2181
2182         if (CHIP_IS_E2(bp)) {
2183                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2184                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2185                 /* Set PBD in checksum offload case */
2186                 if (xmit_type & XMIT_CSUM)
2187                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2188                                                      &pbd_e2_parsing_data,
2189                                                      xmit_type);
2190         } else {
2191                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2192                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2193                 /* Set PBD in checksum offload case */
2194                 if (xmit_type & XMIT_CSUM)
2195                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2196
2197         }
2198
2199         /* Map skb linear data for DMA */
2200         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2201                                  skb_headlen(skb), DMA_TO_DEVICE);
2202
2203         /* Setup the data pointer of the first BD of the packet */
2204         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2205         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2206         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2207         tx_start_bd->nbd = cpu_to_le16(nbd);
2208         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2209         pkt_size = tx_start_bd->nbytes;
2210
2211         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2212            "  nbytes %d  flags %x  vlan %x\n",
2213            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2214            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2215            tx_start_bd->bd_flags.as_bitfield,
2216            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2217
2218         if (xmit_type & XMIT_GSO) {
2219
2220                 DP(NETIF_MSG_TX_QUEUED,
2221                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2222                    skb->len, hlen, skb_headlen(skb),
2223                    skb_shinfo(skb)->gso_size);
2224
2225                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2226
2227                 if (unlikely(skb_headlen(skb) > hlen))
2228                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2229                                                  hlen, bd_prod, ++nbd);
2230                 if (CHIP_IS_E2(bp))
2231                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2232                                              xmit_type);
2233                 else
2234                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2235         }
2236
2237         /* Set the PBD's parsing_data field if not zero
2238          * (for the chips newer than 57711).
2239          */
2240         if (pbd_e2_parsing_data)
2241                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2242
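             /* Track the last BD written so far; if the skb has no fragments
              * this remains the start BD.
              */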
2243         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2244
2245         /* Handle fragmented skb */
2246         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2247                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2248
2249                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2250                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2251                 if (total_pkt_bd == NULL)
2252                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2253
2254                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2255                                        frag->page_offset,
2256                                        frag->size, DMA_TO_DEVICE);
2257
2258                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2259                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2260                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2261                 le16_add_cpu(&pkt_size, frag->size);
2262
2263                 DP(NETIF_MSG_TX_QUEUED,
2264                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2265                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2266                    le16_to_cpu(tx_data_bd->nbytes));
2267         }
2268
2269         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2270
2271         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2272
2273         /* now send a tx doorbell, counting the 'next page' BD
2274          * if the packet contains it or ends with it
2275          */
2276         if (TX_BD_POFF(bd_prod) < nbd)
2277                 nbd++;
2278
2279         if (total_pkt_bd != NULL)
2280                 total_pkt_bd->total_pkt_bytes = pkt_size;
2281
2282         if (pbd_e1x)
2283                 DP(NETIF_MSG_TX_QUEUED,
2284                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2285                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2286                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2287                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2288                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2289                     le16_to_cpu(pbd_e1x->total_hlen_w));
2290         if (pbd_e2)
2291                 DP(NETIF_MSG_TX_QUEUED,
2292                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2293                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2294                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2295                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2296                    pbd_e2->parsing_data);
2297         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2298
2299         /*
2300          * Make sure that the BD data is updated before updating the producer
2301          * since FW might read the BD right after the producer is updated.
2302          * This is only applicable for weak-ordered memory model archs such
2303          * as IA-64. The following barrier is also mandatory since the FW
2304          * assumes packets must have BDs.
2305          */
2306         wmb();
2307
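             /* Advance the doorbell producer by the number of BDs used and ring
              * the doorbell so the chip starts fetching them.
              */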
2308         fp->tx_db.data.prod += nbd;
2309         barrier();
2310
2311         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2312
2313         mmiowb();
2314
2315         fp->tx_bd_prod += nbd;
2316
2317         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2318                 netif_tx_stop_queue(txq);
2319
2320                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2321                  * ordering of set_bit() in netif_tx_stop_queue() and read of
2322                  * fp->tx_bd_cons */
2323                 smp_mb();
2324
2325                 fp->eth_q_stats.driver_xoff++;
2326                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2327                         netif_tx_wake_queue(txq);
2328         }
2329         fp->tx_pkt++;
2330
2331         return NETDEV_TX_OK;
2332 }
2333
2334 /* called with rtnl_lock */
2335 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2336 {
2337         struct sockaddr *addr = p;
2338         struct bnx2x *bp = netdev_priv(dev);
2339
2340         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2341                 return -EINVAL;
2342
2343         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2344         if (netif_running(dev))
2345                 bnx2x_set_eth_mac(bp, 1);
2346
2347         return 0;
2348 }
2349
2350
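     /* Request MSI-X vectors if MSI-X is in use; otherwise request a single
      * MSI or legacy INTx interrupt.
      */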
2351 static int bnx2x_setup_irqs(struct bnx2x *bp)
2352 {
2353         int rc = 0;
2354         if (bp->flags & USING_MSIX_FLAG) {
2355                 rc = bnx2x_req_msix_irqs(bp);
2356                 if (rc)
2357                         return rc;
2358         } else {
2359                 bnx2x_ack_int(bp);
2360                 rc = bnx2x_req_irq(bp);
2361                 if (rc) {
2362                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2363                         return rc;
2364                 }
2365                 if (bp->flags & USING_MSI_FLAG) {
2366                         bp->dev->irq = bp->pdev->irq;
2367                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2368                                bp->pdev->irq);
2369                 }
2370         }
2371
2372         return 0;
2373 }
2374
2375 void bnx2x_free_mem_bp(struct bnx2x *bp)
2376 {
2377         kfree(bp->fp);
2378         kfree(bp->msix_table);
2379         kfree(bp->ilt);
2380 }
2381
2382 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2383 {
2384         struct bnx2x_fastpath *fp;
2385         struct msix_entry *tbl;
2386         struct bnx2x_ilt *ilt;
2387
2388         /* fp array */
2389         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2390         if (!fp)
2391                 goto alloc_err;
2392         bp->fp = fp;
2393
2394         /* msix table */
2395         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2396                                   GFP_KERNEL);
2397         if (!tbl)
2398                 goto alloc_err;
2399         bp->msix_table = tbl;
2400
2401         /* ilt */
2402         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2403         if (!ilt)
2404                 goto alloc_err;
2405         bp->ilt = ilt;
2406
2407         return 0;
2408 alloc_err:
2409         bnx2x_free_mem_bp(bp);
2410         return -ENOMEM;
2412 }
2413
2414 /* called with rtnl_lock */
2415 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2416 {
2417         struct bnx2x *bp = netdev_priv(dev);
2418         int rc = 0;
2419
2420         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2421                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2422                 return -EAGAIN;
2423         }
2424
2425         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2426             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2427                 return -EINVAL;
2428
2429         /* This does not race with packet allocation
2430          * because the actual alloc size is
2431          * only updated as part of load
2432          */
2433         dev->mtu = new_mtu;
2434
2435         if (netif_running(dev)) {
2436                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2437                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2438         }
2439
2440         return rc;
2441 }
2442
2443 void bnx2x_tx_timeout(struct net_device *dev)
2444 {
2445         struct bnx2x *bp = netdev_priv(dev);
2446
2447 #ifdef BNX2X_STOP_ON_ERROR
2448         if (!bp->panic)
2449                 bnx2x_panic();
2450 #endif
2451         /* This allows the netif to be shut down gracefully before resetting */
2452         schedule_delayed_work(&bp->reset_task, 0);
2453 }
2454
2455 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2456 {
2457         struct net_device *dev = pci_get_drvdata(pdev);
2458         struct bnx2x *bp;
2459
2460         if (!dev) {
2461                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2462                 return -ENODEV;
2463         }
2464         bp = netdev_priv(dev);
2465
2466         rtnl_lock();
2467
2468         pci_save_state(pdev);
2469
2470         if (!netif_running(dev)) {
2471                 rtnl_unlock();
2472                 return 0;
2473         }
2474
2475         netif_device_detach(dev);
2476
2477         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2478
2479         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2480
2481         rtnl_unlock();
2482
2483         return 0;
2484 }
2485
2486 int bnx2x_resume(struct pci_dev *pdev)
2487 {
2488         struct net_device *dev = pci_get_drvdata(pdev);
2489         struct bnx2x *bp;
2490         int rc;
2491
2492         if (!dev) {
2493                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2494                 return -ENODEV;
2495         }
2496         bp = netdev_priv(dev);
2497
2498         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2499                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2500                 return -EAGAIN;
2501         }
2502
2503         rtnl_lock();
2504
2505         pci_restore_state(pdev);
2506
2507         if (!netif_running(dev)) {
2508                 rtnl_unlock();
2509                 return 0;
2510         }
2511
2512         bnx2x_set_power_state(bp, PCI_D0);
2513         netif_device_attach(dev);
2514
2515         /* Since the chip was reset, clear the FW sequence number */
2516         bp->fw_seq = 0;
2517         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2518
2519         rtnl_unlock();
2520
2521         return rc;
2522 }