be2net: fix a race in be_xmit()
drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

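/* Illustrative usage, not from this file (the values are made up): both
 * parameters are read-only at runtime (S_IRUGO), so they can only be set
 * when the module is loaded, e.g.:
 *
 *      modprobe be2net num_vfs=2 rx_frag_size=4096
 *
 * rx_frag_size sets the granularity at which receive buffers are carved
 * out of pages in be_post_rx_frags().
 */
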
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
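
/* Illustrative sketch of how the doorbell helpers above are driven (local
 * names assumed, not from this file): "arm" re-arms the queue so the
 * hardware raises a new event, and "num_popped" returns credits for the
 * entries the driver has consumed, e.g. in a completion-processing loop:
 *
 *      while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
 *              ... process the completion ...
 *              work_done++;
 *      }
 *      be_cq_notify(adapter, rxo->cq.id, true, work_done);
 */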

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id[0], 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}
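
/* Note on the ordering above: the new pmac entry is added before the old
 * one (saved in pmac_id at entry) is deleted, so that if the add fails the
 * interface is never left without a valid MAC filtering entry.
 */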

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
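
/* Worked example (illustrative): this helper widens a wrapping 16-bit HW
 * counter (e.g. the ERX drops counter used in be_parse_stats() below) into
 * a 32-bit software value. Suppose *acc holds 0x0001FFFE (one prior wrap,
 * low part 0xFFFE) and the hardware now reports val = 0x0003. Since
 * val < lo(*acc) the counter must have wrapped:
 * newacc = 0x00010000 + 0x0003 + 65536 = 0x00020003, which preserves the
 * running 32-bit total.
 */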

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        if (lancer_chip(adapter))
                goto done;

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
done:
        return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}
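
/* Why the fetch/retry loops above are needed: on 32-bit hosts a 64-bit
 * counter cannot be read atomically, so u64_stats_fetch_begin_bh() /
 * u64_stats_fetch_retry_bh() make the reader retry until it sees a
 * consistent snapshot; on 64-bit builds they compile down to plain loads.
 */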

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
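
/* Worked example (illustrative): an skb with linear data and two page
 * frags needs cnt = 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; that is
 * already even, so no dummy WRB is added. With a single frag the count
 * would be 3, and a dummy WRB rounds it up to 4; only Lancer chips accept
 * an odd WRB count.
 */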

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
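
/* Worked example (illustrative): with VLAN_PRIO_MASK 0xe000 and
 * VLAN_PRIO_SHIFT 13, a tag of 0xA005 carries priority 5. If bit 5 is not
 * set in adapter->vlan_prio_bmap, the priority bits are rewritten with
 * adapter->recommended_prio while the VLAN ID (0x005) is left untouched.
 */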

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
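
/* Note on the dma_err unwind above: txq->head is rewound to map_head (the
 * slot right after the hdr wrb) and the already-posted entries are walked
 * forward and unmapped. map_single is cleared after the first entry
 * because only the linear part of the skb was mapped with
 * dma_map_single(); all page frags are unmapped with dma_unmap_page().
 */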

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        /* For vlan tagged pkts, BE
         * 1) calculates checksum even when CSO is not requested
         * 2) calculates checksum wrongly for padded pkt less than
         * 60 bytes long.
         * As a workaround disable TX vlan offloading in such cases.
         */
        if (unlikely(vlan_tx_tag_present(skb) &&
                     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto tx_drop;

                skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
                if (unlikely(!skb))
                        goto tx_drop;

                skb->vlan_tci = 0;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}
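
/* Note on the race fixed by this commit: once be_txq_notify() rings the
 * tx doorbell, the hardware may complete the transmit and the completion
 * path may free the skb at any moment, so the skb must not be
 * dereferenced afterwards. That is why gso_segs is read into a local
 * *before* the doorbell and be_tx_stats_update() is passed the cached
 * value instead of touching skb_shinfo(skb) again.
 */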

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
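
/* Worked example (illustrative, assuming the usual BE_MAX_JUMBO_FRAME_SIZE
 * of 9018 bytes): the largest MTU be_change_mtu() accepts is
 * 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - (14 + 4) = 9000 bytes.
 */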

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;

        if (vf) {
                vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
                status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
                                            1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vtag[ntags++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vtag, ntags, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter, false, 0);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter, false, 0);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos, vf_fn;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
                if (dev->is_virtfn && dev->devfn == vf_fn) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
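
/* Worked example (illustrative values): per the PCIe SR-IOV spec, VF i of
 * a PF sits at devfn = PF devfn + First VF Offset + i * VF Stride, which
 * is what the vf_fn computation above evaluates. With a PF at devfn 0,
 * offset 4 and stride 2, the VFs are expected at devfn 4, 6, 8, ...; each
 * match is counted, and for ASSIGNED only those flagged as assigned to a
 * guest are counted.
 */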

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}
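
/* Worked example (illustrative): with adaptive interrupt coalescing
 * enabled, a measured rate of 440000 pkts/s gives
 * eqd = (440000 / 110000) << 3 = 32, which is then clamped to
 * [min_eqd, max_eqd]; anything below 10 is forced to 0 (no delay), and
 * the EQ delay is reprogrammed only when the value actually changes.
 */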

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        /* Bug fix: the rss hash must be parsed from the completion entry
         * (compl), not from the software rxcp struct being filled in */
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
1384
1385 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1386                                  struct be_rx_compl_info *rxcp)
1387 {
1388         rxcp->pkt_size =
1389                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1390         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1391         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1392         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1393         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1394         rxcp->ip_csum =
1395                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1396         rxcp->l4_csum =
1397                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1398         rxcp->ipv6 =
1399                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1400         rxcp->rxq_idx =
1401                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1402         rxcp->num_rcvd =
1403                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1404         rxcp->pkt_type =
1405                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1406         rxcp->rss_hash =
1407                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1408         if (rxcp->vlanf) {
1409                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1410                                           compl);
1411                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1412                                                compl);
1413         }
1414         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1415 }
1416
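/* Return the next valid RX completion from the CQ, or NULL if none is
 * pending; the rmb() ensures the entry is not read before its valid bit.
 */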
1417 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1418 {
1419         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1420         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1421         struct be_adapter *adapter = rxo->adapter;
1422
1423         /* For checking the valid bit it is OK to use either definition, as the
1424          * valid bit is at the same position in both v0 and v1 Rx compls */
1425         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1426                 return NULL;
1427
1428         rmb();
1429         be_dws_le_to_cpu(compl, sizeof(*compl));
1430
1431         if (adapter->be3_native)
1432                 be_parse_rx_compl_v1(compl, rxcp);
1433         else
1434                 be_parse_rx_compl_v0(compl, rxcp);
1435
1436         if (rxcp->vlanf) {
1437                 /* vlanf could be wrongly set in some cards.
1438                  * ignore if vtm is not set */
1439                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1440                         rxcp->vlanf = 0;
1441
1442                 if (!lancer_chip(adapter))
1443                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1444
1445                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1446                     !adapter->vlan_tag[rxcp->vlan_tag])
1447                         rxcp->vlanf = 0;
1448         }
1449
1450         /* As the compl has been parsed, reset it; we won't touch it again */
1451         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1452
1453         queue_tail_inc(&rxo->cq);
1454         return rxcp;
1455 }
1456
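/* Allocate pages to cover 'size' bytes; __GFP_COMP is set for order > 0
 * allocations so the buffer is refcounted as one compound page even when
 * it is later carved into rx_frag_size fragments.
 */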
1457 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1458 {
1459         u32 order = get_order(size);
1460
1461         if (order > 0)
1462                 gfp |= __GFP_COMP;
1463         return  alloc_pages(gfp, order);
1464 }
1465
1466 /*
1467  * Allocate a page, split it to fragments of size rx_frag_size and post as
1468  * receive buffers to BE
1469  */
1470 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1471 {
1472         struct be_adapter *adapter = rxo->adapter;
1473         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1474         struct be_queue_info *rxq = &rxo->q;
1475         struct page *pagep = NULL;
1476         struct be_eth_rx_d *rxd;
1477         u64 page_dmaaddr = 0, frag_dmaaddr;
1478         u32 posted, page_offset = 0;
1479
1480         page_info = &rxo->page_info_tbl[rxq->head];
1481         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1482                 if (!pagep) {
1483                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1484                         if (unlikely(!pagep)) {
1485                                 rx_stats(rxo)->rx_post_fail++;
1486                                 break;
1487                         }
1488                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1489                                                     0, adapter->big_page_size,
1490                                                     DMA_FROM_DEVICE);
1491                         page_info->page_offset = 0;
1492                 } else {
1493                         get_page(pagep);
1494                         page_info->page_offset = page_offset + rx_frag_size;
1495                 }
1496                 page_offset = page_info->page_offset;
1497                 page_info->page = pagep;
1498                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1499                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1500
1501                 rxd = queue_head_node(rxq);
1502                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1503                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1504
1505                 /* Any space left in the current big page for another frag? */
1506                 if ((page_offset + rx_frag_size + rx_frag_size) >
1507                                         adapter->big_page_size) {
1508                         pagep = NULL;
1509                         page_info->last_page_user = true;
1510                 }
1511
1512                 prev_page_info = page_info;
1513                 queue_head_inc(rxq);
1514                 page_info = &rxo->page_info_tbl[rxq->head];
1515         }
1516         if (pagep)
1517                 prev_page_info->last_page_user = true;
1518
1519         if (posted) {
1520                 atomic_add(posted, &rxq->used);
1521                 be_rxq_notify(adapter, rxq->id, posted);
1522         } else if (atomic_read(&rxq->used) == 0) {
1523                 /* Let be_worker replenish when memory is available */
1524                 rxo->rx_post_starved = true;
1525         }
1526 }
1527
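/* Return the next valid TX completion from the CQ (clearing its valid
 * bit), or NULL if none is pending.
 */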
1528 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1529 {
1530         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1531
1532         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1533                 return NULL;
1534
1535         rmb();
1536         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1537
1538         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1539
1540         queue_tail_inc(tx_cq);
1541         return txcp;
1542 }
1543
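/* Unmap and free the skb whose wrbs end at last_index; returns the
 * number of wrbs (including the header wrb) reclaimed from the txq.
 */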
1544 static u16 be_tx_compl_process(struct be_adapter *adapter,
1545                 struct be_tx_obj *txo, u16 last_index)
1546 {
1547         struct be_queue_info *txq = &txo->q;
1548         struct be_eth_wrb *wrb;
1549         struct sk_buff **sent_skbs = txo->sent_skb_list;
1550         struct sk_buff *sent_skb;
1551         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1552         bool unmap_skb_hdr = true;
1553
1554         sent_skb = sent_skbs[txq->tail];
1555         BUG_ON(!sent_skb);
1556         sent_skbs[txq->tail] = NULL;
1557
1558         /* skip header wrb */
1559         queue_tail_inc(txq);
1560
1561         do {
1562                 cur_index = txq->tail;
1563                 wrb = queue_tail_node(txq);
1564                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1565                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1566                 unmap_skb_hdr = false;
1567
1568                 num_wrbs++;
1569                 queue_tail_inc(txq);
1570         } while (cur_index != last_index);
1571
1572         kfree_skb(sent_skb);
1573         return num_wrbs;
1574 }
1575
1576 /* Return the number of events in the event queue */
1577 static inline int events_get(struct be_eq_obj *eqo)
1578 {
1579         struct be_eq_entry *eqe;
1580         int num = 0;
1581
1582         do {
1583                 eqe = queue_tail_node(&eqo->q);
1584                 if (eqe->evt == 0)
1585                         break;
1586
1587                 rmb();
1588                 eqe->evt = 0;
1589                 num++;
1590                 queue_tail_inc(&eqo->q);
1591         } while (true);
1592
1593         return num;
1594 }
1595
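/* Drain the EQ and schedule NAPI; the EQ is re-armed only when no events
 * were found, i.e. on a spurious interrupt.
 */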
1596 static int event_handle(struct be_eq_obj *eqo)
1597 {
1598         bool rearm = false;
1599         int num = events_get(eqo);
1600
1601         /* Deal with any spurious interrupts that come without events */
1602         if (!num)
1603                 rearm = true;
1604
1605         if (num || msix_enabled(eqo->adapter))
1606                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1607
1608         if (num)
1609                 napi_schedule(&eqo->napi);
1610
1611         return num;
1612 }
1613
1614 /* Leaves the EQ in a disarmed state */
1615 static void be_eq_clean(struct be_eq_obj *eqo)
1616 {
1617         int num = events_get(eqo);
1618
1619         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1620 }
1621
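/* Used at RXQ-destroy time: drain pending RX completions, then release
 * the posted buffers for which no completion will ever arrive.
 */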
1622 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1623 {
1624         struct be_rx_page_info *page_info;
1625         struct be_queue_info *rxq = &rxo->q;
1626         struct be_queue_info *rx_cq = &rxo->cq;
1627         struct be_rx_compl_info *rxcp;
1628         u16 tail;
1629
1630         /* First cleanup pending rx completions */
1631         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1632                 be_rx_compl_discard(rxo, rxcp);
1633                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1634         }
1635
1636         /* Then free posted rx buffers that were not used */
1637         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1638         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1639                 page_info = get_rx_page_info(rxo, tail);
1640                 put_page(page_info->page);
1641                 memset(page_info, 0, sizeof(*page_info));
1642         }
1643         BUG_ON(atomic_read(&rxq->used));
1644         rxq->tail = rxq->head = 0;
1645 }
1646
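/* Reap TX completions at tear-down time; wrbs still outstanding after the
 * wait below are freed without a completion.
 */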
1647 static void be_tx_compl_clean(struct be_adapter *adapter)
1648 {
1649         struct be_tx_obj *txo;
1650         struct be_queue_info *txq;
1651         struct be_eth_tx_compl *txcp;
1652         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1653         struct sk_buff *sent_skb;
1654         bool dummy_wrb;
1655         int i, pending_txqs;
1656
1657         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1658         do {
1659                 pending_txqs = adapter->num_tx_qs;
1660
1661                 for_all_tx_queues(adapter, txo, i) {
1662                         txq = &txo->q;
1663                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1664                                 end_idx =
1665                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1666                                                       wrb_index, txcp);
1667                                 num_wrbs += be_tx_compl_process(adapter, txo,
1668                                                                 end_idx);
1669                                 cmpl++;
1670                         }
1671                         if (cmpl) {
1672                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1673                                 atomic_sub(num_wrbs, &txq->used);
1674                                 cmpl = 0;
1675                                 num_wrbs = 0;
1676                         }
1677                         if (atomic_read(&txq->used) == 0)
1678                                 pending_txqs--;
1679                 }
1680
1681                 if (pending_txqs == 0 || ++timeo > 200)
1682                         break;
1683
1684                 mdelay(1);
1685         } while (true);
1686
1687         for_all_tx_queues(adapter, txo, i) {
1688                 txq = &txo->q;
1689                 if (atomic_read(&txq->used))
1690                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1691                                 atomic_read(&txq->used));
1692
1693                 /* free posted tx for which compls will never arrive */
1694                 while (atomic_read(&txq->used)) {
1695                         sent_skb = txo->sent_skb_list[txq->tail];
1696                         end_idx = txq->tail;
1697                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1698                                                    &dummy_wrb);
1699                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1700                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1701                         atomic_sub(num_wrbs, &txq->used);
1702                 }
1703         }
1704 }
1705
1706 static void be_evt_queues_destroy(struct be_adapter *adapter)
1707 {
1708         struct be_eq_obj *eqo;
1709         int i;
1710
1711         for_all_evt_queues(adapter, eqo, i) {
1712                 be_eq_clean(eqo);
1713                 if (eqo->q.created)
1714                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1715                 be_queue_free(adapter, &eqo->q);
1716         }
1717 }
1718
1719 static int be_evt_queues_create(struct be_adapter *adapter)
1720 {
1721         struct be_queue_info *eq;
1722         struct be_eq_obj *eqo;
1723         int i, rc;
1724
1725         adapter->num_evt_qs = num_irqs(adapter);
1726
1727         for_all_evt_queues(adapter, eqo, i) {
1728                 eqo->adapter = adapter;
1729                 eqo->tx_budget = BE_TX_BUDGET;
1730                 eqo->idx = i;
1731                 eqo->max_eqd = BE_MAX_EQD;
1732                 eqo->enable_aic = true;
1733
1734                 eq = &eqo->q;
1735                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736                                         sizeof(struct be_eq_entry));
1737                 if (rc)
1738                         return rc;
1739
1740                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1741                 if (rc)
1742                         return rc;
1743         }
1744         return 0;
1745 }
1746
1747 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1748 {
1749         struct be_queue_info *q;
1750
1751         q = &adapter->mcc_obj.q;
1752         if (q->created)
1753                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1754         be_queue_free(adapter, q);
1755
1756         q = &adapter->mcc_obj.cq;
1757         if (q->created)
1758                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1759         be_queue_free(adapter, q);
1760 }
1761
1762 /* Must be called only after TX qs are created as MCC shares TX EQ */
1763 static int be_mcc_queues_create(struct be_adapter *adapter)
1764 {
1765         struct be_queue_info *q, *cq;
1766
1767         cq = &adapter->mcc_obj.cq;
1768         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1769                         sizeof(struct be_mcc_compl)))
1770                 goto err;
1771
1772         /* Use the default EQ for MCC completions */
1773         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1774                 goto mcc_cq_free;
1775
1776         q = &adapter->mcc_obj.q;
1777         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1778                 goto mcc_cq_destroy;
1779
1780         if (be_cmd_mccq_create(adapter, q, cq))
1781                 goto mcc_q_free;
1782
1783         return 0;
1784
1785 mcc_q_free:
1786         be_queue_free(adapter, q);
1787 mcc_cq_destroy:
1788         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1789 mcc_cq_free:
1790         be_queue_free(adapter, cq);
1791 err:
1792         return -1;
1793 }
1794
1795 static void be_tx_queues_destroy(struct be_adapter *adapter)
1796 {
1797         struct be_queue_info *q;
1798         struct be_tx_obj *txo;
1799         u8 i;
1800
1801         for_all_tx_queues(adapter, txo, i) {
1802                 q = &txo->q;
1803                 if (q->created)
1804                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1805                 be_queue_free(adapter, q);
1806
1807                 q = &txo->cq;
1808                 if (q->created)
1809                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1810                 be_queue_free(adapter, q);
1811         }
1812 }
1813
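/* Multiple TXQs are used only on a BE3 PF that is not in SR-IOV or
 * multi-channel mode; Lancer, BE2 and VFs get a single TXQ.
 */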
1814 static int be_num_txqs_want(struct be_adapter *adapter)
1815 {
1816         if (sriov_want(adapter) || be_is_mc(adapter) ||
1817             lancer_chip(adapter) || !be_physfn(adapter) ||
1818             adapter->generation == BE_GEN2)
1819                 return 1;
1820         else
1821                 return MAX_TX_QS;
1822 }
1823
1824 static int be_tx_cqs_create(struct be_adapter *adapter)
1825 {
1826         struct be_queue_info *cq, *eq;
1827         int status;
1828         struct be_tx_obj *txo;
1829         u8 i;
1830
1831         adapter->num_tx_qs = be_num_txqs_want(adapter);
1832         if (adapter->num_tx_qs != MAX_TX_QS) {
1833                 rtnl_lock();
1834                 netif_set_real_num_tx_queues(adapter->netdev,
1835                         adapter->num_tx_qs);
1836                 rtnl_unlock();
1837         }
1838
1839         for_all_tx_queues(adapter, txo, i) {
1840                 cq = &txo->cq;
1841                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1842                                         sizeof(struct be_eth_tx_compl));
1843                 if (status)
1844                         return status;
1845
1846                 /* If num_evt_qs is less than num_tx_qs, then more than
1847                  * one txq shares an eq
1848                  */
1849                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1850                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1851                 if (status)
1852                         return status;
1853         }
1854         return 0;
1855 }
1856
1857 static int be_tx_qs_create(struct be_adapter *adapter)
1858 {
1859         struct be_tx_obj *txo;
1860         int i, status;
1861
1862         for_all_tx_queues(adapter, txo, i) {
1863                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1864                                         sizeof(struct be_eth_wrb));
1865                 if (status)
1866                         return status;
1867
1868                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1869                 if (status)
1870                         return status;
1871         }
1872
1873         return 0;
1874 }
1875
1876 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1877 {
1878         struct be_queue_info *q;
1879         struct be_rx_obj *rxo;
1880         int i;
1881
1882         for_all_rx_queues(adapter, rxo, i) {
1883                 q = &rxo->cq;
1884                 if (q->created)
1885                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1886                 be_queue_free(adapter, q);
1887         }
1888 }
1889
1890 static int be_rx_cqs_create(struct be_adapter *adapter)
1891 {
1892         struct be_queue_info *eq, *cq;
1893         struct be_rx_obj *rxo;
1894         int rc, i;
1895
1896         /* We create one RSS ring per irq plus one default (non-RSS) RXQ.
1897          * With a single irq, RSS is of no use, so only the default RXQ is created.
1898          */
1899         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1900                                 num_irqs(adapter) + 1 : 1;
1901
1902         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1903         for_all_rx_queues(adapter, rxo, i) {
1904                 rxo->adapter = adapter;
1905                 cq = &rxo->cq;
1906                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1907                                 sizeof(struct be_eth_rx_compl));
1908                 if (rc)
1909                         return rc;
1910
1911                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1912                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1913                 if (rc)
1914                         return rc;
1915         }
1916
1917         if (adapter->num_rx_qs != MAX_RX_QS)
1918                 dev_info(&adapter->pdev->dev,
1919                         "Created only %d receive queues\n", adapter->num_rx_qs);
1920
1921         return 0;
1922 }
1923
1924 static irqreturn_t be_intx(int irq, void *dev)
1925 {
1926         struct be_adapter *adapter = dev;
1927         int num_evts;
1928
1929         /* With INTx only one EQ is used */
1930         num_evts = event_handle(&adapter->eq_obj[0]);
1931         if (num_evts)
1932                 return IRQ_HANDLED;
1933         else
1934                 return IRQ_NONE;
1935 }
1936
1937 static irqreturn_t be_msix(int irq, void *dev)
1938 {
1939         struct be_eq_obj *eqo = dev;
1940
1941         event_handle(eqo);
1942         return IRQ_HANDLED;
1943 }
1944
1945 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1946 {
1947         return rxcp->tcpf && !rxcp->err;
1948 }
1949
1950 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1951                         int budget)
1952 {
1953         struct be_adapter *adapter = rxo->adapter;
1954         struct be_queue_info *rx_cq = &rxo->cq;
1955         struct be_rx_compl_info *rxcp;
1956         u32 work_done;
1957
1958         for (work_done = 0; work_done < budget; work_done++) {
1959                 rxcp = be_rx_compl_get(rxo);
1960                 if (!rxcp)
1961                         break;
1962
1963                 /* Is it a flush compl that has no data? */
1964                 if (unlikely(rxcp->num_rcvd == 0))
1965                         goto loop_continue;
1966
1967                 /* On Lancer B0, discard a compl with partial DMA (zero pkt_size) */
1968                 if (unlikely(!rxcp->pkt_size)) {
1969                         be_rx_compl_discard(rxo, rxcp);
1970                         goto loop_continue;
1971                 }
1972
1973                 /* On BE drop pkts that arrive due to imperfect filtering in
1974                  * promiscuous mode on some SKUs
1975                  */
1976                 if (unlikely(rxcp->port != adapter->port_num &&
1977                                 !lancer_chip(adapter))) {
1978                         be_rx_compl_discard(rxo, rxcp);
1979                         goto loop_continue;
1980                 }
1981
1982                 if (do_gro(rxcp))
1983                         be_rx_compl_process_gro(rxo, napi, rxcp);
1984                 else
1985                         be_rx_compl_process(rxo, rxcp);
1986 loop_continue:
1987                 be_rx_stats_update(rxo, rxcp);
1988         }
1989
1990         if (work_done) {
1991                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1992
1993                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1994                         be_post_rx_frags(rxo, GFP_ATOMIC);
1995         }
1996
1997         return work_done;
1998 }
1999
2000 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2001                           int budget, int idx)
2002 {
2003         struct be_eth_tx_compl *txcp;
2004         int num_wrbs = 0, work_done;
2005
2006         for (work_done = 0; work_done < budget; work_done++) {
2007                 txcp = be_tx_compl_get(&txo->cq);
2008                 if (!txcp)
2009                         break;
2010                 num_wrbs += be_tx_compl_process(adapter, txo,
2011                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2012                                         wrb_index, txcp));
2013         }
2014
2015         if (work_done) {
2016                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2017                 atomic_sub(num_wrbs, &txo->q.used);
2018
2019                 /* As Tx wrbs have been freed up, wake up netdev queue
2020                  * if it was stopped due to lack of tx wrbs.  */
2021                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2022                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2023                         netif_wake_subqueue(adapter->netdev, idx);
2024                 }
2025
2026                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2027                 tx_stats(txo)->tx_compl += work_done;
2028                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2029         }
2030         return (work_done < budget); /* Done */
2031 }
2032
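/* NAPI poll handler: services every TXQ and RXQ mapped to this EQ
 * (queue i maps to EQ i % num_evt_qs) and, on the MCC EQ, the MCC
 * queue as well.
 */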
2033 int be_poll(struct napi_struct *napi, int budget)
2034 {
2035         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2036         struct be_adapter *adapter = eqo->adapter;
2037         int max_work = 0, work, i;
2038         bool tx_done;
2039
2040         /* Process all TXQs serviced by this EQ */
2041         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2042                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2043                                         eqo->tx_budget, i);
2044                 if (!tx_done)
2045                         max_work = budget;
2046         }
2047
2048         /* This loop will iterate twice for EQ0 in which
2049          * completions of the last RXQ (default one) are also processed.
2050          * For other EQs the loop iterates only once.
2051          */
2052         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2053                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2054                 max_work = max(work, max_work);
2055         }
2056
2057         if (is_mcc_eqo(eqo))
2058                 be_process_mcc(adapter);
2059
2060         if (max_work < budget) {
2061                 napi_complete(napi);
2062                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2063         } else {
2064                 /* As we'll continue in polling mode, count and clear events */
2065                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2066         }
2067         return max_work;
2068 }
2069
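/* Detect an unrecoverable HW error: SLIPORT status registers on Lancer,
 * unmasked UE status bits elsewhere; on error, latch ue_detected/eeh_err
 * and log the failing blocks.
 */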
2070 void be_detect_dump_ue(struct be_adapter *adapter)
2071 {
2072         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2073         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2074         u32 i;
2075
2076         if (adapter->eeh_err || adapter->ue_detected)
2077                 return;
2078
2079         if (lancer_chip(adapter)) {
2080                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2081                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2082                         sliport_err1 = ioread32(adapter->db +
2083                                         SLIPORT_ERROR1_OFFSET);
2084                         sliport_err2 = ioread32(adapter->db +
2085                                         SLIPORT_ERROR2_OFFSET);
2086                 }
2087         } else {
2088                 pci_read_config_dword(adapter->pdev,
2089                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2090                 pci_read_config_dword(adapter->pdev,
2091                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2092                 pci_read_config_dword(adapter->pdev,
2093                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2094                 pci_read_config_dword(adapter->pdev,
2095                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2096
2097                 ue_lo = (ue_lo & (~ue_lo_mask));
2098                 ue_hi = (ue_hi & (~ue_hi_mask));
2099         }
2100
2101         if (ue_lo || ue_hi ||
2102                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2103                 adapter->ue_detected = true;
2104                 adapter->eeh_err = true;
2105                 dev_err(&adapter->pdev->dev,
2106                         "Unrecoverable error in the card\n");
2107         }
2108
2109         if (ue_lo) {
2110                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2111                         if (ue_lo & 1)
2112                                 dev_err(&adapter->pdev->dev,
2113                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2114                 }
2115         }
2116         if (ue_hi) {
2117                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2118                         if (ue_hi & 1)
2119                                 dev_err(&adapter->pdev->dev,
2120                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2121                 }
2122         }
2123
2124         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2125                 dev_err(&adapter->pdev->dev,
2126                         "sliport status 0x%x\n", sliport_status);
2127                 dev_err(&adapter->pdev->dev,
2128                         "sliport error1 0x%x\n", sliport_err1);
2129                 dev_err(&adapter->pdev->dev,
2130                         "sliport error2 0x%x\n", sliport_err2);
2131         }
2132 }
2133
2134 static void be_msix_disable(struct be_adapter *adapter)
2135 {
2136         if (msix_enabled(adapter)) {
2137                 pci_disable_msix(adapter->pdev);
2138                 adapter->num_msix_vec = 0;
2139         }
2140 }
2141
2142 static uint be_num_rss_want(struct be_adapter *adapter)
2143 {
2144         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2145              !sriov_want(adapter) && be_physfn(adapter) &&
2146              !be_is_mc(adapter))
2147                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148         else
2149                 return 0;
2150 }
2151
2152 static void be_msix_enable(struct be_adapter *adapter)
2153 {
2154 #define BE_MIN_MSIX_VECTORS             1
2155         int i, status, num_vec, num_roce_vec = 0;
2156
2157         /* If RSS queues are not used, need a vec for default RX Q */
2158         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2159         if (be_roce_supported(adapter)) {
2160                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2161                                         (num_online_cpus() + 1));
2162                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2163                 num_vec += num_roce_vec;
2164                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2165         }
2166         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2167
2168         for (i = 0; i < num_vec; i++)
2169                 adapter->msix_entries[i].entry = i;
2170
2171         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2172         if (status == 0) {
2173                 goto done;
2174         } else if (status >= BE_MIN_MSIX_VECTORS) {
2175                 num_vec = status;
2176                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2177                                 num_vec) == 0)
2178                         goto done;
2179         }
2180         return;
2181 done:
2182         if (be_roce_supported(adapter)) {
2183                 if (num_vec > num_roce_vec) {
2184                         adapter->num_msix_vec = num_vec - num_roce_vec;
2185                         adapter->num_msix_roce_vec =
2186                                 num_vec - adapter->num_msix_vec;
2187                 } else {
2188                         adapter->num_msix_vec = num_vec;
2189                         adapter->num_msix_roce_vec = 0;
2190                 }
2191         } else
2192                 adapter->num_msix_vec = num_vec;
2193         return;
2194 }
2195
2196 static inline int be_msix_vec_get(struct be_adapter *adapter,
2197                                 struct be_eq_obj *eqo)
2198 {
2199         return adapter->msix_entries[eqo->idx].vector;
2200 }
2201
2202 static int be_msix_register(struct be_adapter *adapter)
2203 {
2204         struct net_device *netdev = adapter->netdev;
2205         struct be_eq_obj *eqo;
2206         int status, i, vec;
2207
2208         for_all_evt_queues(adapter, eqo, i) {
2209                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2210                 vec = be_msix_vec_get(adapter, eqo);
2211                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2212                 if (status)
2213                         goto err_msix;
2214         }
2215
2216         return 0;
2217 err_msix:
2218         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2219                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2220         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2221                 status);
2222         be_msix_disable(adapter);
2223         return status;
2224 }
2225
2226 static int be_irq_register(struct be_adapter *adapter)
2227 {
2228         struct net_device *netdev = adapter->netdev;
2229         int status;
2230
2231         if (msix_enabled(adapter)) {
2232                 status = be_msix_register(adapter);
2233                 if (status == 0)
2234                         goto done;
2235                 /* INTx is not supported for VF */
2236                 if (!be_physfn(adapter))
2237                         return status;
2238         }
2239
2240         /* INTx */
2241         netdev->irq = adapter->pdev->irq;
2242         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2243                         adapter);
2244         if (status) {
2245                 dev_err(&adapter->pdev->dev,
2246                         "INTx request IRQ failed - err %d\n", status);
2247                 return status;
2248         }
2249 done:
2250         adapter->isr_registered = true;
2251         return 0;
2252 }
2253
2254 static void be_irq_unregister(struct be_adapter *adapter)
2255 {
2256         struct net_device *netdev = adapter->netdev;
2257         struct be_eq_obj *eqo;
2258         int i;
2259
2260         if (!adapter->isr_registered)
2261                 return;
2262
2263         /* INTx */
2264         if (!msix_enabled(adapter)) {
2265                 free_irq(netdev->irq, adapter);
2266                 goto done;
2267         }
2268
2269         /* MSIx */
2270         for_all_evt_queues(adapter, eqo, i)
2271                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2272
2273 done:
2274         adapter->isr_registered = false;
2275 }
2276
2277 static void be_rx_qs_destroy(struct be_adapter *adapter)
2278 {
2279         struct be_queue_info *q;
2280         struct be_rx_obj *rxo;
2281         int i;
2282
2283         for_all_rx_queues(adapter, rxo, i) {
2284                 q = &rxo->q;
2285                 if (q->created) {
2286                         be_cmd_rxq_destroy(adapter, q);
2287                         /* After the rxq is invalidated, wait for a grace time
2288                          * of 1ms for all dma to end and the flush compl to
2289                          * arrive
2290                          */
2291                         mdelay(1);
2292                         be_rx_cq_clean(rxo);
2293                 }
2294                 be_queue_free(adapter, q);
2295         }
2296 }
2297
2298 static int be_close(struct net_device *netdev)
2299 {
2300         struct be_adapter *adapter = netdev_priv(netdev);
2301         struct be_eq_obj *eqo;
2302         int i;
2303
2304         be_roce_dev_close(adapter);
2305
2306         be_async_mcc_disable(adapter);
2307
2308         if (!lancer_chip(adapter))
2309                 be_intr_set(adapter, false);
2310
2311         for_all_evt_queues(adapter, eqo, i) {
2312                 napi_disable(&eqo->napi);
2313                 if (msix_enabled(adapter))
2314                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2315                 else
2316                         synchronize_irq(netdev->irq);
2317                 be_eq_clean(eqo);
2318         }
2319
2320         be_irq_unregister(adapter);
2321
2322         /* Wait for all pending tx completions to arrive so that
2323          * all tx skbs are freed.
2324          */
2325         be_tx_compl_clean(adapter);
2326
2327         be_rx_qs_destroy(adapter);
2328         return 0;
2329 }
2330
2331 static int be_rx_qs_create(struct be_adapter *adapter)
2332 {
2333         struct be_rx_obj *rxo;
2334         int rc, i, j;
2335         u8 rsstable[128];
2336
2337         for_all_rx_queues(adapter, rxo, i) {
2338                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2339                                     sizeof(struct be_eth_rx_d));
2340                 if (rc)
2341                         return rc;
2342         }
2343
2344         /* The FW would like the default RXQ to be created first */
2345         rxo = default_rxo(adapter);
2346         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2347                                adapter->if_handle, false, &rxo->rss_id);
2348         if (rc)
2349                 return rc;
2350
2351         for_all_rss_queues(adapter, rxo, i) {
2352                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2353                                        rx_frag_size, adapter->if_handle,
2354                                        true, &rxo->rss_id);
2355                 if (rc)
2356                         return rc;
2357         }
2358
2359         if (be_multi_rxq(adapter)) {
2360                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2361                         for_all_rss_queues(adapter, rxo, i) {
2362                                 if ((j + i) >= 128)
2363                                         break;
2364                                 rsstable[j + i] = rxo->rss_id;
2365                         }
2366                 }
2367                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2368                 if (rc)
2369                         return rc;
2370         }
2371
2372         /* First time posting */
2373         for_all_rx_queues(adapter, rxo, i)
2374                 be_post_rx_frags(rxo, GFP_KERNEL);
2375         return 0;
2376 }
2377
2378 static int be_open(struct net_device *netdev)
2379 {
2380         struct be_adapter *adapter = netdev_priv(netdev);
2381         struct be_eq_obj *eqo;
2382         struct be_rx_obj *rxo;
2383         struct be_tx_obj *txo;
2384         u8 link_status;
2385         int status, i;
2386
2387         status = be_rx_qs_create(adapter);
2388         if (status)
2389                 goto err;
2390
2391         be_irq_register(adapter);
2392
2393         if (!lancer_chip(adapter))
2394                 be_intr_set(adapter, true);
2395
2396         for_all_rx_queues(adapter, rxo, i)
2397                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2398
2399         for_all_tx_queues(adapter, txo, i)
2400                 be_cq_notify(adapter, txo->cq.id, true, 0);
2401
2402         be_async_mcc_enable(adapter);
2403
2404         for_all_evt_queues(adapter, eqo, i) {
2405                 napi_enable(&eqo->napi);
2406                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2407         }
2408
2409         status = be_cmd_link_status_query(adapter, NULL, NULL,
2410                                           &link_status, 0);
2411         if (!status)
2412                 be_link_status_update(adapter, link_status);
2413
2414         be_roce_dev_open(adapter);
2415         return 0;
2416 err:
2417         be_close(adapter->netdev);
2418         return -EIO;
2419 }
2420
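/* Program (enable=true) or clear the magic-packet Wake-on-LAN filter and
 * set the matching PCI D3hot/D3cold wake state.
 */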
2421 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2422 {
2423         struct be_dma_mem cmd;
2424         int status = 0;
2425         u8 mac[ETH_ALEN];
2426
2427         memset(mac, 0, ETH_ALEN);
2428
2429         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2430         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2431                                     GFP_KERNEL);
2432         if (cmd.va == NULL)
2433                 return -1;
2434         memset(cmd.va, 0, cmd.size);
2435
2436         if (enable) {
2437                 status = pci_write_config_dword(adapter->pdev,
2438                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2439                 if (status) {
2440                         dev_err(&adapter->pdev->dev,
2441                                 "Could not enable Wake-on-lan\n");
2442                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2443                                           cmd.dma);
2444                         return status;
2445                 }
2446                 status = be_cmd_enable_magic_wol(adapter,
2447                                 adapter->netdev->dev_addr, &cmd);
2448                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2449                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2450         } else {
2451                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2452                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2453                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2454         }
2455
2456         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2457         return status;
2458 }
2459
2460 /*
2461  * Generate a seed MAC address from the PF MAC Address using jhash.
2462  * MAC addresses for VFs are assigned incrementally starting from the seed.
2463  * These addresses are programmed in the ASIC by the PF and the VF driver
2464  * queries for the MAC address during its probe.
2465  */
2466 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2467 {
2468         u32 vf;
2469         int status = 0;
2470         u8 mac[ETH_ALEN];
2471         struct be_vf_cfg *vf_cfg;
2472
2473         be_vf_eth_addr_generate(adapter, mac);
2474
2475         for_all_vfs(adapter, vf_cfg, vf) {
2476                 if (lancer_chip(adapter)) {
2477                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2478                 } else {
2479                         status = be_cmd_pmac_add(adapter, mac,
2480                                                  vf_cfg->if_handle,
2481                                                  &vf_cfg->pmac_id, vf + 1);
2482                 }
2483
2484                 if (status)
2485                         dev_err(&adapter->pdev->dev,
2486                         "Mac address assignment failed for VF %d\n", vf);
2487                 else
2488                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2489
2490                 mac[5] += 1;
2491         }
2492         return status;
2493 }
2494
2495 static void be_vf_clear(struct be_adapter *adapter)
2496 {
2497         struct be_vf_cfg *vf_cfg;
2498         u32 vf;
2499
2500         if (be_find_vfs(adapter, ASSIGNED)) {
2501                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2502                 goto done;
2503         }
2504
2505         for_all_vfs(adapter, vf_cfg, vf) {
2506                 if (lancer_chip(adapter))
2507                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2508                 else
2509                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2510                                         vf_cfg->pmac_id, vf + 1);
2511
2512                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2513         }
2514         pci_disable_sriov(adapter->pdev);
2515 done:
2516         kfree(adapter->vf_cfg);
2517         adapter->num_vfs = 0;
2518 }
2519
2520 static int be_clear(struct be_adapter *adapter)
2521 {
2522         int i = 1;
2523
2524         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2525                 cancel_delayed_work_sync(&adapter->work);
2526                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2527         }
2528
2529         if (sriov_enabled(adapter))
2530                 be_vf_clear(adapter);
2531
2532         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2533                 be_cmd_pmac_del(adapter, adapter->if_handle,
2534                         adapter->pmac_id[i], 0);
2535
2536         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2537
2538         be_mcc_queues_destroy(adapter);
2539         be_rx_cqs_destroy(adapter);
2540         be_tx_queues_destroy(adapter);
2541         be_evt_queues_destroy(adapter);
2542
2543         /* tell fw we're done with firing cmds */
2544         be_cmd_fw_clean(adapter);
2545
2546         be_msix_disable(adapter);
2547         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2548         return 0;
2549 }
2550
2551 static int be_vf_setup_init(struct be_adapter *adapter)
2552 {
2553         struct be_vf_cfg *vf_cfg;
2554         int vf;
2555
2556         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2557                                   GFP_KERNEL);
2558         if (!adapter->vf_cfg)
2559                 return -ENOMEM;
2560
2561         for_all_vfs(adapter, vf_cfg, vf) {
2562                 vf_cfg->if_handle = -1;
2563                 vf_cfg->pmac_id = -1;
2564         }
2565         return 0;
2566 }
2567
2568 static int be_vf_setup(struct be_adapter *adapter)
2569 {
2570         struct be_vf_cfg *vf_cfg;
2571         struct device *dev = &adapter->pdev->dev;
2572         u32 cap_flags, en_flags, vf;
2573         u16 def_vlan, lnk_speed;
2574         int status, enabled_vfs;
2575
2576         enabled_vfs = be_find_vfs(adapter, ENABLED);
2577         if (enabled_vfs) {
2578                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2579                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2580                 return 0;
2581         }
2582
2583         if (num_vfs > adapter->dev_num_vfs) {
2584                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2585                          adapter->dev_num_vfs, num_vfs);
2586                 num_vfs = adapter->dev_num_vfs;
2587         }
2588
2589         status = pci_enable_sriov(adapter->pdev, num_vfs);
2590         if (!status) {
2591                 adapter->num_vfs = num_vfs;
2592         } else {
2593                 /* Platform doesn't support SRIOV though device supports it */
2594                 dev_warn(dev, "SRIOV enable failed\n");
2595                 return 0;
2596         }
2597
2598         status = be_vf_setup_init(adapter);
2599         if (status)
2600                 goto err;
2601
2602         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603                                 BE_IF_FLAGS_MULTICAST;
2604         for_all_vfs(adapter, vf_cfg, vf) {
2605                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2606                                           &vf_cfg->if_handle, NULL, vf + 1);
2607                 if (status)
2608                         goto err;
2609         }
2610
2611         if (!enabled_vfs) {
2612                 status = be_vf_eth_addr_config(adapter);
2613                 if (status)
2614                         goto err;
2615         }
2616
2617         for_all_vfs(adapter, vf_cfg, vf) {
2618                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2619                                                   NULL, vf + 1);
2620                 if (status)
2621                         goto err;
2622                 vf_cfg->tx_rate = lnk_speed * 10;
2623
2624                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2625                                 vf + 1, vf_cfg->if_handle);
2626                 if (status)
2627                         goto err;
2628                 vf_cfg->def_vid = def_vlan;
2629         }
2630         return 0;
2631 err:
2632         return status;
2633 }
2634
2635 static void be_setup_init(struct be_adapter *adapter)
2636 {
2637         adapter->vlan_prio_bmap = 0xff;
2638         adapter->phy.link_speed = -1;
2639         adapter->if_handle = -1;
2640         adapter->be3_native = false;
2641         adapter->promiscuous = false;
2642         adapter->eq_next_idx = 0;
2643         adapter->phy.forced_port_speed = -1;
2644 }
2645
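/* Get the MAC from the FW mac-list (Lancer): reuse the pmac_id when the
 * entry is already active, else add the MAC as a new pmac.
 */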
2646 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2647 {
2648         u32 pmac_id;
2649         int status;
2650         bool pmac_id_active;
2651
2652         status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2653                                                         &pmac_id, mac);
2654         if (status != 0)
2655                 goto do_none;
2656
2657         if (pmac_id_active) {
2658                 status = be_cmd_mac_addr_query(adapter, mac,
2659                                 MAC_ADDRESS_TYPE_NETWORK,
2660                                 false, adapter->if_handle, pmac_id);
2661
2662                 if (!status)
2663                         adapter->pmac_id[0] = pmac_id;
2664         } else {
2665                 status = be_cmd_pmac_add(adapter, mac,
2666                                 adapter->if_handle, &adapter->pmac_id[0], 0);
2667         }
2668 do_none:
2669         return status;
2670 }
2671
2672 /* Routine to query per function resource limits */
2673 static int be_get_config(struct be_adapter *adapter)
2674 {
2675         int pos;
2676         u16 dev_num_vfs;
2677
2678         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2679         if (pos) {
2680                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2681                                      &dev_num_vfs);
2682                 adapter->dev_num_vfs = dev_num_vfs;
2683         }
2684         return 0;
2685 }
2686
2687 static int be_setup(struct be_adapter *adapter)
2688 {
2689         struct net_device *netdev = adapter->netdev;
2690         struct device *dev = &adapter->pdev->dev;
2691         u32 cap_flags, en_flags;
2692         u32 tx_fc, rx_fc;
2693         int status;
2694         u8 mac[ETH_ALEN];
2695
2696         be_setup_init(adapter);
2697
2698         be_get_config(adapter);
2699
2700         be_cmd_req_native_mode(adapter);
2701
2702         be_msix_enable(adapter);
2703
2704         status = be_evt_queues_create(adapter);
2705         if (status)
2706                 goto err;
2707
2708         status = be_tx_cqs_create(adapter);
2709         if (status)
2710                 goto err;
2711
2712         status = be_rx_cqs_create(adapter);
2713         if (status)
2714                 goto err;
2715
2716         status = be_mcc_queues_create(adapter);
2717         if (status)
2718                 goto err;
2719
2720         memset(mac, 0, ETH_ALEN);
2721         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2722                         true /*permanent */, 0, 0);
2723         if (status)
2724                 return status;
2725         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2726         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2727
2728         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2730         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2731                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2732
2733         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2734                 cap_flags |= BE_IF_FLAGS_RSS;
2735                 en_flags |= BE_IF_FLAGS_RSS;
2736         }
2737         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2738                         netdev->dev_addr, &adapter->if_handle,
2739                         &adapter->pmac_id[0], 0);
2740         if (status != 0)
2741                 goto err;
2742
2743          /* The VF's permanent mac queried from card is incorrect.
2744           * For BEx: Query the mac configured by the PF using if_handle
2745           * For Lancer: Get and use mac_list to obtain mac address.
2746           */
2747         if (!be_physfn(adapter)) {
2748                 if (lancer_chip(adapter))
2749                         status = be_add_mac_from_list(adapter, mac);
2750                 else
2751                         status = be_cmd_mac_addr_query(adapter, mac,
2752                                         MAC_ADDRESS_TYPE_NETWORK, false,
2753                                         adapter->if_handle, 0);
2754                 if (!status) {
2755                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2756                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2757                 }
2758         }
2759
2760         status = be_tx_qs_create(adapter);
2761         if (status)
2762                 goto err;
2763
2764         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2765
2766         be_vid_config(adapter, false, 0);
2767
2768         be_set_rx_mode(adapter->netdev);
2769
2770         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2771
2772         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2773                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2774                                         adapter->rx_fc);
2775
2776         pcie_set_readrq(adapter->pdev, 4096);
2777
2778         if (be_physfn(adapter) && num_vfs) {
2779                 if (adapter->dev_num_vfs)
2780                         be_vf_setup(adapter);
2781                 else
2782                         dev_warn(dev, "device doesn't support SRIOV\n");
2783         }
2784
2785         be_cmd_get_phy_info(adapter);
2786         if (be_pause_supported(adapter))
2787                 adapter->phy.fc_autoneg = 1;
2788
2789         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2790         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2791
2792         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2793         return 0;
2794 err:
2795         be_clear(adapter);
2796         return status;
2797 }
2798
2799 #ifdef CONFIG_NET_POLL_CONTROLLER
2800 static void be_netpoll(struct net_device *netdev)
2801 {
2802         struct be_adapter *adapter = netdev_priv(netdev);
2803         struct be_eq_obj *eqo;
2804         int i;
2805
2806         for_all_evt_queues(adapter, eqo, i)
2807                 event_handle(eqo);
2810 }
2811 #endif
2812
2813 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2814 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2815
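/* Decide whether the redboot section must be flashed: compare the CRC of
 * the image in the UFI file against the CRC currently in flash and flash
 * only when they differ.
 */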
2816 static bool be_flash_redboot(struct be_adapter *adapter,
2817                         const u8 *p, u32 img_start, int image_size,
2818                         int hdr_size)
2819 {
2820         u32 crc_offset;
2821         u8 flashed_crc[4];
2822         int status;
2823
2824         crc_offset = hdr_size + img_start + image_size - 4;
2825
2826         p += crc_offset;
2827
2828         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2829                         (image_size - 4));
2830         if (status) {
2831                 dev_err(&adapter->pdev->dev,
2832                 "could not get crc from flash, not flashing redboot\n");
2833                 return false;
2834         }
2835
2836         /* update redboot only if crc does not match */
2837         if (!memcmp(flashed_crc, p, 4))
2838                 return false;
2839         else
2840                 return true;
2841 }
2842
2843 static bool phy_flashing_required(struct be_adapter *adapter)
2844 {
2845         return (adapter->phy.phy_type == TN_8022 &&
2846                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2847 }
2848
2849 static bool is_comp_in_ufi(struct be_adapter *adapter,
2850                            struct flash_section_info *fsec, int type)
2851 {
2852         int i = 0, img_type = 0;
2853         struct flash_section_info_g2 *fsec_g2 = NULL;
2854
2855         if (adapter->generation != BE_GEN3)
2856                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2857
2858         for (i = 0; i < MAX_FLASH_COMP; i++) {
2859                 if (fsec_g2)
2860                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2861                 else
2862                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2863
2864                 if (img_type == type)
2865                         return true;
2866         }
2867         return false;
2869 }
2870
2871 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2872                                          int header_size,
2873                                          const struct firmware *fw)
2874 {
2875         struct flash_section_info *fsec = NULL;
2876         const u8 *p = fw->data;
2877
2878         p += header_size;
2879         while (p < (fw->data + fw->size)) {
2880                 fsec = (struct flash_section_info *)p;
2881                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2882                         return fsec;
2883                 p += 32;
2884         }
2885         return NULL;
2886 }
2887
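/*
 * Flash every component present in the UFI, using the per-generation
 * tables below that give each component's flash offset, firmware op
 * type, maximum size and UFI image type.
 */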
2888 static int be_flash_data(struct be_adapter *adapter,
2889                          const struct firmware *fw,
2890                          struct be_dma_mem *flash_cmd,
2891                          int num_of_images)
2893 {
2894         int status = 0, i, filehdr_size = 0;
2895         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2896         u32 total_bytes = 0, flash_op;
2897         int num_bytes;
2898         const u8 *p = fw->data;
2899         struct be_cmd_write_flashrom *req = flash_cmd->va;
2900         const struct flash_comp *pflashcomp;
2901         int num_comp, hdr_size;
2902         struct flash_section_info *fsec = NULL;
2903
2904         static const struct flash_comp gen3_flash_types[] = {
2905                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2906                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2907                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2908                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2909                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2910                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2911                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2912                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2913                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2914                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2915                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2916                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2917                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2918                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2919                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2920                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2921                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2922                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2923                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2924                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2925         };
2926
2927         static const struct flash_comp gen2_flash_types[] = {
2928                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2929                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2930                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2931                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2932                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2933                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2934                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2935                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2936                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2937                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2938                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2939                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2940                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2941                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2942                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2943                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2944         };
2945
2946         if (adapter->generation == BE_GEN3) {
2947                 pflashcomp = gen3_flash_types;
2948                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2949                 num_comp = ARRAY_SIZE(gen3_flash_types);
2950         } else {
2951                 pflashcomp = gen2_flash_types;
2952                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2953                 num_comp = ARRAY_SIZE(gen2_flash_types);
2954         }
2955         /* Get flash section info */
2956         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2957         if (!fsec) {
2958                 dev_err(&adapter->pdev->dev,
2959                         "Invalid Cookie. UFI corrupted?\n");
2960                 return -EINVAL;
2961         }
2962         for (i = 0; i < num_comp; i++) {
2963                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2964                         continue;
2965
2966                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2967                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2968