drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46         { 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51         "CEV",
52         "CTX",
53         "DBUF",
54         "ERX",
55         "Host",
56         "MPU",
57         "NDMA",
58         "PTC ",
59         "RDMA ",
60         "RXF ",
61         "RXIPS ",
62         "RXULP0 ",
63         "RXULP1 ",
64         "RXULP2 ",
65         "TIM ",
66         "TPOST ",
67         "TPRE ",
68         "TXIPS ",
69         "TXULP0 ",
70         "TXULP1 ",
71         "UC ",
72         "WDMA ",
73         "TXULP2 ",
74         "HOST1 ",
75         "P0_OB_LINK ",
76         "P1_OB_LINK ",
77         "HOST_GPIO ",
78         "MBOX ",
79         "AXGMAC0",
80         "AXGMAC1",
81         "JTAG",
82         "MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86         "LPCMEMHOST",
87         "MGMT_MAC",
88         "PCS0ONLINE",
89         "MPU_IRAM",
90         "PCS1ONLINE",
91         "PCTL0",
92         "PCTL1",
93         "PMEM",
94         "RR",
95         "TXPB",
96         "RXPP",
97         "XAUI",
98         "TXP",
99         "ARM",
100         "IPC",
101         "HOST2",
102         "HOST3",
103         "HOST4",
104         "HOST5",
105         "HOST6",
106         "HOST7",
107         "HOST8",
108         "HOST9",
109         "NETC",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown"
118 };
119
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122         return (adapter->function_mode & FLEX10_MODE ||
123                 adapter->function_mode & VNIC_MODE ||
124                 adapter->function_mode & UMC_ENABLED);
125 }
126
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129         struct be_dma_mem *mem = &q->dma_mem;
130         if (mem->va) {
131                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132                                   mem->dma);
133                 mem->va = NULL;
134         }
135 }
136
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138                 u16 len, u16 entry_size)
139 {
140         struct be_dma_mem *mem = &q->dma_mem;
141
142         memset(q, 0, sizeof(*q));
143         q->len = len;
144         q->entry_size = entry_size;
145         mem->size = len * entry_size;
146         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147                                      GFP_KERNEL);
148         if (!mem->va)
149                 return -ENOMEM;
150         memset(mem->va, 0, mem->size);
151         return 0;
152 }
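/* Illustrative usage sketch (editor's addition, not in the original file):
 * be_queue_alloc() and be_queue_free() are used as a pair. A caller
 * creating a TX ring might look like the following; TX_Q_LEN is assumed
 * here to be the driver's ring-depth constant.
 *
 *	struct be_queue_info *q = &txo->q;
 *
 *	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
 *		return -ENOMEM;		// DMA memory comes back zeroed
 *	...
 *	be_queue_free(adapter, q);	// no-op if va was never set
 */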
153
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
155 {
156         u32 reg, enabled;
157
158         if (adapter->eeh_err)
159                 return;
160
161         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162                                 &reg);
163         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165         if (!enabled && enable)
166                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else if (enabled && !enable)
168                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169         else
170                 return;
171
172         pci_write_config_dword(adapter->pdev,
173                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178         u32 val = 0;
179         val |= qid & DB_RQ_RING_ID_MASK;
180         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181
182         wmb();
183         iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188         u32 val = 0;
189         val |= qid & DB_TXULP_RING_ID_MASK;
190         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191
192         wmb();
193         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197                 bool arm, bool clear_int, u16 num_popped)
198 {
199         u32 val = 0;
200         val |= qid & DB_EQ_RING_ID_MASK;
201         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
203
204         if (adapter->eeh_err)
205                 return;
206
207         if (arm)
208                 val |= 1 << DB_EQ_REARM_SHIFT;
209         if (clear_int)
210                 val |= 1 << DB_EQ_CLR_SHIFT;
211         val |= 1 << DB_EQ_EVNT_SHIFT;
212         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213         iowrite32(val, adapter->db + DB_EQ_OFFSET);
214 }
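/* Editor's note (illustrative): each doorbell helper above packs a ring id
 * plus a command into a single 32-bit register write. A NAPI poll handler
 * would typically re-arm the event queue when it is done, along the lines
 * of (eqo and npopped are assumed names):
 *
 *	be_eq_notify(adapter, eqo->q.id, true, false, npopped);
 *
 * i.e. re-arm the EQ, do not clear the interrupt, and credit back the
 * npopped entries so the hardware can reuse them.
 */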
215
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217 {
218         u32 val = 0;
219         val |= qid & DB_CQ_RING_ID_MASK;
220         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
222
223         if (adapter->eeh_err)
224                 return;
225
226         if (arm)
227                 val |= 1 << DB_CQ_REARM_SHIFT;
228         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229         iowrite32(val, adapter->db + DB_CQ_OFFSET);
230 }
231
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
233 {
234         struct be_adapter *adapter = netdev_priv(netdev);
235         struct sockaddr *addr = p;
236         int status = 0;
237         u8 current_mac[ETH_ALEN];
238         u32 pmac_id = adapter->pmac_id[0];
239
240         if (!is_valid_ether_addr(addr->sa_data))
241                 return -EADDRNOTAVAIL;
242
243         status = be_cmd_mac_addr_query(adapter, current_mac,
244                                 MAC_ADDRESS_TYPE_NETWORK, false,
245                                 adapter->if_handle, 0);
246         if (status)
247                 goto err;
248
249         if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250                 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251                                 adapter->if_handle, &adapter->pmac_id[0], 0);
252                 if (status)
253                         goto err;
254
255                 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256         }
257         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258         return 0;
259 err:
260         dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261         return status;
262 }
263
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269         struct be_port_rxf_stats_v0 *port_stats =
270                                         &rxf_stats->port[adapter->port_num];
271         struct be_drv_stats *drvs = &adapter->drv_stats;
272
273         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274         drvs->rx_pause_frames = port_stats->rx_pause_frames;
275         drvs->rx_crc_errors = port_stats->rx_crc_errors;
276         drvs->rx_control_frames = port_stats->rx_control_frames;
277         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289         drvs->rx_dropped_header_too_small =
290                 port_stats->rx_dropped_header_too_small;
291         drvs->rx_address_mismatch_drops =
292                                         port_stats->rx_address_mismatch_drops +
293                                         port_stats->rx_vlan_mismatch_drops;
294         drvs->rx_alignment_symbol_errors =
295                 port_stats->rx_alignment_symbol_errors;
296
297         drvs->tx_pauseframes = port_stats->tx_pauseframes;
298         drvs->tx_controlframes = port_stats->tx_controlframes;
299
300         if (adapter->port_num)
301                 drvs->jabber_events = rxf_stats->port1_jabber_events;
302         else
303                 drvs->jabber_events = rxf_stats->port0_jabber_events;
304         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306         drvs->forwarded_packets = rxf_stats->forwarded_packets;
307         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318         struct be_port_rxf_stats_v1 *port_stats =
319                                         &rxf_stats->port[adapter->port_num];
320         struct be_drv_stats *drvs = &adapter->drv_stats;
321
322         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325         drvs->rx_pause_frames = port_stats->rx_pause_frames;
326         drvs->rx_crc_errors = port_stats->rx_crc_errors;
327         drvs->rx_control_frames = port_stats->rx_control_frames;
328         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338         drvs->rx_dropped_header_too_small =
339                 port_stats->rx_dropped_header_too_small;
340         drvs->rx_input_fifo_overflow_drop =
341                 port_stats->rx_input_fifo_overflow_drop;
342         drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343         drvs->rx_alignment_symbol_errors =
344                 port_stats->rx_alignment_symbol_errors;
345         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346         drvs->tx_pauseframes = port_stats->tx_pauseframes;
347         drvs->tx_controlframes = port_stats->tx_controlframes;
348         drvs->jabber_events = port_stats->jabber_events;
349         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351         drvs->forwarded_packets = rxf_stats->forwarded_packets;
352         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
361         struct be_drv_stats *drvs = &adapter->drv_stats;
362         struct lancer_pport_stats *pport_stats =
363                                         pport_stats_from_cmd(adapter);
364
365         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375         drvs->rx_dropped_tcp_length =
376                                 pport_stats->rx_dropped_invalid_tcp_length;
377         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380         drvs->rx_dropped_header_too_small =
381                                 pport_stats->rx_dropped_header_too_small;
382         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383         drvs->rx_address_mismatch_drops =
384                                         pport_stats->rx_address_mismatch_drops +
385                                         pport_stats->rx_vlan_mismatch_drops;
386         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390         drvs->jabber_events = pport_stats->rx_jabbers;
391         drvs->forwarded_packets = pport_stats->num_forwards_lo;
392         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393         drvs->rx_drops_too_many_frags =
394                                 pport_stats->rx_drops_too_many_frags_lo;
395 }
396
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x)                   (x & 0xFFFF)
400 #define hi(x)                   (x & 0xFFFF0000)
401         bool wrapped = val < lo(*acc);
402         u32 newacc = hi(*acc) + val;
403
404         if (wrapped)
405                 newacc += 65536;
406         ACCESS_ONCE(*acc) = newacc;
407 }
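/* Worked example (editor's addition): the hardware counter is only 16 bits
 * wide, so accumulate_16bit_val() keeps a wrap count in the upper half of
 * the accumulator and mirrors the raw counter in the lower half:
 *
 *	u32 acc = 0x0000FFF0;		// last seen HW value was 0xFFF0
 *	accumulate_16bit_val(&acc, 5);	// HW wrapped: 0xFFF0 -> 0x0005
 *	// acc is now 0x00010005: one wrap recorded, low half mirrors HW
 */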
408
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412         struct be_rx_obj *rxo;
413         int i;
414
415         if (adapter->generation == BE_GEN3) {
416                 if (lancer_chip(adapter))
417                         populate_lancer_stats(adapter);
418                 else
419                         populate_be3_stats(adapter);
420         } else {
421                 populate_be2_stats(adapter);
422         }
423
424         if (lancer_chip(adapter))
425                 goto done;
426
427         /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
428         for_all_rx_queues(adapter, rxo, i) {
429                 /* the erx HW counter below wraps around after 65535;
430                  * the driver accumulates it into a 32-bit value
431                  */
432                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433                                 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434         }
435 done:
436         return;
437 }
438
439 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440                                         struct rtnl_link_stats64 *stats)
441 {
442         struct be_adapter *adapter = netdev_priv(netdev);
443         struct be_drv_stats *drvs = &adapter->drv_stats;
444         struct be_rx_obj *rxo;
445         struct be_tx_obj *txo;
446         u64 pkts, bytes;
447         unsigned int start;
448         int i;
449
450         for_all_rx_queues(adapter, rxo, i) {
451                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
452                 do {
453                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454                         pkts = rx_stats(rxo)->rx_pkts;
455                         bytes = rx_stats(rxo)->rx_bytes;
456                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457                 stats->rx_packets += pkts;
458                 stats->rx_bytes += bytes;
459                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461                                         rx_stats(rxo)->rx_drops_no_frags;
462         }
463
464         for_all_tx_queues(adapter, txo, i) {
465                 const struct be_tx_stats *tx_stats = tx_stats(txo);
466                 do {
467                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468                         pkts = tx_stats(txo)->tx_pkts;
469                         bytes = tx_stats(txo)->tx_bytes;
470                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471                 stats->tx_packets += pkts;
472                 stats->tx_bytes += bytes;
473         }
474
475         /* bad pkts received */
476         stats->rx_errors = drvs->rx_crc_errors +
477                 drvs->rx_alignment_symbol_errors +
478                 drvs->rx_in_range_errors +
479                 drvs->rx_out_range_errors +
480                 drvs->rx_frame_too_long +
481                 drvs->rx_dropped_too_small +
482                 drvs->rx_dropped_too_short +
483                 drvs->rx_dropped_header_too_small +
484                 drvs->rx_dropped_tcp_length +
485                 drvs->rx_dropped_runt;
486
487         /* detailed rx errors */
488         stats->rx_length_errors = drvs->rx_in_range_errors +
489                 drvs->rx_out_range_errors +
490                 drvs->rx_frame_too_long;
491
492         stats->rx_crc_errors = drvs->rx_crc_errors;
493
494         /* frame alignment errors */
495         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
496
497         /* receiver fifo overrun */
498         /* drops_no_pbuf is not per i/f; it's per BE card */
499         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
500                                 drvs->rx_input_fifo_overflow_drop +
501                                 drvs->rx_drops_no_pbuf;
502         return stats;
503 }
504
505 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
506 {
507         struct net_device *netdev = adapter->netdev;
508
509         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
510                 netif_carrier_off(netdev);
511                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
512         }
513
514         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515                 netif_carrier_on(netdev);
516         else
517                 netif_carrier_off(netdev);
518 }
519
520 static void be_tx_stats_update(struct be_tx_obj *txo,
521                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
522 {
523         struct be_tx_stats *stats = tx_stats(txo);
524
525         u64_stats_update_begin(&stats->sync);
526         stats->tx_reqs++;
527         stats->tx_wrbs += wrb_cnt;
528         stats->tx_bytes += copied;
529         stats->tx_pkts += (gso_segs ? gso_segs : 1);
530         if (stopped)
531                 stats->tx_stops++;
532         u64_stats_update_end(&stats->sync);
533 }
534
535 /* Determine number of WRB entries needed to xmit data in an skb */
536 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537                                                                 bool *dummy)
538 {
539         int cnt = (skb->len > skb->data_len);
540
541         cnt += skb_shinfo(skb)->nr_frags;
542
543         /* to account for hdr wrb */
544         cnt++;
545         if (lancer_chip(adapter) || !(cnt & 1)) {
546                 *dummy = false;
547         } else {
548                 /* add a dummy to make it an even num */
549                 cnt++;
550                 *dummy = true;
551         }
552         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553         return cnt;
554 }
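/* Worked example (editor's addition): on BE2/BE3 an skb with a linear head
 * and two page frags needs 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 entries;
 * 4 is even, so no dummy wrb is added. With a single frag the count would
 * be 3, so a dummy wrb is appended to keep it even; Lancer chips skip this
 * padding entirely.
 */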
555
556 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557 {
558         wrb->frag_pa_hi = upper_32_bits(addr);
559         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 }
562
563 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564                                         struct sk_buff *skb)
565 {
566         u8 vlan_prio;
567         u16 vlan_tag;
568
569         vlan_tag = vlan_tx_tag_get(skb);
570         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571         /* If vlan priority provided by OS is NOT in available bmap */
572         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574                                 adapter->recommended_prio;
575
576         return vlan_tag;
577 }
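/* Editor's note (illustrative): only the 3-bit priority field is
 * rewritten; the VID is preserved. E.g. for tag 0xA00A (prio 5, vid 10),
 * if bit 5 of adapter->vlan_prio_bmap is clear, the result keeps vid 10
 * but carries adapter->recommended_prio in the priority bits.
 */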
578
579 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
580                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
581 {
582         u16 vlan_tag;
583
584         memset(hdr, 0, sizeof(*hdr));
585
586         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
587
588         if (skb_is_gso(skb)) {
589                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
590                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
591                         hdr, skb_shinfo(skb)->gso_size);
592                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
593                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
594                 if (lancer_chip(adapter) && adapter->sli_family  ==
595                                                         LANCER_A0_SLI_FAMILY) {
596                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
597                         if (is_tcp_pkt(skb))
598                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
599                                                                 tcpcs, hdr, 1);
600                         else if (is_udp_pkt(skb))
601                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
602                                                                 udpcs, hdr, 1);
603                 }
604         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
605                 if (is_tcp_pkt(skb))
606                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
607                 else if (is_udp_pkt(skb))
608                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
609         }
610
611         if (vlan_tx_tag_present(skb)) {
612                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
613                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
614                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
615         }
616
617         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
618         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
619         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
620         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
621 }
622
623 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
624                 bool unmap_single)
625 {
626         dma_addr_t dma;
627
628         be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
631         if (wrb->frag_len) {
632                 if (unmap_single)
633                         dma_unmap_single(dev, dma, wrb->frag_len,
634                                          DMA_TO_DEVICE);
635                 else
636                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
637         }
638 }
639
640 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
641                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642 {
643         dma_addr_t busaddr;
644         int i, copied = 0;
645         struct device *dev = &adapter->pdev->dev;
646         struct sk_buff *first_skb = skb;
647         struct be_eth_wrb *wrb;
648         struct be_eth_hdr_wrb *hdr;
649         bool map_single = false;
650         u16 map_head;
651
652         hdr = queue_head_node(txq);
653         queue_head_inc(txq);
654         map_head = txq->head;
655
656         if (skb->len > skb->data_len) {
657                 int len = skb_headlen(skb);
658                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659                 if (dma_mapping_error(dev, busaddr))
660                         goto dma_err;
661                 map_single = true;
662                 wrb = queue_head_node(txq);
663                 wrb_fill(wrb, busaddr, len);
664                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665                 queue_head_inc(txq);
666                 copied += len;
667         }
668
669         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
670                 const struct skb_frag_struct *frag =
671                         &skb_shinfo(skb)->frags[i];
672                 busaddr = skb_frag_dma_map(dev, frag, 0,
673                                            skb_frag_size(frag), DMA_TO_DEVICE);
674                 if (dma_mapping_error(dev, busaddr))
675                         goto dma_err;
676                 wrb = queue_head_node(txq);
677                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
678                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
679                 queue_head_inc(txq);
680                 copied += skb_frag_size(frag);
681         }
682
683         if (dummy_wrb) {
684                 wrb = queue_head_node(txq);
685                 wrb_fill(wrb, 0, 0);
686                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
687                 queue_head_inc(txq);
688         }
689
690         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
691         be_dws_cpu_to_le(hdr, sizeof(*hdr));
692
693         return copied;
694 dma_err:
695         txq->head = map_head;
696         while (copied) {
697                 wrb = queue_head_node(txq);
698                 unmap_tx_frag(dev, wrb, map_single);
699                 map_single = false;
700                 copied -= wrb->frag_len;
701                 queue_head_inc(txq);
702         }
703         return 0;
704 }
705
706 static netdev_tx_t be_xmit(struct sk_buff *skb,
707                         struct net_device *netdev)
708 {
709         struct be_adapter *adapter = netdev_priv(netdev);
710         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711         struct be_queue_info *txq = &txo->q;
712         u32 wrb_cnt = 0, copied = 0;
713         u32 start = txq->head;
714         bool dummy_wrb, stopped = false;
715
716         /* For vlan tagged pkts, BE
717          * 1) calculates the checksum even when CSO is not requested
718          * 2) calculates the checksum wrongly for padded pkts shorter
719          * than 60 bytes.
720          * As a workaround, disable TX vlan offloading in such cases.
721          */
722         if (unlikely(vlan_tx_tag_present(skb) &&
723                      (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
724                 skb = skb_share_check(skb, GFP_ATOMIC);
725                 if (unlikely(!skb))
726                         goto tx_drop;
727
728                 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
729                 if (unlikely(!skb))
730                         goto tx_drop;
731
732                 skb->vlan_tci = 0;
733         }
734
735         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
736
737         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
738         if (copied) {
739                 /* record the sent skb in the sent_skb table */
740                 BUG_ON(txo->sent_skb_list[start]);
741                 txo->sent_skb_list[start] = skb;
742
743                 /* Ensure txq has space for the next skb; else stop the queue
744                  * *BEFORE* ringing the tx doorbell, so that we serialize the
745                  * tx compls of the current transmit, which will wake up the queue
746                  */
747                 atomic_add(wrb_cnt, &txq->used);
748                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
749                                                                 txq->len) {
750                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
751                         stopped = true;
752                 }
753
754                 be_txq_notify(adapter, txq->id, wrb_cnt);
755
756                 be_tx_stats_update(txo, wrb_cnt, copied,
757                                 skb_shinfo(skb)->gso_segs, stopped);
758         } else {
759                 txq->head = start;
760                 dev_kfree_skb_any(skb);
761         }
762 tx_drop:
763         return NETDEV_TX_OK;
764 }
765
766 static int be_change_mtu(struct net_device *netdev, int new_mtu)
767 {
768         struct be_adapter *adapter = netdev_priv(netdev);
769         if (new_mtu < BE_MIN_MTU ||
770                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771                                         (ETH_HLEN + ETH_FCS_LEN))) {
772                 dev_info(&adapter->pdev->dev,
773                         "MTU must be between %d and %d bytes\n",
774                         BE_MIN_MTU,
775                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
776                 return -EINVAL;
777         }
778         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779                         netdev->mtu, new_mtu);
780         netdev->mtu = new_mtu;
781         return 0;
782 }
783
784 /*
785  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786  * If the user configures more, place BE in vlan promiscuous mode.
787  */
788 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
789 {
790         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
791         u16 vtag[BE_NUM_VLANS_SUPPORTED];
792         u16 ntags = 0, i;
793         int status = 0;
794
795         if (vf) {
796                 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
797                 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
798                                             1, 1, 0);
799         }
800
801         /* No need to further configure vids if in promiscuous mode */
802         if (adapter->promiscuous)
803                 return 0;
804
805         if (adapter->vlans_added > adapter->max_vlans)
806                 goto set_vlan_promisc;
807
808         /* Construct VLAN Table to give to HW */
809         for (i = 0; i < VLAN_N_VID; i++)
810                 if (adapter->vlan_tag[i])
811                         vtag[ntags++] = cpu_to_le16(i);
812
813         status = be_cmd_vlan_config(adapter, adapter->if_handle,
814                                     vtag, ntags, 1, 0);
815
816         /* Set to VLAN promisc mode as setting VLAN filter failed */
817         if (status) {
818                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820                 goto set_vlan_promisc;
821         }
822
823         return status;
824
825 set_vlan_promisc:
826         status = be_cmd_vlan_config(adapter, adapter->if_handle,
827                                     NULL, 0, 1, 1);
828         return status;
829 }
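/* Editor's note (illustrative): once vlans_added exceeds max_vlans, the
 * fallback above passes a NULL table to be_cmd_vlan_config() with the
 * promiscuous flag set, so the interface stops HW vlan filtering and
 * accepts every tag; the exact FW semantics are as assumed from this call.
 */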
830
831 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
832 {
833         struct be_adapter *adapter = netdev_priv(netdev);
834         int status = 0;
835
836         if (!be_physfn(adapter)) {
837                 status = -EINVAL;
838                 goto ret;
839         }
840
841         adapter->vlan_tag[vid] = 1;
842         if (adapter->vlans_added <= (adapter->max_vlans + 1))
843                 status = be_vid_config(adapter, false, 0);
844
845         if (!status)
846                 adapter->vlans_added++;
847         else
848                 adapter->vlan_tag[vid] = 0;
849 ret:
850         return status;
851 }
852
853 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
854 {
855         struct be_adapter *adapter = netdev_priv(netdev);
856         int status = 0;
857
858         if (!be_physfn(adapter)) {
859                 status = -EINVAL;
860                 goto ret;
861         }
862
863         adapter->vlan_tag[vid] = 0;
864         if (adapter->vlans_added <= adapter->max_vlans)
865                 status = be_vid_config(adapter, false, 0);
866
867         if (!status)
868                 adapter->vlans_added--;
869         else
870                 adapter->vlan_tag[vid] = 1;
871 ret:
872         return status;
873 }
874
875 static void be_set_rx_mode(struct net_device *netdev)
876 {
877         struct be_adapter *adapter = netdev_priv(netdev);
878         int status;
879
880         if (netdev->flags & IFF_PROMISC) {
881                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
882                 adapter->promiscuous = true;
883                 goto done;
884         }
885
886         /* BE was previously in promiscuous mode; disable it */
887         if (adapter->promiscuous) {
888                 adapter->promiscuous = false;
889                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
890
891                 if (adapter->vlans_added)
892                         be_vid_config(adapter, false, 0);
893         }
894
895         /* Enable multicast promisc if num configured exceeds what we support */
896         if (netdev->flags & IFF_ALLMULTI ||
897                         netdev_mc_count(netdev) > BE_MAX_MC) {
898                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
899                 goto done;
900         }
901
902         if (netdev_uc_count(netdev) != adapter->uc_macs) {
903                 struct netdev_hw_addr *ha;
904                 int i = 1; /* First slot is claimed by the Primary MAC */
905
906                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
907                         be_cmd_pmac_del(adapter, adapter->if_handle,
908                                         adapter->pmac_id[i], 0);
909                 }
910
911                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
912                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
913                         adapter->promiscuous = true;
914                         goto done;
915                 }
916
917                 netdev_for_each_uc_addr(ha, adapter->netdev) {
918                         adapter->uc_macs++; /* First slot is for Primary MAC */
919                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
920                                         adapter->if_handle,
921                                         &adapter->pmac_id[adapter->uc_macs], 0);
922                 }
923         }
924
925         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927         /* Set to MCAST promisc mode if setting MULTICAST address fails */
928         if (status) {
929                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932         }
933 done:
934         return;
935 }
936
937 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938 {
939         struct be_adapter *adapter = netdev_priv(netdev);
940         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
941         int status;
942
943         if (!sriov_enabled(adapter))
944                 return -EPERM;
945
946         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
947                 return -EINVAL;
948
949         if (lancer_chip(adapter)) {
950                 status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
951         } else {
952                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953                                          vf_cfg->pmac_id, vf + 1);
954
955                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956                                          &vf_cfg->pmac_id, vf + 1);
957         }
958
959         if (status)
960                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961                                 mac, vf);
962         else
963                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
964
965         return status;
966 }
967
968 static int be_get_vf_config(struct net_device *netdev, int vf,
969                         struct ifla_vf_info *vi)
970 {
971         struct be_adapter *adapter = netdev_priv(netdev);
972         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
973
974         if (!sriov_enabled(adapter))
975                 return -EPERM;
976
977         if (vf >= adapter->num_vfs)
978                 return -EINVAL;
979
980         vi->vf = vf;
981         vi->tx_rate = vf_cfg->tx_rate;
982         vi->vlan = vf_cfg->vlan_tag;
983         vi->qos = 0;
984         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
985
986         return 0;
987 }
988
989 static int be_set_vf_vlan(struct net_device *netdev,
990                         int vf, u16 vlan, u8 qos)
991 {
992         struct be_adapter *adapter = netdev_priv(netdev);
993         int status = 0;
994
995         if (!sriov_enabled(adapter))
996                 return -EPERM;
997
998         if (vf >= adapter->num_vfs || vlan > 4095)
999                 return -EINVAL;
1000
1001         if (vlan) {
1002                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003                         /* If this is a new value, program it; else skip. */
1004                         adapter->vf_cfg[vf].vlan_tag = vlan;
1005
1006                         status = be_cmd_set_hsw_config(adapter, vlan,
1007                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1008                 }
1009         } else {
1010                 /* Reset Transparent Vlan Tagging. */
1011                 adapter->vf_cfg[vf].vlan_tag = 0;
1012                 vlan = adapter->vf_cfg[vf].def_vid;
1013                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014                         adapter->vf_cfg[vf].if_handle);
1015         }
1016
1018         if (status)
1019                 dev_info(&adapter->pdev->dev,
1020                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1021         return status;
1022 }
1023
1024 static int be_set_vf_tx_rate(struct net_device *netdev,
1025                         int vf, int rate)
1026 {
1027         struct be_adapter *adapter = netdev_priv(netdev);
1028         int status = 0;
1029
1030         if (!sriov_enabled(adapter))
1031                 return -EPERM;
1032
1033         if (vf >= adapter->num_vfs)
1034                 return -EINVAL;
1035
1036         if (rate < 100 || rate > 10000) {
1037                 dev_err(&adapter->pdev->dev,
1038                         "tx rate must be between 100 and 10000 Mbps\n");
1039                 return -EINVAL;
1040         }
1041
1042         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1043
1044         if (status)
1045                 dev_err(&adapter->pdev->dev,
1046                                 "tx rate %d on VF %d failed\n", rate, vf);
1047         else
1048                 adapter->vf_cfg[vf].tx_rate = rate;
1049         return status;
1050 }
1051
1052 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1053 {
1054         struct pci_dev *dev, *pdev = adapter->pdev;
1055         int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1056         u16 offset, stride;
1057
1058         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1059         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1060         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1061
1062         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1063         while (dev) {
1064                 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1065                 if (dev->is_virtfn && dev->devfn == vf_fn) {
1066                         vfs++;
1067                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1068                                 assigned_vfs++;
1069                 }
1070                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1071         }
1072         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1073 }
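/* Editor's note (illustrative): be_find_vfs() applies the SR-IOV routing
 * ID arithmetic: the i-th VF sits at devfn = PF devfn + VF offset +
 * i * VF stride. With offset 128 and stride 2 (hypothetical values), a PF
 * at devfn 0 would own VFs at devfn 128, 130, 132, ...
 */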
1074
1075 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1076 {
1077         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1078         ulong now = jiffies;
1079         ulong delta = now - stats->rx_jiffies;
1080         u64 pkts;
1081         unsigned int start, eqd;
1082
1083         if (!eqo->enable_aic) {
1084                 eqd = eqo->eqd;
1085                 goto modify_eqd;
1086         }
1087
1088         if (eqo->idx >= adapter->num_rx_qs)
1089                 return;
1090
1091         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1092
1093         /* Wrapped around */
1094         if (time_before(now, stats->rx_jiffies)) {
1095                 stats->rx_jiffies = now;
1096                 return;
1097         }
1098
1099         /* Update once a second */
1100         if (delta < HZ)
1101                 return;
1102
1103         do {
1104                 start = u64_stats_fetch_begin_bh(&stats->sync);
1105                 pkts = stats->rx_pkts;
1106         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1107
1108         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1109         stats->rx_pkts_prev = pkts;
1110         stats->rx_jiffies = now;
1111         eqd = (stats->rx_pps / 110000) << 3;
1112         eqd = min(eqd, eqo->max_eqd);
1113         eqd = max(eqd, eqo->min_eqd);
1114         if (eqd < 10)
1115                 eqd = 0;
1116
1117 modify_eqd:
1118         if (eqd != eqo->cur_eqd) {
1119                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1120                 eqo->cur_eqd = eqd;
1121         }
1122 }
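/* Worked example (editor's addition): with adaptive coalescing enabled the
 * EQ delay is derived from the measured RX packet rate, roughly:
 *
 *	// assuming stats->rx_pps came out at 440000 pkts/sec
 *	eqd = (440000 / 110000) << 3;	// = 32
 *	eqd = min(eqd, eqo->max_eqd);	// then clamped to [min_eqd, max_eqd]
 *	eqd = max(eqd, eqo->min_eqd);	// and values under 10 disable delay
 */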
1123
1124 static void be_rx_stats_update(struct be_rx_obj *rxo,
1125                 struct be_rx_compl_info *rxcp)
1126 {
1127         struct be_rx_stats *stats = rx_stats(rxo);
1128
1129         u64_stats_update_begin(&stats->sync);
1130         stats->rx_compl++;
1131         stats->rx_bytes += rxcp->pkt_size;
1132         stats->rx_pkts++;
1133         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1134                 stats->rx_mcast_pkts++;
1135         if (rxcp->err)
1136                 stats->rx_compl_err++;
1137         u64_stats_update_end(&stats->sync);
1138 }
1139
1140 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1141 {
1142         /* L4 checksum is not reliable for non TCP/UDP packets.
1143          * Also ignore ipcksm for ipv6 pkts */
1144         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1145                                 (rxcp->ip_csum || rxcp->ipv6);
1146 }
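/* Editor's note (illustrative): csum_passed() trusts the HW checksum only
 * for TCP/UDP frames. An IPv4 TCP packet needs both l4_csum and ip_csum to
 * be good; for IPv6 the ip_csum bit is ignored (IPv6 has no header
 * checksum), so tcpf && l4_csum && ipv6 already qualifies.
 */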
1147
1148 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1149                                                 u16 frag_idx)
1150 {
1151         struct be_adapter *adapter = rxo->adapter;
1152         struct be_rx_page_info *rx_page_info;
1153         struct be_queue_info *rxq = &rxo->q;
1154
1155         rx_page_info = &rxo->page_info_tbl[frag_idx];
1156         BUG_ON(!rx_page_info->page);
1157
1158         if (rx_page_info->last_page_user) {
1159                 dma_unmap_page(&adapter->pdev->dev,
1160                                dma_unmap_addr(rx_page_info, bus),
1161                                adapter->big_page_size, DMA_FROM_DEVICE);
1162                 rx_page_info->last_page_user = false;
1163         }
1164
1165         atomic_dec(&rxq->used);
1166         return rx_page_info;
1167 }
1168
1169 /* Throw away the data in the Rx completion */
1170 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1171                                 struct be_rx_compl_info *rxcp)
1172 {
1173         struct be_queue_info *rxq = &rxo->q;
1174         struct be_rx_page_info *page_info;
1175         u16 i, num_rcvd = rxcp->num_rcvd;
1176
1177         for (i = 0; i < num_rcvd; i++) {
1178                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1179                 put_page(page_info->page);
1180                 memset(page_info, 0, sizeof(*page_info));
1181                 index_inc(&rxcp->rxq_idx, rxq->len);
1182         }
1183 }
1184
1185 /*
1186  * skb_fill_rx_data forms a complete skb for an ether frame
1187  * indicated by rxcp.
1188  */
1189 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1190                              struct be_rx_compl_info *rxcp)
1191 {
1192         struct be_queue_info *rxq = &rxo->q;
1193         struct be_rx_page_info *page_info;
1194         u16 i, j;
1195         u16 hdr_len, curr_frag_len, remaining;
1196         u8 *start;
1197
1198         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1199         start = page_address(page_info->page) + page_info->page_offset;
1200         prefetch(start);
1201
1202         /* Copy data in the first descriptor of this completion */
1203         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1204
1205         /* Copy the header portion into skb_data */
1206         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1207         memcpy(skb->data, start, hdr_len);
1208         skb->len = curr_frag_len;
1209         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1210                 /* Complete packet has now been moved to data */
1211                 put_page(page_info->page);
1212                 skb->data_len = 0;
1213                 skb->tail += curr_frag_len;
1214         } else {
1215                 skb_shinfo(skb)->nr_frags = 1;
1216                 skb_frag_set_page(skb, 0, page_info->page);
1217                 skb_shinfo(skb)->frags[0].page_offset =
1218                                         page_info->page_offset + hdr_len;
1219                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1220                 skb->data_len = curr_frag_len - hdr_len;
1221                 skb->truesize += rx_frag_size;
1222                 skb->tail += hdr_len;
1223         }
1224         page_info->page = NULL;
1225
1226         if (rxcp->pkt_size <= rx_frag_size) {
1227                 BUG_ON(rxcp->num_rcvd != 1);
1228                 return;
1229         }
1230
1231         /* More frags present for this completion */
1232         index_inc(&rxcp->rxq_idx, rxq->len);
1233         remaining = rxcp->pkt_size - curr_frag_len;
1234         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1235                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1236                 curr_frag_len = min(remaining, rx_frag_size);
1237
1238                 /* Coalesce all frags from the same physical page in one slot */
1239                 if (page_info->page_offset == 0) {
1240                         /* Fresh page */
1241                         j++;
1242                         skb_frag_set_page(skb, j, page_info->page);
1243                         skb_shinfo(skb)->frags[j].page_offset =
1244                                                         page_info->page_offset;
1245                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1246                         skb_shinfo(skb)->nr_frags++;
1247                 } else {
1248                         put_page(page_info->page);
1249                 }
1250
1251                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1252                 skb->len += curr_frag_len;
1253                 skb->data_len += curr_frag_len;
1254                 skb->truesize += rx_frag_size;
1255                 remaining -= curr_frag_len;
1256                 index_inc(&rxcp->rxq_idx, rxq->len);
1257                 page_info->page = NULL;
1258         }
1259         BUG_ON(j > MAX_SKB_FRAGS);
1260 }
1261
1262 /* Process the RX completion indicated by rxcp when GRO is disabled */
1263 static void be_rx_compl_process(struct be_rx_obj *rxo,
1264                                 struct be_rx_compl_info *rxcp)
1265 {
1266         struct be_adapter *adapter = rxo->adapter;
1267         struct net_device *netdev = adapter->netdev;
1268         struct sk_buff *skb;
1269
1270         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1271         if (unlikely(!skb)) {
1272                 rx_stats(rxo)->rx_drops_no_skbs++;
1273                 be_rx_compl_discard(rxo, rxcp);
1274                 return;
1275         }
1276
1277         skb_fill_rx_data(rxo, skb, rxcp);
1278
1279         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1280                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1281         else
1282                 skb_checksum_none_assert(skb);
1283
1284         skb->protocol = eth_type_trans(skb, netdev);
1285         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1286         if (netdev->features & NETIF_F_RXHASH)
1287                 skb->rxhash = rxcp->rss_hash;
1288
1290         if (rxcp->vlanf)
1291                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1292
1293         netif_receive_skb(skb);
1294 }
1295
1296 /* Process the RX completion indicated by rxcp when GRO is enabled */
1297 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1298                              struct be_rx_compl_info *rxcp)
1299 {
1300         struct be_adapter *adapter = rxo->adapter;
1301         struct be_rx_page_info *page_info;
1302         struct sk_buff *skb = NULL;
1303         struct be_queue_info *rxq = &rxo->q;
1304         u16 remaining, curr_frag_len;
1305         u16 i, j;
1306
1307         skb = napi_get_frags(napi);
1308         if (!skb) {
1309                 be_rx_compl_discard(rxo, rxcp);
1310                 return;
1311         }
1312
1313         remaining = rxcp->pkt_size;
1314         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1315                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1316
1317                 curr_frag_len = min(remaining, rx_frag_size);
1318
1319                 /* Coalesce all frags from the same physical page in one slot */
1320                 if (i == 0 || page_info->page_offset == 0) {
1321                         /* First frag or Fresh page */
1322                         j++;
1323                         skb_frag_set_page(skb, j, page_info->page);
1324                         skb_shinfo(skb)->frags[j].page_offset =
1325                                                         page_info->page_offset;
1326                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1327                 } else {
1328                         put_page(page_info->page);
1329                 }
1330                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1331                 skb->truesize += rx_frag_size;
1332                 remaining -= curr_frag_len;
1333                 index_inc(&rxcp->rxq_idx, rxq->len);
1334                 memset(page_info, 0, sizeof(*page_info));
1335         }
1336         BUG_ON(j > MAX_SKB_FRAGS);
1337
1338         skb_shinfo(skb)->nr_frags = j + 1;
1339         skb->len = rxcp->pkt_size;
1340         skb->data_len = rxcp->pkt_size;
1341         skb->ip_summed = CHECKSUM_UNNECESSARY;
1342         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1343         if (adapter->netdev->features & NETIF_F_RXHASH)
1344                 skb->rxhash = rxcp->rss_hash;
1345
1346         if (rxcp->vlanf)
1347                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1348
1349         napi_gro_frags(napi);
1350 }
1351
1352 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1353                                  struct be_rx_compl_info *rxcp)
1354 {
1355         rxcp->pkt_size =
1356                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1357         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1358         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1359         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1360         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1361         rxcp->ip_csum =
1362                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1363         rxcp->l4_csum =
1364                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1365         rxcp->ipv6 =
1366                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1367         rxcp->rxq_idx =
1368                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1369         rxcp->num_rcvd =
1370                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1371         rxcp->pkt_type =
1372                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1373         rxcp->rss_hash =
1374                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1375         if (rxcp->vlanf) {
1376                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1377                                           compl);
1378                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1379                                                compl);
1380         }
1381         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1382 }
1383
1384 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1385                                  struct be_rx_compl_info *rxcp)
1386 {
1387         rxcp->pkt_size =
1388                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1389         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1390         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1391         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1392         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1393         rxcp->ip_csum =
1394                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1395         rxcp->l4_csum =
1396                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1397         rxcp->ipv6 =
1398                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1399         rxcp->rxq_idx =
1400                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1401         rxcp->num_rcvd =
1402                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1403         rxcp->pkt_type =
1404                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1405         rxcp->rss_hash =
1406                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1407         if (rxcp->vlanf) {
1408                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1409                                           compl);
1410                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1411                                                compl);
1412         }
1413         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1414 }
1415
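/* Return the next pending RX completion on this queue, or NULL if there is
 * none. The compl is parsed into rxo->rxcp and its valid bit is cleared so
 * that it is not processed again.
 */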
1416 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1417 {
1418         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1419         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1420         struct be_adapter *adapter = rxo->adapter;
1421
1422         /* For checking the valid bit it is OK to use either definition, as
1423          * the valid bit is at the same position in both v0 and v1 Rx compls */
1424         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1425                 return NULL;
1426
1427         rmb();
1428         be_dws_le_to_cpu(compl, sizeof(*compl));
1429
1430         if (adapter->be3_native)
1431                 be_parse_rx_compl_v1(compl, rxcp);
1432         else
1433                 be_parse_rx_compl_v0(compl, rxcp);
1434
1435         if (rxcp->vlanf) {
1436                 /* vlanf could be wrongly set in some cards;
1437                  * ignore it if vtm is not set */
1438                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1439                         rxcp->vlanf = 0;
1440
1441                 if (!lancer_chip(adapter))
1442                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1443
1444                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1445                     !adapter->vlan_tag[rxcp->vlan_tag])
1446                         rxcp->vlanf = 0;
1447         }
1448
1449         /* As the compl has been parsed, reset it; we won't touch it again */
1450         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1451
1452         queue_tail_inc(&rxo->cq);
1453         return rxcp;
1454 }
1455
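/* Allocate a (compound, if order > 0) page large enough for 'size' bytes */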
1456 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1457 {
1458         u32 order = get_order(size);
1459
1460         if (order > 0)
1461                 gfp |= __GFP_COMP;
1462         return  alloc_pages(gfp, order);
1463 }
1464
1465 /*
1466  * Allocate a page, split it to fragments of size rx_frag_size and post as
1467  * receive buffers to BE
1468  */
1469 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1470 {
1471         struct be_adapter *adapter = rxo->adapter;
1472         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1473         struct be_queue_info *rxq = &rxo->q;
1474         struct page *pagep = NULL;
1475         struct be_eth_rx_d *rxd;
1476         u64 page_dmaaddr = 0, frag_dmaaddr;
1477         u32 posted, page_offset = 0;
1478
1479         page_info = &rxo->page_info_tbl[rxq->head];
1480         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1481                 if (!pagep) {
1482                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1483                         if (unlikely(!pagep)) {
1484                                 rx_stats(rxo)->rx_post_fail++;
1485                                 break;
1486                         }
1487                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1488                                                     0, adapter->big_page_size,
1489                                                     DMA_FROM_DEVICE);
1490                         page_info->page_offset = 0;
1491                 } else {
1492                         get_page(pagep);
1493                         page_info->page_offset = page_offset + rx_frag_size;
1494                 }
1495                 page_offset = page_info->page_offset;
1496                 page_info->page = pagep;
1497                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1498                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1499
1500                 rxd = queue_head_node(rxq);
1501                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1502                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1503
1504                 /* Any space left in the current big page for another frag? */
1505                 if ((page_offset + rx_frag_size + rx_frag_size) >
1506                                         adapter->big_page_size) {
1507                         pagep = NULL;
1508                         page_info->last_page_user = true;
1509                 }
1510
1511                 prev_page_info = page_info;
1512                 queue_head_inc(rxq);
1513                 page_info = &rxo->page_info_tbl[rxq->head];
1514         }
1515         if (pagep)
1516                 prev_page_info->last_page_user = true;
1517
1518         if (posted) {
1519                 atomic_add(posted, &rxq->used);
1520                 be_rxq_notify(adapter, rxq->id, posted);
1521         } else if (atomic_read(&rxq->used) == 0) {
1522                 /* Let be_worker replenish when memory is available */
1523                 rxo->rx_post_starved = true;
1524         }
1525 }
1526
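/* Return the next pending TX completion, or NULL if the CQ is empty;
 * clears the valid bit and advances the CQ tail
 */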
1527 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1528 {
1529         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1530
1531         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1532                 return NULL;
1533
1534         rmb();
1535         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1536
1537         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1538
1539         queue_tail_inc(tx_cq);
1540         return txcp;
1541 }
1542
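/* Unmap and free the skb whose wrbs end at last_index; returns the number
 * of wrbs (including the header wrb) that were reclaimed
 */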
1543 static u16 be_tx_compl_process(struct be_adapter *adapter,
1544                 struct be_tx_obj *txo, u16 last_index)
1545 {
1546         struct be_queue_info *txq = &txo->q;
1547         struct be_eth_wrb *wrb;
1548         struct sk_buff **sent_skbs = txo->sent_skb_list;
1549         struct sk_buff *sent_skb;
1550         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1551         bool unmap_skb_hdr = true;
1552
1553         sent_skb = sent_skbs[txq->tail];
1554         BUG_ON(!sent_skb);
1555         sent_skbs[txq->tail] = NULL;
1556
1557         /* skip header wrb */
1558         queue_tail_inc(txq);
1559
1560         do {
1561                 cur_index = txq->tail;
1562                 wrb = queue_tail_node(txq);
1563                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1564                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1565                 unmap_skb_hdr = false;
1566
1567                 num_wrbs++;
1568                 queue_tail_inc(txq);
1569         } while (cur_index != last_index);
1570
1571         kfree_skb(sent_skb);
1572         return num_wrbs;
1573 }
1574
1575 /* Return the number of events in the event queue */
1576 static inline int events_get(struct be_eq_obj *eqo)
1577 {
1578         struct be_eq_entry *eqe;
1579         int num = 0;
1580
1581         do {
1582                 eqe = queue_tail_node(&eqo->q);
1583                 if (eqe->evt == 0)
1584                         break;
1585
1586                 rmb();
1587                 eqe->evt = 0;
1588                 num++;
1589                 queue_tail_inc(&eqo->q);
1590         } while (true);
1591
1592         return num;
1593 }
1594
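/* Ack the pending events on an EQ and schedule NAPI if there is work;
 * the EQ is rearmed only when the interrupt turns out to be spurious
 */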
1595 static int event_handle(struct be_eq_obj *eqo)
1596 {
1597         bool rearm = false;
1598         int num = events_get(eqo);
1599
1600         /* Deal with any spurious interrupts that come without events */
1601         if (!num)
1602                 rearm = true;
1603
1604         if (num || msix_enabled(eqo->adapter))
1605                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606
1607         if (num)
1608                 napi_schedule(&eqo->napi);
1609
1610         return num;
1611 }
1612
1613 /* Leaves the EQ in a disarmed state */
1614 static void be_eq_clean(struct be_eq_obj *eqo)
1615 {
1616         int num = events_get(eqo);
1617
1618         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1619 }
1620
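/* Drain pending RX completions and free the posted buffers that were never
 * consumed; used while tearing down an RX queue
 */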
1621 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1622 {
1623         struct be_rx_page_info *page_info;
1624         struct be_queue_info *rxq = &rxo->q;
1625         struct be_queue_info *rx_cq = &rxo->cq;
1626         struct be_rx_compl_info *rxcp;
1627         u16 tail;
1628
1629         /* First cleanup pending rx completions */
1630         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1631                 be_rx_compl_discard(rxo, rxcp);
1632                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1633         }
1634
1635         /* Then free posted rx buffers that were not used */
1636         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1637         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1638                 page_info = get_rx_page_info(rxo, tail);
1639                 put_page(page_info->page);
1640                 memset(page_info, 0, sizeof(*page_info));
1641         }
1642         BUG_ON(atomic_read(&rxq->used));
1643         rxq->tail = rxq->head = 0;
1644 }
1645
1646 static void be_tx_compl_clean(struct be_adapter *adapter)
1647 {
1648         struct be_tx_obj *txo;
1649         struct be_queue_info *txq;
1650         struct be_eth_tx_compl *txcp;
1651         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1652         struct sk_buff *sent_skb;
1653         bool dummy_wrb;
1654         int i, pending_txqs;
1655
1656         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1657         do {
1658                 pending_txqs = adapter->num_tx_qs;
1659
1660                 for_all_tx_queues(adapter, txo, i) {
1661                         txq = &txo->q;
1662                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1663                                 end_idx =
1664                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1665                                                       wrb_index, txcp);
1666                                 num_wrbs += be_tx_compl_process(adapter, txo,
1667                                                                 end_idx);
1668                                 cmpl++;
1669                         }
1670                         if (cmpl) {
1671                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1672                                 atomic_sub(num_wrbs, &txq->used);
1673                                 cmpl = 0;
1674                                 num_wrbs = 0;
1675                         }
1676                         if (atomic_read(&txq->used) == 0)
1677                                 pending_txqs--;
1678                 }
1679
1680                 if (pending_txqs == 0 || ++timeo > 200)
1681                         break;
1682
1683                 mdelay(1);
1684         } while (true);
1685
1686         for_all_tx_queues(adapter, txo, i) {
1687                 txq = &txo->q;
1688                 if (atomic_read(&txq->used))
1689                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1690                                 atomic_read(&txq->used));
1691
1692                 /* free posted tx for which compls will never arrive */
1693                 while (atomic_read(&txq->used)) {
1694                         sent_skb = txo->sent_skb_list[txq->tail];
1695                         end_idx = txq->tail;
1696                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1697                                                    &dummy_wrb);
1698                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1699                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1700                         atomic_sub(num_wrbs, &txq->used);
1701                 }
1702         }
1703 }
1704
1705 static void be_evt_queues_destroy(struct be_adapter *adapter)
1706 {
1707         struct be_eq_obj *eqo;
1708         int i;
1709
1710         for_all_evt_queues(adapter, eqo, i) {
1711                 be_eq_clean(eqo);
1712                 if (eqo->q.created)
1713                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1714                 be_queue_free(adapter, &eqo->q);
1715         }
1716 }
1717
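/* Create one event queue per irq vector, with adaptive interrupt
 * coalescing (AIC) enabled on each
 */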
1718 static int be_evt_queues_create(struct be_adapter *adapter)
1719 {
1720         struct be_queue_info *eq;
1721         struct be_eq_obj *eqo;
1722         int i, rc;
1723
1724         adapter->num_evt_qs = num_irqs(adapter);
1725
1726         for_all_evt_queues(adapter, eqo, i) {
1727                 eqo->adapter = adapter;
1728                 eqo->tx_budget = BE_TX_BUDGET;
1729                 eqo->idx = i;
1730                 eqo->max_eqd = BE_MAX_EQD;
1731                 eqo->enable_aic = true;
1732
1733                 eq = &eqo->q;
1734                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1735                                         sizeof(struct be_eq_entry));
1736                 if (rc)
1737                         return rc;
1738
1739                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1740                 if (rc)
1741                         return rc;
1742         }
1743         return 0;
1744 }
1745
1746 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1747 {
1748         struct be_queue_info *q;
1749
1750         q = &adapter->mcc_obj.q;
1751         if (q->created)
1752                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1753         be_queue_free(adapter, q);
1754
1755         q = &adapter->mcc_obj.cq;
1756         if (q->created)
1757                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1758         be_queue_free(adapter, q);
1759 }
1760
1761 /* Must be called only after TX qs are created as MCC shares TX EQ */
1762 static int be_mcc_queues_create(struct be_adapter *adapter)
1763 {
1764         struct be_queue_info *q, *cq;
1765
1766         cq = &adapter->mcc_obj.cq;
1767         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1768                         sizeof(struct be_mcc_compl)))
1769                 goto err;
1770
1771         /* Use the default EQ for MCC completions */
1772         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1773                 goto mcc_cq_free;
1774
1775         q = &adapter->mcc_obj.q;
1776         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1777                 goto mcc_cq_destroy;
1778
1779         if (be_cmd_mccq_create(adapter, q, cq))
1780                 goto mcc_q_free;
1781
1782         return 0;
1783
1784 mcc_q_free:
1785         be_queue_free(adapter, q);
1786 mcc_cq_destroy:
1787         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1788 mcc_cq_free:
1789         be_queue_free(adapter, cq);
1790 err:
1791         return -1;
1792 }
1793
1794 static void be_tx_queues_destroy(struct be_adapter *adapter)
1795 {
1796         struct be_queue_info *q;
1797         struct be_tx_obj *txo;
1798         u8 i;
1799
1800         for_all_tx_queues(adapter, txo, i) {
1801                 q = &txo->q;
1802                 if (q->created)
1803                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1804                 be_queue_free(adapter, q);
1805
1806                 q = &txo->cq;
1807                 if (q->created)
1808                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1809                 be_queue_free(adapter, q);
1810         }
1811 }
1812
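/* Multiple TXQs are used only on BE3 physical functions that are not in
 * SR-IOV or multi-channel mode
 */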
1813 static int be_num_txqs_want(struct be_adapter *adapter)
1814 {
1815         if (sriov_want(adapter) || be_is_mc(adapter) ||
1816             lancer_chip(adapter) || !be_physfn(adapter) ||
1817             adapter->generation == BE_GEN2)
1818                 return 1;
1819         else
1820                 return MAX_TX_QS;
1821 }
1822
1823 static int be_tx_cqs_create(struct be_adapter *adapter)
1824 {
1825         struct be_queue_info *cq, *eq;
1826         int status;
1827         struct be_tx_obj *txo;
1828         u8 i;
1829
1830         adapter->num_tx_qs = be_num_txqs_want(adapter);
1831         if (adapter->num_tx_qs != MAX_TX_QS) {
1832                 rtnl_lock();
1833                 netif_set_real_num_tx_queues(adapter->netdev,
1834                         adapter->num_tx_qs);
1835                 rtnl_unlock();
1836         }
1837
1838         for_all_tx_queues(adapter, txo, i) {
1839                 cq = &txo->cq;
1840                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1841                                         sizeof(struct be_eth_tx_compl));
1842                 if (status)
1843                         return status;
1844
1845                 /* If num_evt_qs is less than num_tx_qs, then more than
1846                  * one txq share an eq
1847                  */
1848                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1849                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1850                 if (status)
1851                         return status;
1852         }
1853         return 0;
1854 }
1855
1856 static int be_tx_qs_create(struct be_adapter *adapter)
1857 {
1858         struct be_tx_obj *txo;
1859         int i, status;
1860
1861         for_all_tx_queues(adapter, txo, i) {
1862                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1863                                         sizeof(struct be_eth_wrb));
1864                 if (status)
1865                         return status;
1866
1867                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1868                 if (status)
1869                         return status;
1870         }
1871
1872         return 0;
1873 }
1874
1875 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1876 {
1877         struct be_queue_info *q;
1878         struct be_rx_obj *rxo;
1879         int i;
1880
1881         for_all_rx_queues(adapter, rxo, i) {
1882                 q = &rxo->cq;
1883                 if (q->created)
1884                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1885                 be_queue_free(adapter, q);
1886         }
1887 }
1888
1889 static int be_rx_cqs_create(struct be_adapter *adapter)
1890 {
1891         struct be_queue_info *eq, *cq;
1892         struct be_rx_obj *rxo;
1893         int rc, i;
1894
1895         /* We'll create one RSS ring per irq, plus one default non-RSS ring.
1896          * When there's only one irq there's no use creating RSS rings.
1897          */
1898         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1899                                 num_irqs(adapter) + 1 : 1;
1900
1901         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1902         for_all_rx_queues(adapter, rxo, i) {
1903                 rxo->adapter = adapter;
1904                 cq = &rxo->cq;
1905                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1906                                 sizeof(struct be_eth_rx_compl));
1907                 if (rc)
1908                         return rc;
1909
1910                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1911                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1912                 if (rc)
1913                         return rc;
1914         }
1915
1916         if (adapter->num_rx_qs != MAX_RX_QS)
1917                 dev_info(&adapter->pdev->dev,
1918                         "Created only %d receive queues\n", adapter->num_rx_qs);
1919
1920         return 0;
1921 }
1922
1923 static irqreturn_t be_intx(int irq, void *dev)
1924 {
1925         struct be_adapter *adapter = dev;
1926         int num_evts;
1927
1928         /* With INTx only one EQ is used */
1929         num_evts = event_handle(&adapter->eq_obj[0]);
1930         if (num_evts)
1931                 return IRQ_HANDLED;
1932         else
1933                 return IRQ_NONE;
1934 }
1935
1936 static irqreturn_t be_msix(int irq, void *dev)
1937 {
1938         struct be_eq_obj *eqo = dev;
1939
1940         event_handle(eqo);
1941         return IRQ_HANDLED;
1942 }
1943
1944 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1945 {
1946         return rxcp->tcpf && !rxcp->err;
1947 }
1948
1949 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1950                         int budget)
1951 {
1952         struct be_adapter *adapter = rxo->adapter;
1953         struct be_queue_info *rx_cq = &rxo->cq;
1954         struct be_rx_compl_info *rxcp;
1955         u32 work_done;
1956
1957         for (work_done = 0; work_done < budget; work_done++) {
1958                 rxcp = be_rx_compl_get(rxo);
1959                 if (!rxcp)
1960                         break;
1961
1962                 /* Is it a flush compl that has no data */
1963                 if (unlikely(rxcp->num_rcvd == 0))
1964                         goto loop_continue;
1965
1966                 /* Discard a compl that reports a partial DMA (Lancer B0) */
1967                 if (unlikely(!rxcp->pkt_size)) {
1968                         be_rx_compl_discard(rxo, rxcp);
1969                         goto loop_continue;
1970                 }
1971
1972                 /* On BE drop pkts that arrive due to imperfect filtering in
1973                  * promiscuous mode on some SKUs
1974                  */
1975                 if (unlikely(rxcp->port != adapter->port_num &&
1976                                 !lancer_chip(adapter))) {
1977                         be_rx_compl_discard(rxo, rxcp);
1978                         goto loop_continue;
1979                 }
1980
1981                 if (do_gro(rxcp))
1982                         be_rx_compl_process_gro(rxo, napi, rxcp);
1983                 else
1984                         be_rx_compl_process(rxo, rxcp);
1985 loop_continue:
1986                 be_rx_stats_update(rxo, rxcp);
1987         }
1988
1989         if (work_done) {
1990                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1991
1992                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1993                         be_post_rx_frags(rxo, GFP_ATOMIC);
1994         }
1995
1996         return work_done;
1997 }
1998
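/* Reap up to 'budget' TX completions on this queue, reclaim the wrbs and
 * wake the netdev subqueue if it was flow-stopped; returns true when the
 * CQ was fully drained within the budget
 */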
1999 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2000                           int budget, int idx)
2001 {
2002         struct be_eth_tx_compl *txcp;
2003         int num_wrbs = 0, work_done;
2004
2005         for (work_done = 0; work_done < budget; work_done++) {
2006                 txcp = be_tx_compl_get(&txo->cq);
2007                 if (!txcp)
2008                         break;
2009                 num_wrbs += be_tx_compl_process(adapter, txo,
2010                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2011                                         wrb_index, txcp));
2012         }
2013
2014         if (work_done) {
2015                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2016                 atomic_sub(num_wrbs, &txo->q.used);
2017
2018                 /* As Tx wrbs have been freed up, wake up netdev queue
2019                  * if it was stopped due to lack of tx wrbs.  */
2020                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2021                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2022                         netif_wake_subqueue(adapter->netdev, idx);
2023                 }
2024
2025                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2026                 tx_stats(txo)->tx_compl += work_done;
2027                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2028         }
2029         return (work_done < budget); /* Done */
2030 }
2031
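/* NAPI handler: services every TXQ and RXQ mapped to this EQ, and the MCC
 * queue when polling on the MCC EQ
 */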
2032 int be_poll(struct napi_struct *napi, int budget)
2033 {
2034         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2035         struct be_adapter *adapter = eqo->adapter;
2036         int max_work = 0, work, i;
2037         bool tx_done;
2038
2039         /* Process all TXQs serviced by this EQ */
2040         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2041                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2042                                         eqo->tx_budget, i);
2043                 if (!tx_done)
2044                         max_work = budget;
2045         }
2046
2047         /* This loop will iterate twice for EQ0, on which completions of
2048          * the last RXQ (the default one) are also processed.
2049          * For other EQs the loop iterates only once.
2050          */
2051         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2052                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2053                 max_work = max(work, max_work);
2054         }
2055
2056         if (is_mcc_eqo(eqo))
2057                 be_process_mcc(adapter);
2058
2059         if (max_work < budget) {
2060                 napi_complete(napi);
2061                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2062         } else {
2063                 /* As we'll continue in polling mode, count and clear events */
2064                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2065         }
2066         return max_work;
2067 }
2068
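/* Detect an unrecoverable HW error: check the SLIPORT status registers on
 * Lancer and the (masked) UE status registers on BE, and log the blocks
 * that report an error
 */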
2069 void be_detect_dump_ue(struct be_adapter *adapter)
2070 {
2071         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2072         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2073         u32 i;
2074
2075         if (adapter->eeh_err || adapter->ue_detected)
2076                 return;
2077
2078         if (lancer_chip(adapter)) {
2079                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2080                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2081                         sliport_err1 = ioread32(adapter->db +
2082                                         SLIPORT_ERROR1_OFFSET);
2083                         sliport_err2 = ioread32(adapter->db +
2084                                         SLIPORT_ERROR2_OFFSET);
2085                 }
2086         } else {
2087                 pci_read_config_dword(adapter->pdev,
2088                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2089                 pci_read_config_dword(adapter->pdev,
2090                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2091                 pci_read_config_dword(adapter->pdev,
2092                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2093                 pci_read_config_dword(adapter->pdev,
2094                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2095
2096                 ue_lo = (ue_lo & (~ue_lo_mask));
2097                 ue_hi = (ue_hi & (~ue_hi_mask));
2098         }
2099
2100         if (ue_lo || ue_hi ||
2101                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2102                 adapter->ue_detected = true;
2103                 adapter->eeh_err = true;
2104                 dev_err(&adapter->pdev->dev,
2105                         "Unrecoverable error in the card\n");
2106         }
2107
2108         if (ue_lo) {
2109                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2110                         if (ue_lo & 1)
2111                                 dev_err(&adapter->pdev->dev,
2112                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2113                 }
2114         }
2115         if (ue_hi) {
2116                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2117                         if (ue_hi & 1)
2118                                 dev_err(&adapter->pdev->dev,
2119                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2120                 }
2121         }
2122
2123         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2124                 dev_err(&adapter->pdev->dev,
2125                         "sliport status 0x%x\n", sliport_status);
2126                 dev_err(&adapter->pdev->dev,
2127                         "sliport error1 0x%x\n", sliport_err1);
2128                 dev_err(&adapter->pdev->dev,
2129                         "sliport error2 0x%x\n", sliport_err2);
2130         }
2131 }
2132
2133 static void be_msix_disable(struct be_adapter *adapter)
2134 {
2135         if (msix_enabled(adapter)) {
2136                 pci_disable_msix(adapter->pdev);
2137                 adapter->num_msix_vec = 0;
2138         }
2139 }
2140
2141 static uint be_num_rss_want(struct be_adapter *adapter)
2142 {
2143         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2144              !sriov_want(adapter) && be_physfn(adapter) &&
2145              !be_is_mc(adapter))
2146                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2147         else
2148                 return 0;
2149 }
2150
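/* Ask for one MSI-X vector per desired RSS ring (plus RoCE vectors when
 * supported) and settle for however many vectors the platform grants
 */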
2151 static void be_msix_enable(struct be_adapter *adapter)
2152 {
2153 #define BE_MIN_MSIX_VECTORS             1
2154         int i, status, num_vec, num_roce_vec = 0;
2155
2156         /* If RSS queues are not used, need a vec for default RX Q */
2157         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2158         if (be_roce_supported(adapter)) {
2159                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2160                                         (num_online_cpus() + 1));
2161                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2162                 num_vec += num_roce_vec;
2163                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2164         }
2165         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2166
2167         for (i = 0; i < num_vec; i++)
2168                 adapter->msix_entries[i].entry = i;
2169
2170         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2171         if (status == 0) {
2172                 goto done;
2173         } else if (status >= BE_MIN_MSIX_VECTORS) {
2174                 num_vec = status;
2175                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2176                                 num_vec) == 0)
2177                         goto done;
2178         }
2179         return;
2180 done:
2181         if (be_roce_supported(adapter)) {
2182                 if (num_vec > num_roce_vec) {
2183                         adapter->num_msix_vec = num_vec - num_roce_vec;
2184                         adapter->num_msix_roce_vec =
2185                                 num_vec - adapter->num_msix_vec;
2186                 } else {
2187                         adapter->num_msix_vec = num_vec;
2188                         adapter->num_msix_roce_vec = 0;
2189                 }
2190         } else {
2191                 adapter->num_msix_vec = num_vec;
2192         }
2193 }
2194
2195 static inline int be_msix_vec_get(struct be_adapter *adapter,
2196                                 struct be_eq_obj *eqo)
2197 {
2198         return adapter->msix_entries[eqo->idx].vector;
2199 }
2200
2201 static int be_msix_register(struct be_adapter *adapter)
2202 {
2203         struct net_device *netdev = adapter->netdev;
2204         struct be_eq_obj *eqo;
2205         int status, i, vec;
2206
2207         for_all_evt_queues(adapter, eqo, i) {
2208                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2209                 vec = be_msix_vec_get(adapter, eqo);
2210                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2211                 if (status)
2212                         goto err_msix;
2213         }
2214
2215         return 0;
2216 err_msix:
2217         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2218                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2219         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2220                 status);
2221         be_msix_disable(adapter);
2222         return status;
2223 }
2224
2225 static int be_irq_register(struct be_adapter *adapter)
2226 {
2227         struct net_device *netdev = adapter->netdev;
2228         int status;
2229
2230         if (msix_enabled(adapter)) {
2231                 status = be_msix_register(adapter);
2232                 if (status == 0)
2233                         goto done;
2234                 /* INTx is not supported for VF */
2235                 if (!be_physfn(adapter))
2236                         return status;
2237         }
2238
2239         /* INTx */
2240         netdev->irq = adapter->pdev->irq;
2241         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2242                         adapter);
2243         if (status) {
2244                 dev_err(&adapter->pdev->dev,
2245                         "INTx request IRQ failed - err %d\n", status);
2246                 return status;
2247         }
2248 done:
2249         adapter->isr_registered = true;
2250         return 0;
2251 }
2252
2253 static void be_irq_unregister(struct be_adapter *adapter)
2254 {
2255         struct net_device *netdev = adapter->netdev;
2256         struct be_eq_obj *eqo;
2257         int i;
2258
2259         if (!adapter->isr_registered)
2260                 return;
2261
2262         /* INTx */
2263         if (!msix_enabled(adapter)) {
2264                 free_irq(netdev->irq, adapter);
2265                 goto done;
2266         }
2267
2268         /* MSIx */
2269         for_all_evt_queues(adapter, eqo, i)
2270                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2271
2272 done:
2273         adapter->isr_registered = false;
2274 }
2275
2276 static void be_rx_qs_destroy(struct be_adapter *adapter)
2277 {
2278         struct be_queue_info *q;
2279         struct be_rx_obj *rxo;
2280         int i;
2281
2282         for_all_rx_queues(adapter, rxo, i) {
2283                 q = &rxo->q;
2284                 if (q->created) {
2285                         be_cmd_rxq_destroy(adapter, q);
2286                         /* After the rxq is invalidated, wait for a grace time
2287                          * of 1ms for all dma to end and the flush compl to
2288                          * arrive
2289                          */
2290                         mdelay(1);
2291                         be_rx_cq_clean(rxo);
2292                 }
2293                 be_queue_free(adapter, q);
2294         }
2295 }
2296
2297 static int be_close(struct net_device *netdev)
2298 {
2299         struct be_adapter *adapter = netdev_priv(netdev);
2300         struct be_eq_obj *eqo;
2301         int i;
2302
2303         be_roce_dev_close(adapter);
2304
2305         be_async_mcc_disable(adapter);
2306
2307         if (!lancer_chip(adapter))
2308                 be_intr_set(adapter, false);
2309
2310         for_all_evt_queues(adapter, eqo, i) {
2311                 napi_disable(&eqo->napi);
2312                 if (msix_enabled(adapter))
2313                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2314                 else
2315                         synchronize_irq(netdev->irq);
2316                 be_eq_clean(eqo);
2317         }
2318
2319         be_irq_unregister(adapter);
2320
2321         /* Wait for all pending tx completions to arrive so that
2322          * all tx skbs are freed.
2323          */
2324         be_tx_compl_clean(adapter);
2325
2326         be_rx_qs_destroy(adapter);
2327         return 0;
2328 }
2329
2330 static int be_rx_qs_create(struct be_adapter *adapter)
2331 {
2332         struct be_rx_obj *rxo;
2333         int rc, i, j;
2334         u8 rsstable[128];
2335
2336         for_all_rx_queues(adapter, rxo, i) {
2337                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2338                                     sizeof(struct be_eth_rx_d));
2339                 if (rc)
2340                         return rc;
2341         }
2342
2343         /* The FW would like the default RXQ to be created first */
2344         rxo = default_rxo(adapter);
2345         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2346                                adapter->if_handle, false, &rxo->rss_id);
2347         if (rc)
2348                 return rc;
2349
2350         for_all_rss_queues(adapter, rxo, i) {
2351                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2352                                        rx_frag_size, adapter->if_handle,
2353                                        true, &rxo->rss_id);
2354                 if (rc)
2355                         return rc;
2356         }
2357
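        /* Stripe the RSS rings across the 128-entry RSS indirection table
         * in round-robin fashion
         */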
2358         if (be_multi_rxq(adapter)) {
2359                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2360                         for_all_rss_queues(adapter, rxo, i) {
2361                                 if ((j + i) >= 128)
2362                                         break;
2363                                 rsstable[j + i] = rxo->rss_id;
2364                         }
2365                 }
2366                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2367                 if (rc)
2368                         return rc;
2369         }
2370
2371         /* First time posting */
2372         for_all_rx_queues(adapter, rxo, i)
2373                 be_post_rx_frags(rxo, GFP_KERNEL);
2374         return 0;
2375 }
2376
2377 static int be_open(struct net_device *netdev)
2378 {
2379         struct be_adapter *adapter = netdev_priv(netdev);
2380         struct be_eq_obj *eqo;
2381         struct be_rx_obj *rxo;
2382         struct be_tx_obj *txo;
2383         u8 link_status;
2384         int status, i;
2385
2386         status = be_rx_qs_create(adapter);
2387         if (status)
2388                 goto err;
2389
2390         be_irq_register(adapter);
2391
2392         if (!lancer_chip(adapter))
2393                 be_intr_set(adapter, true);
2394
2395         for_all_rx_queues(adapter, rxo, i)
2396                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2397
2398         for_all_tx_queues(adapter, txo, i)
2399                 be_cq_notify(adapter, txo->cq.id, true, 0);
2400
2401         be_async_mcc_enable(adapter);
2402
2403         for_all_evt_queues(adapter, eqo, i) {
2404                 napi_enable(&eqo->napi);
2405                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2406         }
2407
2408         status = be_cmd_link_status_query(adapter, NULL, NULL,
2409                                           &link_status, 0);
2410         if (!status)
2411                 be_link_status_update(adapter, link_status);
2412
2413         be_roce_dev_open(adapter);
2414         return 0;
2415 err:
2416         be_close(adapter->netdev);
2417         return -EIO;
2418 }
2419
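/* Program (or clear) the magic-packet wake-on-LAN MAC in the FW and set
 * the PCI D3hot/D3cold wake state to match
 */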
2420 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2421 {
2422         struct be_dma_mem cmd;
2423         int status = 0;
2424         u8 mac[ETH_ALEN];
2425
2426         memset(mac, 0, ETH_ALEN);
2427
2428         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2429         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2430                                     GFP_KERNEL);
2431         if (cmd.va == NULL)
2432                 return -1;
2433         memset(cmd.va, 0, cmd.size);
2434
2435         if (enable) {
2436                 status = pci_write_config_dword(adapter->pdev,
2437                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2438                 if (status) {
2439                         dev_err(&adapter->pdev->dev,
2440                                 "Could not enable Wake-on-lan\n");
2441                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2442                                           cmd.dma);
2443                         return status;
2444                 }
2445                 status = be_cmd_enable_magic_wol(adapter,
2446                                 adapter->netdev->dev_addr, &cmd);
2447                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2448                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2449         } else {
2450                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2451                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2452                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2453         }
2454
2455         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2456         return status;
2457 }
2458
2459 /*
2460  * Generate a seed MAC address from the PF MAC Address using jhash.
2461  * MAC addresses for the VFs are assigned incrementally from the seed
2462  * (VF0 gets the seed, VF1 the seed with its last octet incremented, etc).
2463  * These addresses are programmed in the ASIC by the PF and the VF driver
2464  * queries for the MAC address during its probe.
2465  */
2465 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2466 {
2467         u32 vf;
2468         int status = 0;
2469         u8 mac[ETH_ALEN];
2470         struct be_vf_cfg *vf_cfg;
2471
2472         be_vf_eth_addr_generate(adapter, mac);
2473
2474         for_all_vfs(adapter, vf_cfg, vf) {
2475                 if (lancer_chip(adapter)) {
2476                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2477                 } else {
2478                         status = be_cmd_pmac_add(adapter, mac,
2479                                                  vf_cfg->if_handle,
2480                                                  &vf_cfg->pmac_id, vf + 1);
2481                 }
2482
2483                 if (status)
2484                         dev_err(&adapter->pdev->dev,
2485                         "Mac address assignment failed for VF %d\n", vf);
2486                 else
2487                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2488
2489                 mac[5] += 1;
2490         }
2491         return status;
2492 }
2493
2494 static void be_vf_clear(struct be_adapter *adapter)
2495 {
2496         struct be_vf_cfg *vf_cfg;
2497         u32 vf;
2498
2499         if (be_find_vfs(adapter, ASSIGNED)) {
2500                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2501                 goto done;
2502         }
2503
2504         for_all_vfs(adapter, vf_cfg, vf) {
2505                 if (lancer_chip(adapter))
2506                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2507                 else
2508                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2509                                         vf_cfg->pmac_id, vf + 1);
2510
2511                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2512         }
2513         pci_disable_sriov(adapter->pdev);
2514 done:
2515         kfree(adapter->vf_cfg);
2516         adapter->num_vfs = 0;
2517 }
2518
2519 static int be_clear(struct be_adapter *adapter)
2520 {
2521         int i = 1;
2522
2523         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2524                 cancel_delayed_work_sync(&adapter->work);
2525                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2526         }
2527
2528         if (sriov_enabled(adapter))
2529                 be_vf_clear(adapter);
2530
2531         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2532                 be_cmd_pmac_del(adapter, adapter->if_handle,
2533                         adapter->pmac_id[i], 0);
2534
2535         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2536
2537         be_mcc_queues_destroy(adapter);
2538         be_rx_cqs_destroy(adapter);
2539         be_tx_queues_destroy(adapter);
2540         be_evt_queues_destroy(adapter);
2541
2542         /* tell fw we're done with firing cmds */
2543         be_cmd_fw_clean(adapter);
2544
2545         be_msix_disable(adapter);
2546         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2547         return 0;
2548 }
2549
2550 static int be_vf_setup_init(struct be_adapter *adapter)
2551 {
2552         struct be_vf_cfg *vf_cfg;
2553         int vf;
2554
2555         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2556                                   GFP_KERNEL);
2557         if (!adapter->vf_cfg)
2558                 return -ENOMEM;
2559
2560         for_all_vfs(adapter, vf_cfg, vf) {
2561                 vf_cfg->if_handle = -1;
2562                 vf_cfg->pmac_id = -1;
2563         }
2564         return 0;
2565 }
2566
2567 static int be_vf_setup(struct be_adapter *adapter)
2568 {
2569         struct be_vf_cfg *vf_cfg;
2570         struct device *dev = &adapter->pdev->dev;
2571         u32 cap_flags, en_flags, vf;
2572         u16 def_vlan, lnk_speed;
2573         int status, enabled_vfs;
2574
2575         enabled_vfs = be_find_vfs(adapter, ENABLED);
2576         if (enabled_vfs) {
2577                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2578                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2579                 return 0;
2580         }
2581
2582         if (num_vfs > adapter->dev_num_vfs) {
2583                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2584                          adapter->dev_num_vfs, num_vfs);
2585                 num_vfs = adapter->dev_num_vfs;
2586         }
2587
2588         status = pci_enable_sriov(adapter->pdev, num_vfs);
2589         if (!status) {
2590                 adapter->num_vfs = num_vfs;
2591         } else {
2592                 /* Platform doesn't support SRIOV though device supports it */
2593                 dev_warn(dev, "SRIOV enable failed\n");
2594                 return 0;
2595         }
2596
2597         status = be_vf_setup_init(adapter);
2598         if (status)
2599                 goto err;
2600
2601         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2602                                 BE_IF_FLAGS_MULTICAST;
2603         for_all_vfs(adapter, vf_cfg, vf) {
2604                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2605                                           &vf_cfg->if_handle, NULL, vf + 1);
2606                 if (status)
2607                         goto err;
2608         }
2609
2610         if (!enabled_vfs) {
2611                 status = be_vf_eth_addr_config(adapter);
2612                 if (status)
2613                         goto err;
2614         }
2615
2616         for_all_vfs(adapter, vf_cfg, vf) {
2617                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2618                                                   NULL, vf + 1);
2619                 if (status)
2620                         goto err;
2621                 vf_cfg->tx_rate = lnk_speed * 10;
2622
2623                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2624                                 vf + 1, vf_cfg->if_handle);
2625                 if (status)
2626                         goto err;
2627                 vf_cfg->def_vid = def_vlan;
2628         }
2629         return 0;
2630 err:
2631         return status;
2632 }
2633
2634 static void be_setup_init(struct be_adapter *adapter)
2635 {
2636         adapter->vlan_prio_bmap = 0xff;
2637         adapter->phy.link_speed = -1;
2638         adapter->if_handle = -1;
2639         adapter->be3_native = false;
2640         adapter->promiscuous = false;
2641         adapter->eq_next_idx = 0;
2642         adapter->phy.forced_port_speed = -1;
2643 }
2644
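/* Obtain the MAC provisioned in the FW's mac-list (used on Lancer VFs):
 * reuse the pmac_id if the MAC is already active, else add the MAC
 */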
2645 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2646 {
2647         u32 pmac_id;
2648         int status;
2649         bool pmac_id_active;
2650
2651         status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2652                                                         &pmac_id, mac);
2653         if (status != 0)
2654                 goto do_none;
2655
2656         if (pmac_id_active) {
2657                 status = be_cmd_mac_addr_query(adapter, mac,
2658                                 MAC_ADDRESS_TYPE_NETWORK,
2659                                 false, adapter->if_handle, pmac_id);
2660
2661                 if (!status)
2662                         adapter->pmac_id[0] = pmac_id;
2663         } else {
2664                 status = be_cmd_pmac_add(adapter, mac,
2665                                 adapter->if_handle, &adapter->pmac_id[0], 0);
2666         }
2667 do_none:
2668         return status;
2669 }
2670
2671 /* Routine to query per function resource limits */
2672 static int be_get_config(struct be_adapter *adapter)
2673 {
2674         int pos;
2675         u16 dev_num_vfs;
2676
2677         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2678         if (pos) {
2679                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2680                                      &dev_num_vfs);
2681                 adapter->dev_num_vfs = dev_num_vfs;
2682         }
2683         return 0;
2684 }
2685
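/* One-time function-level setup: create the EQs, CQs and queues and the FW
 * interface, program MAC, VLAN and flow-control state, and optionally
 * enable SR-IOV VFs
 */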
2686 static int be_setup(struct be_adapter *adapter)
2687 {
2688         struct net_device *netdev = adapter->netdev;
2689         struct device *dev = &adapter->pdev->dev;
2690         u32 cap_flags, en_flags;
2691         u32 tx_fc, rx_fc;
2692         int status;
2693         u8 mac[ETH_ALEN];
2694
2695         be_setup_init(adapter);
2696
2697         be_get_config(adapter);
2698
2699         be_cmd_req_native_mode(adapter);
2700
2701         be_msix_enable(adapter);
2702
2703         status = be_evt_queues_create(adapter);
2704         if (status)
2705                 goto err;
2706
2707         status = be_tx_cqs_create(adapter);
2708         if (status)
2709                 goto err;
2710
2711         status = be_rx_cqs_create(adapter);
2712         if (status)
2713                 goto err;
2714
2715         status = be_mcc_queues_create(adapter);
2716         if (status)
2717                 goto err;
2718
2719         memset(mac, 0, ETH_ALEN);
2720         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2721                         true /*permanent */, 0, 0);
2722         if (status)
2723                 return status;
2724         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2725         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2726
2727         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2728                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2729         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2730                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2731
2732         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2733                 cap_flags |= BE_IF_FLAGS_RSS;
2734                 en_flags |= BE_IF_FLAGS_RSS;
2735         }
2736         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2737                         netdev->dev_addr, &adapter->if_handle,
2738                         &adapter->pmac_id[0], 0);
2739         if (status != 0)
2740                 goto err;
2741
2742         /* The VF's permanent MAC queried from the card is incorrect.
2743          * For BEx: query the MAC configured by the PF using the if_handle.
2744          * For Lancer: get and use the mac_list to obtain the MAC address.
2745          */
2746         if (!be_physfn(adapter)) {
2747                 if (lancer_chip(adapter))
2748                         status = be_add_mac_from_list(adapter, mac);
2749                 else
2750                         status = be_cmd_mac_addr_query(adapter, mac,
2751                                         MAC_ADDRESS_TYPE_NETWORK, false,
2752                                         adapter->if_handle, 0);
2753                 if (!status) {
2754                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2755                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2756                 }
2757         }
2758
2759         status = be_tx_qs_create(adapter);
2760         if (status)
2761                 goto err;
2762
2763         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2764
2765         be_vid_config(adapter, false, 0);
2766
2767         be_set_rx_mode(adapter->netdev);
2768
2769         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2770
2771         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2772                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2773                                         adapter->rx_fc);
2774
2775         pcie_set_readrq(adapter->pdev, 4096);
2776
2777         if (be_physfn(adapter) && num_vfs) {
2778                 if (adapter->dev_num_vfs)
2779                         be_vf_setup(adapter);
2780                 else
2781                         dev_warn(dev, "device doesn't support SRIOV\n");
2782         }
2783
2784         be_cmd_get_phy_info(adapter);
2785         if (be_pause_supported(adapter))
2786                 adapter->phy.fc_autoneg = 1;
2787
2788         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2789         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2790
2791         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2792         return 0;
2793 err:
2794         be_clear(adapter);
2795         return status;
2796 }
2797
2798 #ifdef CONFIG_NET_POLL_CONTROLLER
2799 static void be_netpoll(struct net_device *netdev)
2800 {
2801         struct be_adapter *adapter = netdev_priv(netdev);
2802         struct be_eq_obj *eqo;
2803         int i;
2804
2805         for_all_evt_queues(adapter, eqo, i)
2806                 event_handle(eqo);
2809 }
2810 #endif
2811
2812 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2813 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2814
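/* Decide whether the boot code needs flashing: returns true only when the
 * CRC at the end of the new image differs from the CRC already in flash
 */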
2815 static bool be_flash_redboot(struct be_adapter *adapter,
2816                         const u8 *p, u32 img_start, int image_size,
2817                         int hdr_size)
2818 {
2819         u32 crc_offset;
2820         u8 flashed_crc[4];
2821         int status;
2822
2823         crc_offset = hdr_size + img_start + image_size - 4;
2824
2825         p += crc_offset;
2826
2827         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2828                         (image_size - 4));
2829         if (status) {
2830                 dev_err(&adapter->pdev->dev,
2831                 "could not get crc from flash, not flashing redboot\n");
2832                 return false;
2833         }
2834
2835         /* update redboot only if crc does not match */
2836         if (!memcmp(flashed_crc, p, 4))
2837                 return false;
2838         else
2839                 return true;
2840 }
2841
2842 static bool phy_flashing_required(struct be_adapter *adapter)
2843 {
2844         return (adapter->phy.phy_type == TN_8022 &&
2845                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2846 }
2847
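/* Check whether a flash component of the given type is present in the UFI
 * section; uses the Gen2 section layout on pre-Gen3 adapters
 */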
2848 static bool is_comp_in_ufi(struct be_adapter *adapter,
2849                            struct flash_section_info *fsec, int type)
2850 {
2851         int i = 0, img_type = 0;
2852         struct flash_section_info_g2 *fsec_g2 = NULL;
2853
2854         if (adapter->generation != BE_GEN3)
2855                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2856
2857         for (i = 0; i < MAX_FLASH_COMP; i++) {
2858                 if (fsec_g2)
2859                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2860                 else
2861                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2862
2863                 if (img_type == type)
2864                         return true;
2865         }
2866         return false;
2867
2868 }
2869
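/* Scan past the FW file header for the flash cookie that marks the start
 * of the flash section info
 */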
2870 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2871                                          int header_size,
2872                                          const struct firmware *fw)
2873 {
2874         struct flash_section_info *fsec = NULL;
2875         const u8 *p = fw->data;
2876
2877         p += header_size;
2878         while (p < (fw->data + fw->size)) {
2879                 fsec = (struct flash_section_info *)p;
2880                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2881                         return fsec;
2882                 p += 32;
2883         }
2884         return NULL;
2885 }
2886
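/*
 * Walk the per-generation table of flashable components, skipping any
 * that are absent from the UFI or not applicable (NC-SI FW on adapters
 * running FW older than 3.102.148.0, PHY FW when no 10GBASE-T PHY needs
 * it, a redboot image whose CRC already matches flash), and write each
 * component in 32KB chunks: intermediate chunks use a SAVE operation
 * and the final chunk a FLASH operation (or the PHY variants of the
 * two for PHY firmware).
 */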
2887 static int be_flash_data(struct be_adapter *adapter,
2888                          const struct firmware *fw,
2889                          struct be_dma_mem *flash_cmd,
2890                          int num_of_images)
2891 {
2893         int status = 0, i, filehdr_size = 0;
2894         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2895         u32 total_bytes = 0, flash_op;
2896         int num_bytes;
2897         const u8 *p = fw->data;
2898         struct be_cmd_write_flashrom *req = flash_cmd->va;
2899         const struct flash_comp *pflashcomp;
2900         int num_comp, hdr_size;
2901         struct flash_section_info *fsec = NULL;
2902
2903         struct flash_comp gen3_flash_types[] = {
2904                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2905                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2906                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2907                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2908                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2909                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2910                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2911                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2912                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2913                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2914                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2915                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2916                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2917                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2918                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2919                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2920                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2921                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2922                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2923                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2924         };
2925
2926         struct flash_comp gen2_flash_types[] = {
2927                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2928                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2929                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2930                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2931                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2932                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2933                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2934                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2935                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2936                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2937                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2938                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2939                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2940                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2941                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2942                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2943         };
2944
2945         if (adapter->generation == BE_GEN3) {
2946                 pflashcomp = gen3_flash_types;
2947                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2948                 num_comp = ARRAY_SIZE(gen3_flash_types);
2949         } else {
2950                 pflashcomp = gen2_flash_types;
2951                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2952                 num_comp = ARRAY_SIZE(gen2_flash_types);
2953         }
2954         /* Get flash section info*/
2955         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2956         if (!fsec) {
2957                 dev_err(&adapter->pdev->dev,
2958                         "Invalid cookie. UFI image may be corrupted\n");
2959                 return -1;
2960         }
2961         for (i = 0; i < num_comp; i++) {
2962                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2963                         continue;
2964
2965                 if (pflashcomp[i].optype == OPTYPE_NCSI_FW &&
2966                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2967                         continue;
2968
2969                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2970                     !phy_flashing_required(adapter))
2971                         continue;
2973
2974                 hdr_size = filehdr_size + img_hdrs_size;
2976
2977                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2978                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2979                                        pflashcomp[i].size, hdr_size)))
2980                         continue;
2981
2982                 /* Flash the component */
2983                 p = fw->data;
2984                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2985                 if (p + pflashcomp[i].size > fw->data + fw->size)
2986                         return -1;
2987                 total_bytes = pflashcomp[i].size;
2988                 while (total_bytes) {
2989                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
2993                         total_bytes -= num_bytes;
2994                         if (!total_bytes) {
2995                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2996                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2997                                 else
2998                                         flash_op = FLASHROM_OPER_FLASH;
2999                         } else {
3000                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3001                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3002                                 else
3003                                         flash_op = FLASHROM_OPER_SAVE;
3004                         }
3005                         memcpy(req->params.data_buf, p, num_bytes);
3006                         p += num_bytes;
3007                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3008                                 pflashcomp[i].optype, flash_op, num_bytes);
3009                         if (status) {
3010                                 if (status == ILLEGAL_IOCTL_REQ &&
3011                                     pflashcomp[i].optype == OPTYPE_PHY_FW)
3012                                         break;
3014                                 dev_err(&adapter->pdev->dev,
3015                                         "cmd to write to flash rom failed.\n");
3016                                 return -1;
3017                         }
3018                 }
3019         }
3020         return 0;
3021 }
3022
3023 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3024 {
3025         if (!fhdr)
3026                 return 0;
3027         if (fhdr->build[0] == '3')
3028                 return BE_GEN3;
3029         else if (fhdr->build[0] == '2')
3030                 return BE_GEN2;
3031         else
3032                 return 0;
3033 }
3034
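/*
 * Lancer firmware download: copy the image into the DMA buffer in
 * 32KB chunks and hand each chunk to the FW via WRITE_OBJECT against
 * the "/prg" object; a final zero-length write at the end offset
 * commits the downloaded image.
 */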
3035 static int lancer_fw_download(struct be_adapter *adapter,
3036                                 const struct firmware *fw)
3037 {
3038 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3039 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3040         struct be_dma_mem flash_cmd;
3041         const u8 *data_ptr = NULL;
3042         u8 *dest_image_ptr = NULL;
3043         size_t image_size = 0;
3044         u32 chunk_size = 0;
3045         u32 data_written = 0;
3046         u32 offset = 0;
3047         int status = 0;
3048         u8 add_status = 0;
3049
3050         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3051                 dev_err(&adapter->pdev->dev,
3052                         "FW image not properly aligned. "
3053                         "Length must be a multiple of 4 bytes\n");
3054                 status = -EINVAL;
3055                 goto lancer_fw_exit;
3056         }
3057
3058         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3059                                 + LANCER_FW_DOWNLOAD_CHUNK;
3060         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3061                                                 &flash_cmd.dma, GFP_KERNEL);
3062         if (!flash_cmd.va) {
3063                 status = -ENOMEM;
3064                 dev_err(&adapter->pdev->dev,
3065                         "Memory allocation failure while flashing\n");
3066                 goto lancer_fw_exit;
3067         }
3068
3069         dest_image_ptr = flash_cmd.va +
3070                                 sizeof(struct lancer_cmd_req_write_object);
3071         image_size = fw->size;
3072         data_ptr = fw->data;
3073
3074         while (image_size) {
3075                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3076
3077                 /* Copy the image chunk content. */
3078                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3079
3080                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3081                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3082                                 &data_written, &add_status);
3083
3084                 if (status)
3085                         break;
3086
3087                 offset += data_written;
3088                 data_ptr += data_written;
3089                 image_size -= data_written;
3090         }
3091
3092         if (!status) {
3093                 /* Commit the FW written */
3094                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3095                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3096                                         &data_written, &add_status);
3097         }
3098
3099         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3100                                 flash_cmd.dma);
3101         if (status) {
3102                 dev_err(&adapter->pdev->dev,
3103                         "Firmware load error. "
3104                         "Status code: 0x%x Additional Status: 0x%x\n",
3105                         status, add_status);
3106                 goto lancer_fw_exit;
3107         }
3108
3109         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3110 lancer_fw_exit:
3111         return status;
3112 }
3113
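/*
 * BE2/BE3 firmware download: the UFI generation must match the
 * adapter generation.  A GEN3 UFI can carry several images; only
 * those with imageid 1 are flashed here.
 */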
3114 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3115 {
3116         struct flash_file_hdr_g2 *fhdr;
3117         struct flash_file_hdr_g3 *fhdr3;
3118         struct image_hdr *img_hdr_ptr = NULL;
3119         struct be_dma_mem flash_cmd;
3120         const u8 *p;
3121         int status = 0, i = 0, num_imgs = 0;
3122
3123         p = fw->data;
3124         fhdr = (struct flash_file_hdr_g2 *) p;
3125
3126         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3127         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3128                                           &flash_cmd.dma, GFP_KERNEL);
3129         if (!flash_cmd.va) {
3130                 status = -ENOMEM;
3131                 dev_err(&adapter->pdev->dev,
3132                         "Memory allocation failure while flashing\n");
3133                 goto be_fw_exit;
3134         }
3135
3136         if ((adapter->generation == BE_GEN3) &&
3137                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3138                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3139                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3140                 for (i = 0; i < num_imgs; i++) {
3141                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3142                                         (sizeof(struct flash_file_hdr_g3) +
3143                                          i * sizeof(struct image_hdr)));
3144                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3145                                 status = be_flash_data(adapter, fw, &flash_cmd,
3146                                                         num_imgs);
3147                 }
3148         } else if ((adapter->generation == BE_GEN2) &&
3149                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3150                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3151         } else {
3152                 dev_err(&adapter->pdev->dev,
3153                         "UFI and Interface are not compatible for flashing\n");
3154                 status = -1;
3155         }
3156
3157         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3158                           flash_cmd.dma);
3159         if (status) {
3160                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3161                 goto be_fw_exit;
3162         }
3163
3164         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3165
3166 be_fw_exit:
3167         return status;
3168 }
3169
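/*
 * Entry point for user-initiated flashing (typically reached via
 * ethtool's flash-device hook).  The interface must be up, presumably
 * so that MCC completions can be serviced while the download runs.
 */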
3170 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3171 {
3172         const struct firmware *fw;
3173         int status;
3174
3175         if (!netif_running(adapter->netdev)) {
3176                 dev_err(&adapter->pdev->dev,
3177                         "Firmware load not allowed (interface is down)\n");
3178                 return -ENETDOWN;
3179         }
3180
3181         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3182         if (status)
3183                 goto fw_exit;
3184
3185         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3186
3187         if (lancer_chip(adapter))
3188                 status = lancer_fw_download(adapter, fw);
3189         else
3190                 status = be_fw_download(adapter, fw);
3191
3192 fw_exit:
3193         release_firmware(fw);
3194         return status;
3195 }
3196
3197 static const struct net_device_ops be_netdev_ops = {
3198         .ndo_open               = be_open,
3199         .ndo_stop               = be_close,
3200         .ndo_start_xmit         = be_xmit,
3201         .ndo_set_rx_mode        = be_set_rx_mode,
3202         .ndo_set_mac_address    = be_mac_addr_set,
3203         .ndo_change_mtu         = be_change_mtu,
3204         .ndo_get_stats64        = be_get_stats64,
3205         .ndo_validate_addr      = eth_validate_addr,
3206         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3207         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3208         .ndo_set_vf_mac         = be_set_vf_mac,
3209         .ndo_set_vf_vlan        = be_set_vf_vlan,
3210         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3211         .ndo_get_vf_config      = be_get_vf_config,
3212 #ifdef CONFIG_NET_POLL_CONTROLLER
3213         .ndo_poll_controller    = be_netpoll,
3214 #endif
3215 };
3216
3217 static void be_netdev_init(struct net_device *netdev)
3218 {
3219         struct be_adapter *adapter = netdev_priv(netdev);
3220         struct be_eq_obj *eqo;
3221         int i;
3222
3223         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3224                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3225                 NETIF_F_HW_VLAN_TX;
3226         if (be_multi_rxq(adapter))
3227                 netdev->hw_features |= NETIF_F_RXHASH;
3228
3229         netdev->features |= netdev->hw_features |
3230                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3231
3232         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3233                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3234
3235         netdev->priv_flags |= IFF_UNICAST_FLT;
3236
3237         netdev->flags |= IFF_MULTICAST;
3238
3239         netif_set_gso_max_size(netdev, 65535);
3240
3241         netdev->netdev_ops = &be_netdev_ops;
3242
3243         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3244
3245         for_all_evt_queues(adapter, eqo, i)
3246                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3247 }
3248
3249 static void be_unmap_pci_bars(struct be_adapter *adapter)
3250 {
3251         if (adapter->csr)
3252                 iounmap(adapter->csr);
3253         if (adapter->db)
3254                 iounmap(adapter->db);
3255         if (adapter->roce_db.base)
3256                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3257 }
3258
3259 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3260 {
3261         struct pci_dev *pdev = adapter->pdev;
3262         u8 __iomem *addr;
3263
3264         addr = pci_iomap(pdev, 2, 0);
3265         if (addr == NULL)
3266                 return -ENOMEM;
3267
3268         adapter->roce_db.base = addr;
3269         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3270         adapter->roce_db.size = 8192;
3271         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3272         return 0;
3273 }
3274
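/*
 * Map the PCI BARs this function needs: on Lancer, BAR 0 holds the
 * doorbells (plus BAR 2 for RoCE on SLI_INTF_TYPE_3 functions); on
 * BE2/BE3 the PF maps the CSR window from BAR 2 and the doorbells
 * from BAR 4, while a BE3 VF finds its doorbells in BAR 0.
 */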
3275 static int be_map_pci_bars(struct be_adapter *adapter)
3276 {
3277         u8 __iomem *addr;
3278         int db_reg;
3279
3280         if (lancer_chip(adapter)) {
3281                 if (be_type_2_3(adapter)) {
3282                         addr = ioremap_nocache(
3283                                         pci_resource_start(adapter->pdev, 0),
3284                                         pci_resource_len(adapter->pdev, 0));
3285                         if (addr == NULL)
3286                                 return -ENOMEM;
3287                         adapter->db = addr;
3288                 }
3289                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3290                         if (lancer_roce_map_pci_bars(adapter))
3291                                 goto pci_map_err;
3292                 }
3293                 return 0;
3294         }
3295
3296         if (be_physfn(adapter)) {
3297                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3298                                 pci_resource_len(adapter->pdev, 2));
3299                 if (addr == NULL)
3300                         return -ENOMEM;
3301                 adapter->csr = addr;
3302         }
3303
3304         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3305                 db_reg = 4;
3306         else
3307                 db_reg = 0;
3312         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3313                                 pci_resource_len(adapter->pdev, db_reg));
3314         if (addr == NULL)
3315                 goto pci_map_err;
3316         adapter->db = addr;
3317         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3318                 adapter->roce_db.size = 4096;
3319                 adapter->roce_db.io_addr =
3320                                 pci_resource_start(adapter->pdev, db_reg);
3321                 adapter->roce_db.total_size =
3322                                 pci_resource_len(adapter->pdev, db_reg);
3323         }
3324         return 0;
3325 pci_map_err:
3326         be_unmap_pci_bars(adapter);
3327         return -ENOMEM;
3328 }
3329
3330 static void be_ctrl_cleanup(struct be_adapter *adapter)
3331 {
3332         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3333
3334         be_unmap_pci_bars(adapter);
3335
3336         if (mem->va)
3337                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3338                                   mem->dma);
3339
3340         mem = &adapter->rx_filter;
3341         if (mem->va)
3342                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3343                                   mem->dma);
3344 }
3345
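/*
 * Map the BARs and allocate the DMA memory the control path needs.
 * The mailbox is over-allocated by 16 bytes and then adjusted with
 * PTR_ALIGN(), evidently because the mailbox address handed to the
 * hardware must be 16-byte aligned; an rx_filter command buffer is
 * allocated alongside it.
 */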
3346 static int be_ctrl_init(struct be_adapter *adapter)
3347 {
3348         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3349         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3350         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3351         int status;
3352
3353         status = be_map_pci_bars(adapter);
3354         if (status)
3355                 goto done;
3356
3357         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3358         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3359                                                 mbox_mem_alloc->size,
3360                                                 &mbox_mem_alloc->dma,
3361                                                 GFP_KERNEL);
3362         if (!mbox_mem_alloc->va) {
3363                 status = -ENOMEM;
3364                 goto unmap_pci_bars;
3365         }
3366         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3367         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3368         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3369         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3370
3371         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3372         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3373                                         &rx_filter->dma, GFP_KERNEL);
3374         if (rx_filter->va == NULL) {
3375                 status = -ENOMEM;
3376                 goto free_mbox;
3377         }
3378         memset(rx_filter->va, 0, rx_filter->size);
3379
3380         mutex_init(&adapter->mbox_lock);
3381         spin_lock_init(&adapter->mcc_lock);
3382         spin_lock_init(&adapter->mcc_cq_lock);
3383
3384         init_completion(&adapter->flash_compl);
3385         pci_save_state(adapter->pdev);
3386         return 0;
3387
3388 free_mbox:
3389         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3390                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3391
3392 unmap_pci_bars:
3393         be_unmap_pci_bars(adapter);
3394
3395 done:
3396         return status;
3397 }
3398
3399 static void be_stats_cleanup(struct be_adapter *adapter)
3400 {
3401         struct be_dma_mem *cmd = &adapter->stats_cmd;
3402
3403         if (cmd->va)
3404                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3405                                   cmd->va, cmd->dma);
3406 }
3407
3408 static int be_stats_init(struct be_adapter *adapter)
3409 {
3410         struct be_dma_mem *cmd = &adapter->stats_cmd;
3411
3412         if (adapter->generation == BE_GEN2) {
3413                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3414         } else {
3415                 if (lancer_chip(adapter))
3416                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3417                 else
3418                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3419         }
3420         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3421                                      GFP_KERNEL);
3422         if (!cmd->va)
3423                 return -ENOMEM;
3424         memset(cmd->va, 0, cmd->size);
3425         return 0;
3426 }
3427
3428 static void __devexit be_remove(struct pci_dev *pdev)
3429 {
3430         struct be_adapter *adapter = pci_get_drvdata(pdev);
3431
3432         if (!adapter)
3433                 return;
3434
3435         be_roce_dev_remove(adapter);
3436
3437         unregister_netdev(adapter->netdev);
3438
3439         be_clear(adapter);
3440
3441         be_stats_cleanup(adapter);
3442
3443         be_ctrl_cleanup(adapter);
3444
3445         pci_set_drvdata(pdev, NULL);
3446         pci_release_regions(pdev);
3447         pci_disable_device(pdev);
3448
3449         free_netdev(adapter->netdev);
3450 }
3451
3452 bool be_is_wol_supported(struct be_adapter *adapter)
3453 {
3454         return (adapter->wol_cap & BE_WOL_CAP) &&
3455                 !be_is_wol_excluded(adapter);
3456 }
3457
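/*
 * Query the FW's extended FAT capabilities and return the debug level
 * configured for UART tracing in module 0 (0 if the query fails).
 */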
3458 u32 be_get_fw_log_level(struct be_adapter *adapter)
3459 {
3460         struct be_dma_mem extfat_cmd;
3461         struct be_fat_conf_params *cfgs;
3462         int status;
3463         u32 level = 0;
3464         int j;
3465
3466         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3467         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3468         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3469                                            extfat_cmd.size, &extfat_cmd.dma,
3470                                            GFP_KERNEL);
3470
3471         if (!extfat_cmd.va) {
3472                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3473                         __func__);
3474                 goto err;
3475         }
3476
3477         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3478         if (!status) {
3479                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3480                                                 sizeof(struct be_cmd_resp_hdr));
3481                 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3482                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3483                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3484                 }
3485         }
3486         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size,
3487                           extfat_cmd.va, extfat_cmd.dma);
3488 err:
3489         return level;
3490 }

3491 static int be_get_initial_config(struct be_adapter *adapter)
3492 {
3493         int status;
3494         u32 level;
3495
3496         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3497                         &adapter->function_mode, &adapter->function_caps);
3498         if (status)
3499                 return status;
3500
3501         if (adapter->function_mode & FLEX10_MODE)
3502                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3503         else
3504                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3505
3506         if (be_physfn(adapter))
3507                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3508         else
3509                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3510
3511         /* primary mac needs 1 pmac entry */
3512         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3513                                   sizeof(u32), GFP_KERNEL);
3514         if (!adapter->pmac_id)
3515                 return -ENOMEM;
3516
3517         status = be_cmd_get_cntl_attributes(adapter);
3518         if (status)
3519                 return status;
3520
3521         status = be_cmd_get_acpi_wol_cap(adapter);
3522         if (status) {
3523                 /* in case of a failure to get WOL capabilities
3524                  * check the exclusion list to determine WOL capability */
3525                 if (!be_is_wol_excluded(adapter))
3526                         adapter->wol_cap |= BE_WOL_CAP;
3527         }
3528
3529         if (be_is_wol_supported(adapter))
3530                 adapter->wol = true;
3531
3532         level = be_get_fw_log_level(adapter);
3533         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3534
3535         return 0;
3536 }
3537
3538 static int be_dev_type_check(struct be_adapter *adapter)
3539 {
3540         struct pci_dev *pdev = adapter->pdev;
3541         u32 sli_intf = 0;
3542
3543         switch (pdev->device) {
3544         case BE_DEVICE_ID1:
3545         case OC_DEVICE_ID1:
3546                 adapter->generation = BE_GEN2;
3547                 break;
3548         case BE_DEVICE_ID2:
3549         case OC_DEVICE_ID2:
3550                 adapter->generation = BE_GEN3;
3551                 break;
3552         case OC_DEVICE_ID3:
3553         case OC_DEVICE_ID4:
3554                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3555                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3556                                                 SLI_INTF_IF_TYPE_SHIFT;
3559                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3560                         !be_type_2_3(adapter)) {
3561                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3562                         return -EINVAL;
3563                 }
3564                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3565                                          SLI_INTF_FAMILY_SHIFT);
3566                 adapter->generation = BE_GEN3;
3567                 break;
3568         case OC_DEVICE_ID5:
3569                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3570                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3571                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3572                         return -EINVAL;
3573                 }
3574                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3575                                          SLI_INTF_FAMILY_SHIFT);
3576                 adapter->generation = BE_GEN3;
3577                 break;
3578         default:
3579                 adapter->generation = 0;
3580         }
3581
3582         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3583         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3584         return 0;
3585 }
3586
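/* Poll the SLIPORT_STATUS ready bit for up to 30 seconds */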
3587 static int lancer_wait_ready(struct be_adapter *adapter)
3588 {
3589 #define SLIPORT_READY_TIMEOUT 30
3590         u32 sliport_status;
3591         int status = 0, i;
3592
3593         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3594                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3595                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3596                         break;
3597
3598                 msleep(1000);
3599         }
3600
3601         if (i == SLIPORT_READY_TIMEOUT)
3602                 status = -1;
3603
3604         return status;
3605 }
3606
3607 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3608 {
3609         int status;
3610         u32 sliport_status, err, reset_needed;

3611         status = lancer_wait_ready(adapter);
3612         if (!status) {
3613                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3614                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3615                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3616                 if (err && reset_needed) {
3617                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3618                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3619
3620                         /* check adapter has corrected the error */
3621                         status = lancer_wait_ready(adapter);
3622                         sliport_status = ioread32(adapter->db +
3623                                                         SLIPORT_STATUS_OFFSET);
3624                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3625                                                 SLIPORT_STATUS_RN_MASK);
3626                         if (status || sliport_status)
3627                                 status = -1;
3628                 } else if (err || reset_needed) {
3629                         status = -1;
3630                 }
3631         }
3632         return status;
3633 }
3634
3635 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3636 {
3637         int status;
3638         u32 sliport_status;
3639
3640         if (adapter->eeh_err || adapter->ue_detected)
3641                 return;
3642
3643         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3644
3645         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3646                 dev_err(&adapter->pdev->dev,
3647                         "Adapter in error state. Trying to recover\n");
3649
3650                 status = lancer_test_and_set_rdy_state(adapter);
3651                 if (status)
3652                         goto err;
3653
3654                 netif_device_detach(adapter->netdev);
3655
3656                 if (netif_running(adapter->netdev))
3657                         be_close(adapter->netdev);
3658
3659                 be_clear(adapter);
3660
3661                 adapter->fw_timeout = false;
3662
3663                 status = be_setup(adapter);
3664                 if (status)
3665                         goto err;
3666
3667                 if (netif_running(adapter->netdev)) {
3668                         status = be_open(adapter->netdev);
3669                         if (status)
3670                                 goto err;
3671                 }
3672
3673                 netif_device_attach(adapter->netdev);
3674
3675                 dev_info(&adapter->pdev->dev,
3676                          "Adapter error recovery succeeded\n");
3677         }
3678         return;
3679 err:
3680         dev_err(&adapter->pdev->dev, "Adapter error recovery failed\n");
3682 }
3683
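/*
 * Per-second housekeeping: run Lancer error recovery and UE detection,
 * refresh the stats from FW, replenish any RX queues that ran out of
 * buffers and adapt the EQ interrupt delays; while the interface is
 * down that work is skipped and only pending MCC completions are
 * reaped.
 */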
3684 static void be_worker(struct work_struct *work)
3685 {
3686         struct be_adapter *adapter =
3687                 container_of(work, struct be_adapter, work.work);
3688         struct be_rx_obj *rxo;
3689         struct be_eq_obj *eqo;
3690         int i;
3691
3692         if (lancer_chip(adapter))
3693                 lancer_test_and_recover_fn_err(adapter);
3694
3695         be_detect_dump_ue(adapter);
3696
3697         /* when interrupts are not yet enabled, just reap any pending
3698          * mcc completions */
3699         if (!netif_running(adapter->netdev)) {
3700                 be_process_mcc(adapter);
3701                 goto reschedule;
3702         }
3703
3704         if (!adapter->stats_cmd_sent) {
3705                 if (lancer_chip(adapter))
3706                         lancer_cmd_get_pport_stats(adapter,
3707                                                 &adapter->stats_cmd);
3708                 else
3709                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3710         }
3711
3712         for_all_rx_queues(adapter, rxo, i) {
3713                 if (rxo->rx_post_starved) {
3714                         rxo->rx_post_starved = false;
3715                         be_post_rx_frags(rxo, GFP_KERNEL);
3716                 }
3717         }
3718
3719         for_all_evt_queues(adapter, eqo, i)
3720                 be_eqd_update(adapter, eqo);
3721
3722 reschedule:
3723         adapter->work_counter++;
3724         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3725 }
3726
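/*
 * The scratchpad CSR is written with 1 once adapter setup completes,
 * so a non-zero value here means the function is still initialized
 * from an earlier run (e.g. before a kexec/kdump) and needs a
 * function reset first.
 */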
3727 static bool be_reset_required(struct be_adapter *adapter)
3728 {
3729         u32 reg;
3730
3731         pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3732         return reg;
3733 }
3734
3735 static int __devinit be_probe(struct pci_dev *pdev,
3736                         const struct pci_device_id *pdev_id)
3737 {
3738         int status = 0;
3739         struct be_adapter *adapter;
3740         struct net_device *netdev;
3741
3742         status = pci_enable_device(pdev);
3743         if (status)
3744                 goto do_none;
3745
3746         status = pci_request_regions(pdev, DRV_NAME);
3747         if (status)
3748                 goto disable_dev;
3749         pci_set_master(pdev);
3750
3751         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3752         if (netdev == NULL) {
3753                 status = -ENOMEM;
3754                 goto rel_reg;
3755         }
3756         adapter = netdev_priv(netdev);
3757         adapter->pdev = pdev;
3758         pci_set_drvdata(pdev, adapter);
3759
3760         status = be_dev_type_check(adapter);
3761         if (status)
3762                 goto free_netdev;
3763
3764         adapter->netdev = netdev;
3765         SET_NETDEV_DEV(netdev, &pdev->dev);
3766
3767         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3768         if (!status) {
3769                 netdev->features |= NETIF_F_HIGHDMA;
3770         } else {
3771                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3772                 if (status) {
3773                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3774                         goto free_netdev;
3775                 }
3776         }
3777
3778         status = be_ctrl_init(adapter);
3779         if (status)
3780                 goto free_netdev;
3781
3782         if (lancer_chip(adapter)) {
3783                 status = lancer_wait_ready(adapter);
3784                 if (!status) {
3785                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3786                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3787                         status = lancer_test_and_set_rdy_state(adapter);
3788                 }
3789                 if (status) {
3790                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3791                         goto ctrl_clean;
3792                 }
3793         }
3794
3795         /* sync up with fw's ready state */
3796         if (be_physfn(adapter)) {
3797                 status = be_cmd_POST(adapter);
3798                 if (status)
3799                         goto ctrl_clean;
3800         }
3801
3802         /* tell fw we're ready to fire cmds */
3803         status = be_cmd_fw_init(adapter);
3804         if (status)
3805                 goto ctrl_clean;
3806
3807         if (be_reset_required(adapter)) {
3808                 status = be_cmd_reset_function(adapter);
3809                 if (status)
3810                         goto ctrl_clean;
3811         }
3812
3813         /* The INTR bit may be set in the card when probed by a kdump kernel
3814          * after a crash.
3815          */
3816         if (!lancer_chip(adapter))
3817                 be_intr_set(adapter, false);
3818
3819         status = be_stats_init(adapter);
3820         if (status)
3821                 goto ctrl_clean;
3822
3823         status = be_get_initial_config(adapter);
3824         if (status)
3825                 goto stats_clean;
3826
3827         INIT_DELAYED_WORK(&adapter->work, be_worker);
3828         adapter->rx_fc = adapter->tx_fc = true;
3829
3830         status = be_setup(adapter);
3831         if (status)
3832                 goto msix_disable;
3833
3834         be_netdev_init(netdev);
3835         status = register_netdev(netdev);
3836         if (status != 0)
3837                 goto unsetup;
3838
3839         be_roce_dev_add(adapter);
3840
3841         dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3842                 adapter->port_num);
3843
3844         return 0;
3845
3846 unsetup:
3847         be_clear(adapter);
3848 msix_disable:
3849         be_msix_disable(adapter);
3850 stats_clean:
3851         be_stats_cleanup(adapter);
3852 ctrl_clean:
3853         be_ctrl_cleanup(adapter);
3854 free_netdev:
3855         free_netdev(netdev);
3856         pci_set_drvdata(pdev, NULL);
3857 rel_reg:
3858         pci_release_regions(pdev);
3859 disable_dev:
3860         pci_disable_device(pdev);
3861 do_none:
3862         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3863         return status;
3864 }
3865
3866 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3867 {
3868         struct be_adapter *adapter = pci_get_drvdata(pdev);
3869         struct net_device *netdev = adapter->netdev;
3870
3871         if (adapter->wol)
3872                 be_setup_wol(adapter, true);
3873
3874         netif_device_detach(netdev);
3875         if (netif_running(netdev)) {
3876                 rtnl_lock();
3877                 be_close(netdev);
3878                 rtnl_unlock();
3879         }
3880         be_clear(adapter);
3881
3882         pci_save_state(pdev);
3883         pci_disable_device(pdev);
3884         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3885         return 0;
3886 }
3887
3888 static int be_resume(struct pci_dev *pdev)
3889 {
3890         int status = 0;
3891         struct be_adapter *adapter = pci_get_drvdata(pdev);
3892         struct net_device *netdev = adapter->netdev;
3893
3894         netif_device_detach(netdev);
3895
3896         status = pci_enable_device(pdev);
3897         if (status)
3898                 return status;
3899
3900         pci_set_power_state(pdev, PCI_D0);
3901         pci_restore_state(pdev);
3902
3903         /* tell fw we're ready to fire cmds */
3904         status = be_cmd_fw_init(adapter);
3905         if (status)
3906                 return status;
3907
3908         be_setup(adapter);
3909         if (netif_running(netdev)) {
3910                 rtnl_lock();
3911                 be_open(netdev);
3912                 rtnl_unlock();
3913         }
3914         netif_device_attach(netdev);
3915
3916         if (adapter->wol)
3917                 be_setup_wol(adapter, false);
3918
3919         return 0;
3920 }
3921
3922 /*
3923  * An FLR will stop BE from DMAing any data.
3924  */
3925 static void be_shutdown(struct pci_dev *pdev)
3926 {
3927         struct be_adapter *adapter = pci_get_drvdata(pdev);
3928
3929         if (!adapter)
3930                 return;
3931
3932         cancel_delayed_work_sync(&adapter->work);
3933
3934         netif_device_detach(adapter->netdev);
3935
3936         if (adapter->wol)
3937                 be_setup_wol(adapter, true);
3938
3939         be_cmd_reset_function(adapter);
3940
3941         pci_disable_device(pdev);
3942 }
3943
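/*
 * PCI EEH callbacks: on an error the device is detached and cleaned
 * up; after the slot reset brings the device back, be_eeh_resume()
 * re-runs the normal setup path and re-attaches the netdev.
 */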
3944 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3945                                 pci_channel_state_t state)
3946 {
3947         struct be_adapter *adapter = pci_get_drvdata(pdev);
3948         struct net_device *netdev = adapter->netdev;
3949
3950         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3951
3952         adapter->eeh_err = true;
3953
3954         netif_device_detach(netdev);
3955
3956         if (netif_running(netdev)) {
3957                 rtnl_lock();
3958                 be_close(netdev);
3959                 rtnl_unlock();
3960         }
3961         be_clear(adapter);
3962
3963         if (state == pci_channel_io_perm_failure)
3964                 return PCI_ERS_RESULT_DISCONNECT;
3965
3966         pci_disable_device(pdev);
3967
3968         /* The error could cause the FW to trigger a flash debug dump.
3969          * Resetting the card while flash dump is in progress
3970          * can cause it not to recover; wait for it to finish
3971          */
3972         ssleep(30);
3973         return PCI_ERS_RESULT_NEED_RESET;
3974 }
3975
3976 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3977 {
3978         struct be_adapter *adapter = pci_get_drvdata(pdev);
3979         int status;
3980
3981         dev_info(&adapter->pdev->dev, "EEH reset\n");
3982         adapter->eeh_err = false;
3983         adapter->ue_detected = false;
3984         adapter->fw_timeout = false;
3985
3986         status = pci_enable_device(pdev);
3987         if (status)
3988                 return PCI_ERS_RESULT_DISCONNECT;
3989
3990         pci_set_master(pdev);
3991         pci_set_power_state(pdev, PCI_D0);
3992         pci_restore_state(pdev);
3993
3994         /* Check if card is ok and fw is ready */
3995         status = be_cmd_POST(adapter);
3996         if (status)
3997                 return PCI_ERS_RESULT_DISCONNECT;
3998
3999         return PCI_ERS_RESULT_RECOVERED;
4000 }
4001
4002 static void be_eeh_resume(struct pci_dev *pdev)
4003 {
4004         int status = 0;
4005         struct be_adapter *adapter = pci_get_drvdata(pdev);
4006         struct net_device *netdev = adapter->netdev;
4007
4008         dev_info(&adapter->pdev->dev, "EEH resume\n");
4009
4010         pci_save_state(pdev);
4011
4012         /* tell fw we're ready to fire cmds */
4013         status = be_cmd_fw_init(adapter);
4014         if (status)
4015                 goto err;
4016
4017         status = be_setup(adapter);
4018         if (status)
4019                 goto err;
4020
4021         if (netif_running(netdev)) {
4022                 status = be_open(netdev);
4023                 if (status)
4024                         goto err;
4025         }
4026         netif_device_attach(netdev);
4027         return;
4028 err:
4029         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4030 }
4031
4032 static struct pci_error_handlers be_eeh_handlers = {
4033         .error_detected = be_eeh_err_detected,
4034         .slot_reset = be_eeh_reset,
4035         .resume = be_eeh_resume,
4036 };
4037
4038 static struct pci_driver be_driver = {
4039         .name = DRV_NAME,
4040         .id_table = be_dev_ids,
4041         .probe = be_probe,
4042         .remove = be_remove,
4043         .suspend = be_suspend,
4044         .resume = be_resume,
4045         .shutdown = be_shutdown,
4046         .err_handler = &be_eeh_handlers
4047 };
4048
4049 static int __init be_init_module(void)
4050 {
4051         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4052             rx_frag_size != 2048) {
4053                 printk(KERN_WARNING DRV_NAME
4054                         ": Module param rx_frag_size must be 2048/4096/8192."
4055                         " Using 2048\n");
4056                 rx_frag_size = 2048;
4057         }
4058
4059         return pci_register_driver(&be_driver);
4060 }
4061 module_init(be_init_module);
4062
4063 static void __exit be_exit_module(void)
4064 {
4065         pci_unregister_driver(&be_driver);
4066 }
4067 module_exit(be_exit_module);