/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return adapter->function_mode & (FLEX10_MODE | VNIC_MODE | UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

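/* Fallback interrupt control: toggle the host-interrupt enable bit in the
 * MEMBAR control register via PCI config space. Used when the FW command
 * issued from be_intr_set() fails.
 */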
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

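/* Doorbell helpers: the device is told about newly posted RQ/TxQ entries or
 * processed EQ/CQ entries by writing the queue id and a count to the queue's
 * doorbell register. The wmb() orders the queue-memory updates before the
 * doorbell write.
 */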
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For BE VF, MAC address is already activated by PF.
         * Hence only operation left is updating netdev->dev_addr.
         * Update it if user is passing the same MAC which was used
         * during configuring VF MAC from PF(Hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

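/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit driver
 * counter: a wrap is detected by comparing the new reading against the low
 * 16 bits of the accumulator, and 65536 is added when one occurred.
 */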
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* The erx HW counter wraps around after 65535; the driver
                 * accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

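/* Per-queue SW stats are read under the u64_stats sync point so that a
 * 64-bit counter is never seen half-updated on 32-bit hosts; the fetch is
 * retried if a writer raced with the reader.
 */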
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

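/* If the vlan priority requested by the stack is not available on this
 * function (not set in vlan_prio_bmap), replace it with the FW-recommended
 * priority before handing the tag to the HW.
 */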
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

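/* Map the skb's linear data and page frags for DMA and post one WRB per
 * mapped piece (plus the header WRB and an optional dummy WRB). On a
 * mapping error, everything mapped so far is unwound and 0 is returned.
 */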
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

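/* Insert the vlan tag(s) into the packet itself instead of using HW
 * tagging: the tag from the skb or the port's pvid first, then the outer
 * qnq tag if one is configured. *skip_hw_vlan is set so the Tx WRB
 * disables HW tagging for this packet.
 */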
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
        else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
                vlan_tag = adapter->pvid;

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

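/* Apply all HW/FW bug workarounds needed before a packet can be handed to
 * the Tx path: pad short Lancer packets, trim padded IPv4 packets, and
 * fall back to SW VLAN insertion (or drop) where HW tagging misbehaves.
 */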
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
         * pad such packets to a 36-byte length.
         */
        if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

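/* Program the RX filters from netdev state: promiscuous and allmulti
 * requests map directly to FW rx-filter commands; otherwise the uc-mac
 * list is re-synced and the multicast list programmed, falling back to
 * multicast promiscuous if the FW runs out of filters.
 */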
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

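/* Walk the PCI bus and count the VFs that belong to this PF; depending on
 * vf_state either all such VFs or only those currently assigned to a guest
 * are counted.
 */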
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

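/* Adaptive interrupt coalescing: once a second, derive the RX pkts/sec
 * rate for this EQ's queue and scale the EQ delay from it (clamped to the
 * [min_eqd, max_eqd] range), then update the EQ via FW only if the value
 * changed.
 */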
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

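/* Look up the page_info for an RX frag and, if this frag is the last user
 * of its (possibly compound) page, unmap the page from DMA. The caller
 * owns the page reference afterwards.
 */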
1362 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1363                                                 u16 frag_idx)
1364 {
1365         struct be_adapter *adapter = rxo->adapter;
1366         struct be_rx_page_info *rx_page_info;
1367         struct be_queue_info *rxq = &rxo->q;
1368
1369         rx_page_info = &rxo->page_info_tbl[frag_idx];
1370         BUG_ON(!rx_page_info->page);
1371
1372         if (rx_page_info->last_page_user) {
1373                 dma_unmap_page(&adapter->pdev->dev,
1374                                dma_unmap_addr(rx_page_info, bus),
1375                                adapter->big_page_size, DMA_FROM_DEVICE);
1376                 rx_page_info->last_page_user = false;
1377         }
1378
1379         atomic_dec(&rxq->used);
1380         return rx_page_info;
1381 }
1382
1383 /* Throwaway the data in the Rx completion */
1384 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1385                                 struct be_rx_compl_info *rxcp)
1386 {
1387         struct be_queue_info *rxq = &rxo->q;
1388         struct be_rx_page_info *page_info;
1389         u16 i, num_rcvd = rxcp->num_rcvd;
1390
1391         for (i = 0; i < num_rcvd; i++) {
1392                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1393                 put_page(page_info->page);
1394                 memset(page_info, 0, sizeof(*page_info));
1395                 index_inc(&rxcp->rxq_idx, rxq->len);
1396         }
1397 }
1398
1399 /*
1400  * skb_fill_rx_data forms a complete skb for an ether frame
1401  * indicated by rxcp.
1402  */
1403 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1404                              struct be_rx_compl_info *rxcp)
1405 {
1406         struct be_queue_info *rxq = &rxo->q;
1407         struct be_rx_page_info *page_info;
1408         u16 i, j;
1409         u16 hdr_len, curr_frag_len, remaining;
1410         u8 *start;
1411
1412         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1413         start = page_address(page_info->page) + page_info->page_offset;
1414         prefetch(start);
1415
1416         /* Copy data in the first descriptor of this completion */
1417         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1418
1419         skb->len = curr_frag_len;
1420         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1421                 memcpy(skb->data, start, curr_frag_len);
1422                 /* Complete packet has now been moved to data */
1423                 put_page(page_info->page);
1424                 skb->data_len = 0;
1425                 skb->tail += curr_frag_len;
1426         } else {
1427                 hdr_len = ETH_HLEN;
1428                 memcpy(skb->data, start, hdr_len);
1429                 skb_shinfo(skb)->nr_frags = 1;
1430                 skb_frag_set_page(skb, 0, page_info->page);
1431                 skb_shinfo(skb)->frags[0].page_offset =
1432                                         page_info->page_offset + hdr_len;
1433                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1434                 skb->data_len = curr_frag_len - hdr_len;
1435                 skb->truesize += rx_frag_size;
1436                 skb->tail += hdr_len;
1437         }
1438         page_info->page = NULL;
1439
1440         if (rxcp->pkt_size <= rx_frag_size) {
1441                 BUG_ON(rxcp->num_rcvd != 1);
1442                 return;
1443         }
1444
1445         /* More frags present for this completion */
1446         index_inc(&rxcp->rxq_idx, rxq->len);
1447         remaining = rxcp->pkt_size - curr_frag_len;
1448         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1449                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1450                 curr_frag_len = min(remaining, rx_frag_size);
1451
1452                 /* Coalesce all frags from the same physical page in one slot */
1453                 if (page_info->page_offset == 0) {
1454                         /* Fresh page */
1455                         j++;
1456                         skb_frag_set_page(skb, j, page_info->page);
1457                         skb_shinfo(skb)->frags[j].page_offset =
1458                                                         page_info->page_offset;
1459                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1460                         skb_shinfo(skb)->nr_frags++;
1461                 } else {
1462                         put_page(page_info->page);
1463                 }
1464
1465                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1466                 skb->len += curr_frag_len;
1467                 skb->data_len += curr_frag_len;
1468                 skb->truesize += rx_frag_size;
1469                 remaining -= curr_frag_len;
1470                 index_inc(&rxcp->rxq_idx, rxq->len);
1471                 page_info->page = NULL;
1472         }
1473         BUG_ON(j > MAX_SKB_FRAGS);
1474 }
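
/* Sketch of the fragment layout handled above: the HW splits a packet
 * across RX descriptors of rx_frag_size each, and consecutive descriptors
 * may point into the same physical page (e.g. offsets 0 and 2048 of one
 * 4K page with the default rx_frag_size = 2048).  Only a fragment at
 * page_offset 0 opens a new skb frag slot (j++); a fragment deeper in the
 * same page is merged into the current slot and its extra page reference
 * is dropped.  This keeps nr_frags bounded by pages, not by fragments.
 */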
1475
1476 /* Process the RX completion indicated by rxcp when GRO is disabled */
1477 static void be_rx_compl_process(struct be_rx_obj *rxo,
1478                                 struct be_rx_compl_info *rxcp)
1479 {
1480         struct be_adapter *adapter = rxo->adapter;
1481         struct net_device *netdev = adapter->netdev;
1482         struct sk_buff *skb;
1483
1484         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1485         if (unlikely(!skb)) {
1486                 rx_stats(rxo)->rx_drops_no_skbs++;
1487                 be_rx_compl_discard(rxo, rxcp);
1488                 return;
1489         }
1490
1491         skb_fill_rx_data(rxo, skb, rxcp);
1492
1493         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1494                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1495         else
1496                 skb_checksum_none_assert(skb);
1497
1498         skb->protocol = eth_type_trans(skb, netdev);
1499         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1500         if (netdev->features & NETIF_F_RXHASH)
1501                 skb->rxhash = rxcp->rss_hash;
1502
1503
1504         if (rxcp->vlanf)
1505                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1506
1507         netif_receive_skb(skb);
1508 }
1509
1510 /* Process the RX completion indicated by rxcp when GRO is enabled */
1511 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1512                              struct be_rx_compl_info *rxcp)
1513 {
1514         struct be_adapter *adapter = rxo->adapter;
1515         struct be_rx_page_info *page_info;
1516         struct sk_buff *skb = NULL;
1517         struct be_queue_info *rxq = &rxo->q;
1518         u16 remaining, curr_frag_len;
1519         u16 i, j;
1520
1521         skb = napi_get_frags(napi);
1522         if (!skb) {
1523                 be_rx_compl_discard(rxo, rxcp);
1524                 return;
1525         }
1526
1527         remaining = rxcp->pkt_size;
1528         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1529                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1530
1531                 curr_frag_len = min(remaining, rx_frag_size);
1532
1533                 /* Coalesce all frags from the same physical page in one slot */
1534                 if (i == 0 || page_info->page_offset == 0) {
1535                         /* First frag or Fresh page */
1536                         j++;
1537                         skb_frag_set_page(skb, j, page_info->page);
1538                         skb_shinfo(skb)->frags[j].page_offset =
1539                                                         page_info->page_offset;
1540                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1541                 } else {
1542                         put_page(page_info->page);
1543                 }
1544                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1545                 skb->truesize += rx_frag_size;
1546                 remaining -= curr_frag_len;
1547                 index_inc(&rxcp->rxq_idx, rxq->len);
1548                 memset(page_info, 0, sizeof(*page_info));
1549         }
1550         BUG_ON(j > MAX_SKB_FRAGS);
1551
1552         skb_shinfo(skb)->nr_frags = j + 1;
1553         skb->len = rxcp->pkt_size;
1554         skb->data_len = rxcp->pkt_size;
1555         skb->ip_summed = CHECKSUM_UNNECESSARY;
1556         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1557         if (adapter->netdev->features & NETIF_F_RXHASH)
1558                 skb->rxhash = rxcp->rss_hash;
1559
1560         if (rxcp->vlanf)
1561                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1562
1563         napi_gro_frags(napi);
1564 }
1565
1566 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1567                                  struct be_rx_compl_info *rxcp)
1568 {
1569         rxcp->pkt_size =
1570                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1571         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1572         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1573         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1574         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1575         rxcp->ip_csum =
1576                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1577         rxcp->l4_csum =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1579         rxcp->ipv6 =
1580                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1581         rxcp->rxq_idx =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1583         rxcp->num_rcvd =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1585         rxcp->pkt_type =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1587         rxcp->rss_hash =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1589         if (rxcp->vlanf) {
1590                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1591                                           compl);
1592                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1593                                                compl);
1594         }
1595         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1596 }
1597
1598 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1599                                  struct be_rx_compl_info *rxcp)
1600 {
1601         rxcp->pkt_size =
1602                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1603         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1604         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1605         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1606         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1607         rxcp->ip_csum =
1608                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1609         rxcp->l4_csum =
1610                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1611         rxcp->ipv6 =
1612                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1613         rxcp->rxq_idx =
1614                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1615         rxcp->num_rcvd =
1616                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1617         rxcp->pkt_type =
1618                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1619         rxcp->rss_hash =
1620                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1621         if (rxcp->vlanf) {
1622                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1623                                           compl);
1624                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1625                                                compl);
1626         }
1627         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1628         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1629                                       ip_frag, compl);
1630 }
1631
1632 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1633 {
1634         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1635         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1636         struct be_adapter *adapter = rxo->adapter;
1637
1638         /* For checking the valid bit it is OK to use either definition, as
1639          * the valid bit is at the same position in both v0 and v1 Rx compls */
1640         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1641                 return NULL;
1642
1643         rmb();
1644         be_dws_le_to_cpu(compl, sizeof(*compl));
1645
1646         if (adapter->be3_native)
1647                 be_parse_rx_compl_v1(compl, rxcp);
1648         else
1649                 be_parse_rx_compl_v0(compl, rxcp);
1650
1651         if (rxcp->ip_frag)
1652                 rxcp->l4_csum = 0;
1653
1654         if (rxcp->vlanf) {
1655                 /* vlanf could be wrongly set in some cards;
1656                  * ignore it if vtm is not set */
1657                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1658                         rxcp->vlanf = 0;
1659
1660                 if (!lancer_chip(adapter))
1661                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1662
1663                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1664                     !adapter->vlan_tag[rxcp->vlan_tag])
1665                         rxcp->vlanf = 0;
1666         }
1667
1668         /* As the compl has been parsed, reset it; we won't touch it again */
1669         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1670
1671         queue_tail_inc(&rxo->cq);
1672         return rxcp;
1673 }
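
/* Note on the completion-ring handshake above: HW fills the CQ entry and
 * sets the valid bit last, so the driver tests the valid bit first and
 * then issues rmb() to ensure the rest of the entry is not read before
 * the valid bit was observed.  Clearing the valid bit after parsing lets
 * the slot be recognized as fresh when HW wraps around and reuses it.
 */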
1674
1675 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1676 {
1677         u32 order = get_order(size);
1678
1679         if (order > 0)
1680                 gfp |= __GFP_COMP;
1681         return alloc_pages(gfp, order);
1682 }
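
/* __GFP_COMP above makes an order > 0 allocation a compound page, so the
 * whole allocation is refcounted as one unit.  This matters because
 * be_post_rx_frags() below takes one page reference per rx_frag_size
 * chunk (get_page) and each consumer drops its reference (put_page)
 * independently.
 */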
1683
1684 /*
1685  * Allocate a page, split it into fragments of size rx_frag_size and post as
1686  * receive buffers to BE
1687  */
1688 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1689 {
1690         struct be_adapter *adapter = rxo->adapter;
1691         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1692         struct be_queue_info *rxq = &rxo->q;
1693         struct page *pagep = NULL;
1694         struct be_eth_rx_d *rxd;
1695         u64 page_dmaaddr = 0, frag_dmaaddr;
1696         u32 posted, page_offset = 0;
1697
1698         page_info = &rxo->page_info_tbl[rxq->head];
1699         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1700                 if (!pagep) {
1701                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1702                         if (unlikely(!pagep)) {
1703                                 rx_stats(rxo)->rx_post_fail++;
1704                                 break;
1705                         }
1706                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1707                                                     0, adapter->big_page_size,
1708                                                     DMA_FROM_DEVICE);
1709                         page_info->page_offset = 0;
1710                 } else {
1711                         get_page(pagep);
1712                         page_info->page_offset = page_offset + rx_frag_size;
1713                 }
1714                 page_offset = page_info->page_offset;
1715                 page_info->page = pagep;
1716                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1717                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1718
1719                 rxd = queue_head_node(rxq);
1720                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1721                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1722
1723                 /* Any space left in the current big page for another frag? */
1724                 if ((page_offset + rx_frag_size + rx_frag_size) >
1725                                         adapter->big_page_size) {
1726                         pagep = NULL;
1727                         page_info->last_page_user = true;
1728                 }
1729
1730                 prev_page_info = page_info;
1731                 queue_head_inc(rxq);
1732                 page_info = &rxo->page_info_tbl[rxq->head];
1733         }
1734         if (pagep)
1735                 prev_page_info->last_page_user = true;
1736
1737         if (posted) {
1738                 atomic_add(posted, &rxq->used);
1739                 be_rxq_notify(adapter, rxq->id, posted);
1740         } else if (atomic_read(&rxq->used) == 0) {
1741                 /* Let be_worker replenish when memory is available */
1742                 rxo->rx_post_starved = true;
1743         }
1744 }
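
/* Worked example of the posting loop above with the module default
 * rx_frag_size = 2048 on a 4K PAGE_SIZE (so big_page_size = 4096):
 *
 *   frag 0: page_offset = 0    (fresh page, dma_map_page)
 *   frag 1: page_offset = 2048 (get_page; last_page_user = true)
 *
 * last_page_user marks the final fragment carved out of a big page; the
 * completion that consumes it also dma_unmaps the whole page (see
 * get_rx_page_info()).
 */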
1745
1746 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1747 {
1748         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1749
1750         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1751                 return NULL;
1752
1753         rmb();
1754         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1755
1756         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1757
1758         queue_tail_inc(tx_cq);
1759         return txcp;
1760 }
1761
1762 static u16 be_tx_compl_process(struct be_adapter *adapter,
1763                 struct be_tx_obj *txo, u16 last_index)
1764 {
1765         struct be_queue_info *txq = &txo->q;
1766         struct be_eth_wrb *wrb;
1767         struct sk_buff **sent_skbs = txo->sent_skb_list;
1768         struct sk_buff *sent_skb;
1769         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1770         bool unmap_skb_hdr = true;
1771
1772         sent_skb = sent_skbs[txq->tail];
1773         BUG_ON(!sent_skb);
1774         sent_skbs[txq->tail] = NULL;
1775
1776         /* skip header wrb */
1777         queue_tail_inc(txq);
1778
1779         do {
1780                 cur_index = txq->tail;
1781                 wrb = queue_tail_node(txq);
1782                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1783                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1784                 unmap_skb_hdr = false;
1785
1786                 num_wrbs++;
1787                 queue_tail_inc(txq);
1788         } while (cur_index != last_index);
1789
1790         kfree_skb(sent_skb);
1791         return num_wrbs;
1792 }
1793
1794 /* Return the number of events in the event queue */
1795 static inline int events_get(struct be_eq_obj *eqo)
1796 {
1797         struct be_eq_entry *eqe;
1798         int num = 0;
1799
1800         do {
1801                 eqe = queue_tail_node(&eqo->q);
1802                 if (eqe->evt == 0)
1803                         break;
1804
1805                 rmb();
1806                 eqe->evt = 0;
1807                 num++;
1808                 queue_tail_inc(&eqo->q);
1809         } while (true);
1810
1811         return num;
1812 }
1813
1814 /* Leaves the EQ in a disarmed state */
1815 static void be_eq_clean(struct be_eq_obj *eqo)
1816 {
1817         int num = events_get(eqo);
1818
1819         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1820 }
1821
1822 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1823 {
1824         struct be_rx_page_info *page_info;
1825         struct be_queue_info *rxq = &rxo->q;
1826         struct be_queue_info *rx_cq = &rxo->cq;
1827         struct be_rx_compl_info *rxcp;
1828         struct be_adapter *adapter = rxo->adapter;
1829         int flush_wait = 0;
1830         u16 tail;
1831
1832         /* Consume pending rx completions.
1833          * Wait for the flush completion (identified by zero num_rcvd)
1834          * to arrive. Notify CQ even when there are no more CQ entries
1835          * for HW to flush partially coalesced CQ entries.
1836          * In Lancer, there is no need to wait for flush compl.
1837          */
1838         for (;;) {
1839                 rxcp = be_rx_compl_get(rxo);
1840                 if (rxcp == NULL) {
1841                         if (lancer_chip(adapter))
1842                                 break;
1843
1844                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1845                                 dev_warn(&adapter->pdev->dev,
1846                                          "did not receive flush compl\n");
1847                                 break;
1848                         }
1849                         be_cq_notify(adapter, rx_cq->id, true, 0);
1850                         mdelay(1);
1851                 } else {
1852                         be_rx_compl_discard(rxo, rxcp);
1853                         be_cq_notify(adapter, rx_cq->id, false, 1);
1854                         if (rxcp->num_rcvd == 0)
1855                                 break;
1856                 }
1857         }
1858
1859         /* After cleanup, leave the CQ in unarmed state */
1860         be_cq_notify(adapter, rx_cq->id, false, 0);
1861
1862         /* Then free posted rx buffers that were not used */
1863         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1864         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1865                 page_info = get_rx_page_info(rxo, tail);
1866                 put_page(page_info->page);
1867                 memset(page_info, 0, sizeof(*page_info));
1868         }
1869         BUG_ON(atomic_read(&rxq->used));
1870         rxq->tail = rxq->head = 0;
1871 }
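
/* The tail recomputation above is plain modular ring arithmetic: the
 * oldest still-posted buffer sits "used" slots behind the head.  For
 * example, with rxq->len = 1024, rxq->head = 5 and 3 unconsumed buffers:
 *
 *   tail = (5 + 1024 - 3) % 1024 = 2
 *
 * so slots 2, 3 and 4 are freed.
 */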
1872
1873 static void be_tx_compl_clean(struct be_adapter *adapter)
1874 {
1875         struct be_tx_obj *txo;
1876         struct be_queue_info *txq;
1877         struct be_eth_tx_compl *txcp;
1878         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1879         struct sk_buff *sent_skb;
1880         bool dummy_wrb;
1881         int i, pending_txqs;
1882
1883         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1884         do {
1885                 pending_txqs = adapter->num_tx_qs;
1886
1887                 for_all_tx_queues(adapter, txo, i) {
1888                         txq = &txo->q;
1889                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1890                                 end_idx =
1891                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1892                                                       wrb_index, txcp);
1893                                 num_wrbs += be_tx_compl_process(adapter, txo,
1894                                                                 end_idx);
1895                                 cmpl++;
1896                         }
1897                         if (cmpl) {
1898                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1899                                 atomic_sub(num_wrbs, &txq->used);
1900                                 cmpl = 0;
1901                                 num_wrbs = 0;
1902                         }
1903                         if (atomic_read(&txq->used) == 0)
1904                                 pending_txqs--;
1905                 }
1906
1907                 if (pending_txqs == 0 || ++timeo > 200)
1908                         break;
1909
1910                 mdelay(1);
1911         } while (true);
1912
1913         for_all_tx_queues(adapter, txo, i) {
1914                 txq = &txo->q;
1915                 if (atomic_read(&txq->used))
1916                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1917                                 atomic_read(&txq->used));
1918
1919                 /* free posted tx for which compls will never arrive */
1920                 while (atomic_read(&txq->used)) {
1921                         sent_skb = txo->sent_skb_list[txq->tail];
1922                         end_idx = txq->tail;
1923                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1924                                                    &dummy_wrb);
1925                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1926                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1927                         atomic_sub(num_wrbs, &txq->used);
1928                 }
1929         }
1930 }
1931
1932 static void be_evt_queues_destroy(struct be_adapter *adapter)
1933 {
1934         struct be_eq_obj *eqo;
1935         int i;
1936
1937         for_all_evt_queues(adapter, eqo, i) {
1938                 if (eqo->q.created) {
1939                         be_eq_clean(eqo);
1940                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1941                 }
1942                 be_queue_free(adapter, &eqo->q);
1943         }
1944 }
1945
1946 static int be_evt_queues_create(struct be_adapter *adapter)
1947 {
1948         struct be_queue_info *eq;
1949         struct be_eq_obj *eqo;
1950         int i, rc;
1951
1952         adapter->num_evt_qs = num_irqs(adapter);
1953
1954         for_all_evt_queues(adapter, eqo, i) {
1955                 eqo->adapter = adapter;
1956                 eqo->tx_budget = BE_TX_BUDGET;
1957                 eqo->idx = i;
1958                 eqo->max_eqd = BE_MAX_EQD;
1959                 eqo->enable_aic = true;
1960
1961                 eq = &eqo->q;
1962                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1963                                         sizeof(struct be_eq_entry));
1964                 if (rc)
1965                         return rc;
1966
1967                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1968                 if (rc)
1969                         return rc;
1970         }
1971         return 0;
1972 }
1973
1974 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1975 {
1976         struct be_queue_info *q;
1977
1978         q = &adapter->mcc_obj.q;
1979         if (q->created)
1980                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1981         be_queue_free(adapter, q);
1982
1983         q = &adapter->mcc_obj.cq;
1984         if (q->created)
1985                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1986         be_queue_free(adapter, q);
1987 }
1988
1989 /* Must be called only after TX qs are created as MCC shares TX EQ */
1990 static int be_mcc_queues_create(struct be_adapter *adapter)
1991 {
1992         struct be_queue_info *q, *cq;
1993
1994         cq = &adapter->mcc_obj.cq;
1995         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1996                         sizeof(struct be_mcc_compl)))
1997                 goto err;
1998
1999         /* Use the default EQ for MCC completions */
2000         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2001                 goto mcc_cq_free;
2002
2003         q = &adapter->mcc_obj.q;
2004         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2005                 goto mcc_cq_destroy;
2006
2007         if (be_cmd_mccq_create(adapter, q, cq))
2008                 goto mcc_q_free;
2009
2010         return 0;
2011
2012 mcc_q_free:
2013         be_queue_free(adapter, q);
2014 mcc_cq_destroy:
2015         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2016 mcc_cq_free:
2017         be_queue_free(adapter, cq);
2018 err:
2019         return -1;
2020 }
2021
2022 static void be_tx_queues_destroy(struct be_adapter *adapter)
2023 {
2024         struct be_queue_info *q;
2025         struct be_tx_obj *txo;
2026         u8 i;
2027
2028         for_all_tx_queues(adapter, txo, i) {
2029                 q = &txo->q;
2030                 if (q->created)
2031                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2032                 be_queue_free(adapter, q);
2033
2034                 q = &txo->cq;
2035                 if (q->created)
2036                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2037                 be_queue_free(adapter, q);
2038         }
2039 }
2040
2041 static int be_num_txqs_want(struct be_adapter *adapter)
2042 {
2043         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2044             be_is_mc(adapter) ||
2045             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2046             BE2_chip(adapter))
2047                 return 1;
2048         else
2049                 return adapter->max_tx_queues;
2050 }
2051
2052 static int be_tx_cqs_create(struct be_adapter *adapter)
2053 {
2054         struct be_queue_info *cq, *eq;
2055         int status;
2056         struct be_tx_obj *txo;
2057         u8 i;
2058
2059         adapter->num_tx_qs = be_num_txqs_want(adapter);
2060         if (adapter->num_tx_qs != MAX_TX_QS) {
2061                 rtnl_lock();
2062                 netif_set_real_num_tx_queues(adapter->netdev,
2063                         adapter->num_tx_qs);
2064                 rtnl_unlock();
2065         }
2066
2067         for_all_tx_queues(adapter, txo, i) {
2068                 cq = &txo->cq;
2069                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2070                                         sizeof(struct be_eth_tx_compl));
2071                 if (status)
2072                         return status;
2073
2074                 /* If num_evt_qs is less than num_tx_qs, then more than
2075                  * one txq shares an eq
2076                  */
2077                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2078                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2079                 if (status)
2080                         return status;
2081         }
2082         return 0;
2083 }
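
/* Example of the cq-to-eq spreading above: with num_tx_qs = 8 and
 * num_evt_qs = 4, the "i % num_evt_qs" mapping gives
 *
 *   txq 0,4 -> eq0   txq 1,5 -> eq1   txq 2,6 -> eq2   txq 3,7 -> eq3
 *
 * i.e. TX completion queues are spread round-robin over the event queues,
 * and be_poll() later walks them back with the same stride.
 */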
2084
2085 static int be_tx_qs_create(struct be_adapter *adapter)
2086 {
2087         struct be_tx_obj *txo;
2088         int i, status;
2089
2090         for_all_tx_queues(adapter, txo, i) {
2091                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2092                                         sizeof(struct be_eth_wrb));
2093                 if (status)
2094                         return status;
2095
2096                 status = be_cmd_txq_create(adapter, txo);
2097                 if (status)
2098                         return status;
2099         }
2100
2101         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2102                  adapter->num_tx_qs);
2103         return 0;
2104 }
2105
2106 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2107 {
2108         struct be_queue_info *q;
2109         struct be_rx_obj *rxo;
2110         int i;
2111
2112         for_all_rx_queues(adapter, rxo, i) {
2113                 q = &rxo->cq;
2114                 if (q->created)
2115                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2116                 be_queue_free(adapter, q);
2117         }
2118 }
2119
2120 static int be_rx_cqs_create(struct be_adapter *adapter)
2121 {
2122         struct be_queue_info *eq, *cq;
2123         struct be_rx_obj *rxo;
2124         int rc, i;
2125
2126         /* We'll create as many RSS rings as there are irqs.
2127          * But when there's only one irq there's no use creating RSS rings
2128          */
2129         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2130                                 num_irqs(adapter) + 1 : 1;
2131         if (adapter->num_rx_qs != MAX_RX_QS) {
2132                 rtnl_lock();
2133                 netif_set_real_num_rx_queues(adapter->netdev,
2134                                              adapter->num_rx_qs);
2135                 rtnl_unlock();
2136         }
2137
2138         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2139         for_all_rx_queues(adapter, rxo, i) {
2140                 rxo->adapter = adapter;
2141                 cq = &rxo->cq;
2142                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2143                                 sizeof(struct be_eth_rx_compl));
2144                 if (rc)
2145                         return rc;
2146
2147                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2148                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2149                 if (rc)
2150                         return rc;
2151         }
2152
2153         dev_info(&adapter->pdev->dev,
2154                  "created %d RSS queue(s) and 1 default RX queue\n",
2155                  adapter->num_rx_qs - 1);
2156         return 0;
2157 }
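
/* Example of the RXQ count chosen above: with 4 irqs the driver creates
 * num_rx_qs = 4 + 1 = 5 (four RSS rings plus the default RX queue),
 * matching the "created %d RSS queue(s) and 1 default RX queue" message;
 * with a single irq only the default queue is created.
 */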
2158
2159 static irqreturn_t be_intx(int irq, void *dev)
2160 {
2161         struct be_eq_obj *eqo = dev;
2162         struct be_adapter *adapter = eqo->adapter;
2163         int num_evts = 0;
2164
2165         /* IRQ is not expected when NAPI is scheduled as the EQ
2166          * will not be armed.
2167          * But this can happen on Lancer INTx, where it takes
2168          * a while to de-assert INTx, or in BE2, where occasionally
2169          * an interrupt may be raised even when EQ is unarmed.
2170          * If NAPI is already scheduled, then counting & notifying
2171          * events will orphan them.
2172          */
2173         if (napi_schedule_prep(&eqo->napi)) {
2174                 num_evts = events_get(eqo);
2175                 __napi_schedule(&eqo->napi);
2176                 if (num_evts)
2177                         eqo->spurious_intr = 0;
2178         }
2179         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2180
2181         /* Return IRQ_HANDLED only for the first spurious intr
2182          * after a valid intr to stop the kernel from branding
2183          * this irq as a bad one!
2184          */
2185         if (num_evts || eqo->spurious_intr++ == 0)
2186                 return IRQ_HANDLED;
2187         else
2188                 return IRQ_NONE;
2189 }
2190
2191 static irqreturn_t be_msix(int irq, void *dev)
2192 {
2193         struct be_eq_obj *eqo = dev;
2194
2195         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2196         napi_schedule(&eqo->napi);
2197         return IRQ_HANDLED;
2198 }
2199
2200 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2201 {
2202         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2203 }
2204
2205 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2206                         int budget)
2207 {
2208         struct be_adapter *adapter = rxo->adapter;
2209         struct be_queue_info *rx_cq = &rxo->cq;
2210         struct be_rx_compl_info *rxcp;
2211         u32 work_done;
2212
2213         for (work_done = 0; work_done < budget; work_done++) {
2214                 rxcp = be_rx_compl_get(rxo);
2215                 if (!rxcp)
2216                         break;
2217
2218                 /* Is it a flush compl that has no data? */
2219                 if (unlikely(rxcp->num_rcvd == 0))
2220                         goto loop_continue;
2221
2222                 /* Discard compls with partial DMA (Lancer B0) */
2223                 if (unlikely(!rxcp->pkt_size)) {
2224                         be_rx_compl_discard(rxo, rxcp);
2225                         goto loop_continue;
2226                 }
2227
2228                 /* On BE, drop pkts that arrive due to imperfect filtering in
2229                  * promiscuous mode on some SKUs
2230                  */
2231                 if (unlikely(rxcp->port != adapter->port_num &&
2232                                 !lancer_chip(adapter))) {
2233                         be_rx_compl_discard(rxo, rxcp);
2234                         goto loop_continue;
2235                 }
2236
2237                 if (do_gro(rxcp))
2238                         be_rx_compl_process_gro(rxo, napi, rxcp);
2239                 else
2240                         be_rx_compl_process(rxo, rxcp);
2241 loop_continue:
2242                 be_rx_stats_update(rxo, rxcp);
2243         }
2244
2245         if (work_done) {
2246                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2247
2248                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2249                         be_post_rx_frags(rxo, GFP_ATOMIC);
2250         }
2251
2252         return work_done;
2253 }
2254
2255 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2256                           int budget, int idx)
2257 {
2258         struct be_eth_tx_compl *txcp;
2259         int num_wrbs = 0, work_done;
2260
2261         for (work_done = 0; work_done < budget; work_done++) {
2262                 txcp = be_tx_compl_get(&txo->cq);
2263                 if (!txcp)
2264                         break;
2265                 num_wrbs += be_tx_compl_process(adapter, txo,
2266                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2267                                         wrb_index, txcp));
2268         }
2269
2270         if (work_done) {
2271                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2272                 atomic_sub(num_wrbs, &txo->q.used);
2273
2274                 /* As Tx wrbs have been freed up, wake up netdev queue
2275                  * if it was stopped due to lack of tx wrbs.  */
2276                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2277                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2278                         netif_wake_subqueue(adapter->netdev, idx);
2279                 }
2280
2281                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2282                 tx_stats(txo)->tx_compl += work_done;
2283                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2284         }
2285         return (work_done < budget); /* Done */
2286 }
2287
2288 int be_poll(struct napi_struct *napi, int budget)
2289 {
2290         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2291         struct be_adapter *adapter = eqo->adapter;
2292         int max_work = 0, work, i, num_evts;
2293         bool tx_done;
2294
2295         num_evts = events_get(eqo);
2296
2297         /* Process all TXQs serviced by this EQ */
2298         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2299                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2300                                         eqo->tx_budget, i);
2301                 if (!tx_done)
2302                         max_work = budget;
2303         }
2304
2305         /* This loop will iterate twice for EQ0 in which
2306          * completions of the last RXQ (default one) are also processed.
2307          * For other EQs the loop iterates only once.
2308          */
2309         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2310                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2311                 max_work = max(work, max_work);
2312         }
2313
2314         if (is_mcc_eqo(eqo))
2315                 be_process_mcc(adapter);
2316
2317         if (max_work < budget) {
2318                 napi_complete(napi);
2319                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2320         } else {
2321                 /* As we'll continue in polling mode, count and clear events */
2322                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2323         }
2324         return max_work;
2325 }
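
/* Example of the striding in be_poll() above: with num_evt_qs = 4 and
 * num_rx_qs = 5, eq0 (idx 0) services rxq0 and rxq4 (the default RXQ),
 * while eq1..eq3 each service exactly one RSS ring -- which is why the
 * RX loop runs twice only for EQ0.
 */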
2326
2327 void be_detect_error(struct be_adapter *adapter)
2328 {
2329         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2330         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2331         u32 i;
2332
2333         if (be_hw_error(adapter))
2334                 return;
2335
2336         if (lancer_chip(adapter)) {
2337                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2338                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2339                         sliport_err1 = ioread32(adapter->db +
2340                                         SLIPORT_ERROR1_OFFSET);
2341                         sliport_err2 = ioread32(adapter->db +
2342                                         SLIPORT_ERROR2_OFFSET);
2343                 }
2344         } else {
2345                 pci_read_config_dword(adapter->pdev,
2346                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2347                 pci_read_config_dword(adapter->pdev,
2348                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2349                 pci_read_config_dword(adapter->pdev,
2350                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2351                 pci_read_config_dword(adapter->pdev,
2352                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2353
2354                 ue_lo = (ue_lo & ~ue_lo_mask);
2355                 ue_hi = (ue_hi & ~ue_hi_mask);
2356         }
2357
2358         /* On certain platforms BE hardware can indicate spurious UEs.
2359          * Hence hw_error is not set on UE detection: in the case of a real
2360          * UE the h/w is allowed to stop working completely on its own.
2361          */
2362         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2363                 adapter->hw_error = true;
2364                 dev_err(&adapter->pdev->dev,
2365                         "Error detected in the card\n");
2366         }
2367
2368         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2369                 dev_err(&adapter->pdev->dev,
2370                         "ERR: sliport status 0x%x\n", sliport_status);
2371                 dev_err(&adapter->pdev->dev,
2372                         "ERR: sliport error1 0x%x\n", sliport_err1);
2373                 dev_err(&adapter->pdev->dev,
2374                         "ERR: sliport error2 0x%x\n", sliport_err2);
2375         }
2376
2377         if (ue_lo) {
2378                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2379                         if (ue_lo & 1)
2380                                 dev_err(&adapter->pdev->dev,
2381                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2382                 }
2383         }
2384
2385         if (ue_hi) {
2386                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2387                         if (ue_hi & 1)
2388                                 dev_err(&adapter->pdev->dev,
2389                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2390                 }
2391         }
2392
2393 }
2394
2395 static void be_msix_disable(struct be_adapter *adapter)
2396 {
2397         if (msix_enabled(adapter)) {
2398                 pci_disable_msix(adapter->pdev);
2399                 adapter->num_msix_vec = 0;
2400         }
2401 }
2402
2403 static uint be_num_rss_want(struct be_adapter *adapter)
2404 {
2405         u32 num = 0;
2406
2407         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2408             (lancer_chip(adapter) ||
2409              (!sriov_want(adapter) && be_physfn(adapter)))) {
2410                 num = adapter->max_rss_queues;
2411                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2412         }
2413         return num;
2414 }
2415
2416 static int be_msix_enable(struct be_adapter *adapter)
2417 {
2418 #define BE_MIN_MSIX_VECTORS             1
2419         int i, status, num_vec, num_roce_vec = 0;
2420         struct device *dev = &adapter->pdev->dev;
2421
2422         /* If RSS queues are not used, need a vec for default RX Q */
2423         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2424         if (be_roce_supported(adapter)) {
2425                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2426                                         (num_online_cpus() + 1));
2427                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2428                 num_vec += num_roce_vec;
2429                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2430         }
2431         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2432
2433         for (i = 0; i < num_vec; i++)
2434                 adapter->msix_entries[i].entry = i;
2435
2436         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2437         if (status == 0) {
2438                 goto done;
2439         } else if (status >= BE_MIN_MSIX_VECTORS) {
2440                 num_vec = status;
2441                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2442                                          num_vec);
2443                 if (!status)
2444                         goto done;
2445         }
2446
2447         dev_warn(dev, "MSIx enable failed\n");
2448         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2449         if (!be_physfn(adapter))
2450                 return status;
2451         return 0;
2452 done:
2453         if (be_roce_supported(adapter)) {
2454                 if (num_vec > num_roce_vec) {
2455                         adapter->num_msix_vec = num_vec - num_roce_vec;
2456                         adapter->num_msix_roce_vec =
2457                                 num_vec - adapter->num_msix_vec;
2458                 } else {
2459                         adapter->num_msix_vec = num_vec;
2460                         adapter->num_msix_roce_vec = 0;
2461                 }
2462         } else
2463                 adapter->num_msix_vec = num_vec;
2464         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2465         return 0;
2466 }
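
/* Note on the retry above: the legacy pci_enable_msix() returns 0 on
 * success, a negative errno on failure, or a positive count when fewer
 * vectors than requested are available.  The driver retries once with
 * that smaller count before giving up and (on a PF) falling back to INTx.
 */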
2467
2468 static inline int be_msix_vec_get(struct be_adapter *adapter,
2469                                 struct be_eq_obj *eqo)
2470 {
2471         return adapter->msix_entries[eqo->idx].vector;
2472 }
2473
2474 static int be_msix_register(struct be_adapter *adapter)
2475 {
2476         struct net_device *netdev = adapter->netdev;
2477         struct be_eq_obj *eqo;
2478         int status, i, vec;
2479
2480         for_all_evt_queues(adapter, eqo, i) {
2481                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2482                 vec = be_msix_vec_get(adapter, eqo);
2483                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2484                 if (status)
2485                         goto err_msix;
2486         }
2487
2488         return 0;
2489 err_msix:
2490         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2491                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2492         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2493                 status);
2494         be_msix_disable(adapter);
2495         return status;
2496 }
2497
2498 static int be_irq_register(struct be_adapter *adapter)
2499 {
2500         struct net_device *netdev = adapter->netdev;
2501         int status;
2502
2503         if (msix_enabled(adapter)) {
2504                 status = be_msix_register(adapter);
2505                 if (status == 0)
2506                         goto done;
2507                 /* INTx is not supported for VF */
2508                 if (!be_physfn(adapter))
2509                         return status;
2510         }
2511
2512         /* INTx: only the first EQ is used */
2513         netdev->irq = adapter->pdev->irq;
2514         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2515                              &adapter->eq_obj[0]);
2516         if (status) {
2517                 dev_err(&adapter->pdev->dev,
2518                         "INTx request IRQ failed - err %d\n", status);
2519                 return status;
2520         }
2521 done:
2522         adapter->isr_registered = true;
2523         return 0;
2524 }
2525
2526 static void be_irq_unregister(struct be_adapter *adapter)
2527 {
2528         struct net_device *netdev = adapter->netdev;
2529         struct be_eq_obj *eqo;
2530         int i;
2531
2532         if (!adapter->isr_registered)
2533                 return;
2534
2535         /* INTx */
2536         if (!msix_enabled(adapter)) {
2537                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2538                 goto done;
2539         }
2540
2541         /* MSIx */
2542         for_all_evt_queues(adapter, eqo, i)
2543                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2544
2545 done:
2546         adapter->isr_registered = false;
2547 }
2548
2549 static void be_rx_qs_destroy(struct be_adapter *adapter)
2550 {
2551         struct be_queue_info *q;
2552         struct be_rx_obj *rxo;
2553         int i;
2554
2555         for_all_rx_queues(adapter, rxo, i) {
2556                 q = &rxo->q;
2557                 if (q->created) {
2558                         be_cmd_rxq_destroy(adapter, q);
2559                         be_rx_cq_clean(rxo);
2560                 }
2561                 be_queue_free(adapter, q);
2562         }
2563 }
2564
2565 static int be_close(struct net_device *netdev)
2566 {
2567         struct be_adapter *adapter = netdev_priv(netdev);
2568         struct be_eq_obj *eqo;
2569         int i;
2570
2571         be_roce_dev_close(adapter);
2572
2573         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2574                 for_all_evt_queues(adapter, eqo, i)
2575                         napi_disable(&eqo->napi);
2576                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2577         }
2578
2579         be_async_mcc_disable(adapter);
2580
2581         /* Wait for all pending tx completions to arrive so that
2582          * all tx skbs are freed.
2583          */
2584         be_tx_compl_clean(adapter);
2585         netif_tx_disable(netdev);
2586
2587         be_rx_qs_destroy(adapter);
2588
2589         for_all_evt_queues(adapter, eqo, i) {
2590                 if (msix_enabled(adapter))
2591                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2592                 else
2593                         synchronize_irq(netdev->irq);
2594                 be_eq_clean(eqo);
2595         }
2596
2597         be_irq_unregister(adapter);
2598
2599         return 0;
2600 }
2601
2602 static int be_rx_qs_create(struct be_adapter *adapter)
2603 {
2604         struct be_rx_obj *rxo;
2605         int rc, i, j;
2606         u8 rsstable[128];
2607
2608         for_all_rx_queues(adapter, rxo, i) {
2609                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2610                                     sizeof(struct be_eth_rx_d));
2611                 if (rc)
2612                         return rc;
2613         }
2614
2615         /* The FW would like the default RXQ to be created first */
2616         rxo = default_rxo(adapter);
2617         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2618                                adapter->if_handle, false, &rxo->rss_id);
2619         if (rc)
2620                 return rc;
2621
2622         for_all_rss_queues(adapter, rxo, i) {
2623                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2624                                        rx_frag_size, adapter->if_handle,
2625                                        true, &rxo->rss_id);
2626                 if (rc)
2627                         return rc;
2628         }
2629
2630         if (be_multi_rxq(adapter)) {
2631                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2632                         for_all_rss_queues(adapter, rxo, i) {
2633                                 if ((j + i) >= 128)
2634                                         break;
2635                                 rsstable[j + i] = rxo->rss_id;
2636                         }
2637                 }
2638                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2639                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2640
2641                 if (!BEx_chip(adapter))
2642                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2643                                                 RSS_ENABLE_UDP_IPV6;
2644
2645                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2646                                        128);
2647                 if (rc) {
2648                         adapter->rss_flags = 0;
2649                         return rc;
2650                 }
2651         }
2652
2653         /* First time posting */
2654         for_all_rx_queues(adapter, rxo, i)
2655                 be_post_rx_frags(rxo, GFP_KERNEL);
2656         return 0;
2657 }
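
/* Sketch of the RSS indirection-table fill above: the 128 entries are
 * populated round-robin with the RSS ring ids.  With num_rx_qs = 5
 * (four RSS rings), the table looks like
 *
 *   rsstable[] = { id0, id1, id2, id3, id0, id1, id2, id3, ... }
 *
 * so hashed flows spread evenly across the RSS rings while the default
 * (non-RSS) queue stays out of the table.
 */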
2658
2659 static int be_open(struct net_device *netdev)
2660 {
2661         struct be_adapter *adapter = netdev_priv(netdev);
2662         struct be_eq_obj *eqo;
2663         struct be_rx_obj *rxo;
2664         struct be_tx_obj *txo;
2665         u8 link_status;
2666         int status, i;
2667
2668         status = be_rx_qs_create(adapter);
2669         if (status)
2670                 goto err;
2671
2672         status = be_irq_register(adapter);
2673         if (status)
2674                 goto err;
2675
2676         for_all_rx_queues(adapter, rxo, i)
2677                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2678
2679         for_all_tx_queues(adapter, txo, i)
2680                 be_cq_notify(adapter, txo->cq.id, true, 0);
2681
2682         be_async_mcc_enable(adapter);
2683
2684         for_all_evt_queues(adapter, eqo, i) {
2685                 napi_enable(&eqo->napi);
2686                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2687         }
2688         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2689
2690         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2691         if (!status)
2692                 be_link_status_update(adapter, link_status);
2693
2694         netif_tx_start_all_queues(netdev);
2695         be_roce_dev_open(adapter);
2696         return 0;
2697 err:
2698         be_close(adapter->netdev);
2699         return -EIO;
2700 }
2701
2702 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2703 {
2704         struct be_dma_mem cmd;
2705         int status = 0;
2706         u8 mac[ETH_ALEN];
2707
2708         memset(mac, 0, ETH_ALEN);
2709
2710         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2711         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2712                                     GFP_KERNEL | __GFP_ZERO);
2713         if (cmd.va == NULL)
2714                 return -1;
2715
2716         if (enable) {
2717                 status = pci_write_config_dword(adapter->pdev,
2718                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2719                 if (status) {
2720                         dev_err(&adapter->pdev->dev,
2721                                 "Could not enable Wake-on-lan\n");
2722                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2723                                           cmd.dma);
2724                         return status;
2725                 }
2726                 status = be_cmd_enable_magic_wol(adapter,
2727                                 adapter->netdev->dev_addr, &cmd);
2728                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2729                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2730         } else {
2731                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2732                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2733                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2734         }
2735
2736         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2737         return status;
2738 }
2739
2740 /*
2741  * Generate a seed MAC address from the PF MAC Address using jhash.
2742  * MAC addresses for VFs are assigned incrementally starting from the seed.
2743  * These addresses are programmed in the ASIC by the PF and the VF driver
2744  * queries for the MAC address during its probe.
2745  */
2746 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2747 {
2748         u32 vf;
2749         int status = 0;
2750         u8 mac[ETH_ALEN];
2751         struct be_vf_cfg *vf_cfg;
2752
2753         be_vf_eth_addr_generate(adapter, mac);
2754
2755         for_all_vfs(adapter, vf_cfg, vf) {
2756                 if (lancer_chip(adapter)) {
2757                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2758                 } else {
2759                         status = be_cmd_pmac_add(adapter, mac,
2760                                                  vf_cfg->if_handle,
2761                                                  &vf_cfg->pmac_id, vf + 1);
2762                 }
2763
2764                 if (status)
2765                         dev_err(&adapter->pdev->dev,
2766                         "MAC address assignment failed for VF %d\n", vf);
2767                 else
2768                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2769
2770                 mac[5] += 1;
2771         }
2772         return status;
2773 }
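
/* Example of the derivation above: if the jhash-based seed ends in octet
 * 0x00, VF0 is assigned the seed itself, VF1 the seed with 0x01 in the
 * last octet (mac[5]), and so on.  Note that mac[5] wraps at 0xff without
 * carrying into mac[4]; with the small VF counts involved this is
 * acceptable in practice.
 */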
2774
2775 static int be_vfs_mac_query(struct be_adapter *adapter)
2776 {
2777         int status, vf;
2778         u8 mac[ETH_ALEN];
2779         struct be_vf_cfg *vf_cfg;
2780         bool active;
2781
2782         for_all_vfs(adapter, vf_cfg, vf) {
2783                 be_cmd_get_mac_from_list(adapter, mac, &active,
2784                                          &vf_cfg->pmac_id, 0);
2785
2786                 status = be_cmd_mac_addr_query(adapter, mac, false,
2787                                                vf_cfg->if_handle, 0);
2788                 if (status)
2789                         return status;
2790                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2791         }
2792         return 0;
2793 }
2794
2795 static void be_vf_clear(struct be_adapter *adapter)
2796 {
2797         struct be_vf_cfg *vf_cfg;
2798         u32 vf;
2799
2800         if (be_find_vfs(adapter, ASSIGNED)) {
2801                 dev_warn(&adapter->pdev->dev,
2802                          "VFs are assigned to VMs: not disabling VFs\n");
2803                 goto done;
2804         }
2805
2806         pci_disable_sriov(adapter->pdev);
2807
2808         for_all_vfs(adapter, vf_cfg, vf) {
2809                 if (lancer_chip(adapter))
2810                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2811                 else
2812                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2813                                         vf_cfg->pmac_id, vf + 1);
2814
2815                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2816         }
2817 done:
2818         kfree(adapter->vf_cfg);
2819         adapter->num_vfs = 0;
2820 }
2821
2822 static int be_clear(struct be_adapter *adapter)
2823 {
2824         int i = 1;
2825
2826         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2827                 cancel_delayed_work_sync(&adapter->work);
2828                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2829         }
2830
2831         if (sriov_enabled(adapter))
2832                 be_vf_clear(adapter);
2833
2834         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2835                 be_cmd_pmac_del(adapter, adapter->if_handle,
2836                         adapter->pmac_id[i], 0);
2837
2838         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2839
2840         be_mcc_queues_destroy(adapter);
2841         be_rx_cqs_destroy(adapter);
2842         be_tx_queues_destroy(adapter);
2843         be_evt_queues_destroy(adapter);
2844
2845         kfree(adapter->pmac_id);
2846         adapter->pmac_id = NULL;
2847
2848         be_msix_disable(adapter);
2849         return 0;
2850 }
2851
2852 static int be_vfs_if_create(struct be_adapter *adapter)
2853 {
2854         struct be_vf_cfg *vf_cfg;
2855         u32 cap_flags, en_flags, vf;
2856         int status = 0;
2857
2858         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2859                     BE_IF_FLAGS_MULTICAST;
2860
2861         for_all_vfs(adapter, vf_cfg, vf) {
2862                 if (!BE3_chip(adapter))
2863                         be_cmd_get_profile_config(adapter, &cap_flags,
2864                                                   NULL, vf + 1);
2865
2866                 /* If a FW profile exists, then cap_flags are updated */
2867                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2868                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2869                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2870                                           &vf_cfg->if_handle, vf + 1);
2871                 if (status)
2872                         goto err;
2873         }
2874 err:
2875         return status;
2876 }
2877
2878 static int be_vf_setup_init(struct be_adapter *adapter)
2879 {
2880         struct be_vf_cfg *vf_cfg;
2881         int vf;
2882
2883         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2884                                   GFP_KERNEL);
2885         if (!adapter->vf_cfg)
2886                 return -ENOMEM;
2887
2888         for_all_vfs(adapter, vf_cfg, vf) {
2889                 vf_cfg->if_handle = -1;
2890                 vf_cfg->pmac_id = -1;
2891         }
2892         return 0;
2893 }
2894
2895 static int be_vf_setup(struct be_adapter *adapter)
2896 {
2897         struct be_vf_cfg *vf_cfg;
2898         u16 def_vlan, lnk_speed;
2899         int status, old_vfs, vf;
2900         struct device *dev = &adapter->pdev->dev;
2901
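     	/* VFs may still be enabled from a previous load of the PF driver;
     	 * in that case the existing count is adopted below and any
     	 * conflicting num_vfs module-parameter value is ignored.
     	 */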
2902         old_vfs = be_find_vfs(adapter, ENABLED);
2903         if (old_vfs) {
2904                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2905                 if (old_vfs != num_vfs)
2906                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2907                 adapter->num_vfs = old_vfs;
2908         } else {
2909                 if (num_vfs > adapter->dev_num_vfs)
2910                         dev_info(dev, "Device supports only %d VFs, not %d\n",
2911                                  adapter->dev_num_vfs, num_vfs);
2912                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2913                 if (!adapter->num_vfs)
2914                         return 0;
2915         }
2916
2917         status = be_vf_setup_init(adapter);
2918         if (status)
2919                 goto err;
2920
2921         if (old_vfs) {
2922                 for_all_vfs(adapter, vf_cfg, vf) {
2923                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2924                         if (status)
2925                                 goto err;
2926                 }
2927         } else {
2928                 status = be_vfs_if_create(adapter);
2929                 if (status)
2930                         goto err;
2931         }
2932
2933         if (old_vfs) {
2934                 status = be_vfs_mac_query(adapter);
2935                 if (status)
2936                         goto err;
2937         } else {
2938                 status = be_vf_eth_addr_config(adapter);
2939                 if (status)
2940                         goto err;
2941         }
2942
2943         for_all_vfs(adapter, vf_cfg, vf) {
2944                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2945                  * Allow full available bandwidth
2946                  */
2947                 if (BE3_chip(adapter) && !old_vfs)
2948                         be_cmd_set_qos(adapter, 1000, vf + 1);
2949
2950                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2951                                                   NULL, vf + 1);
2952                 if (!status)
2953                         vf_cfg->tx_rate = lnk_speed;
2954
2955                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2956                                                vf + 1, vf_cfg->if_handle);
2957                 if (status)
2958                         goto err;
2959                 vf_cfg->def_vid = def_vlan;
2960
2961                 be_cmd_enable_vf(adapter, vf + 1);
2962         }
2963
2964         if (!old_vfs) {
2965                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2966                 if (status) {
2967                         dev_err(dev, "SRIOV enable failed\n");
2968                         adapter->num_vfs = 0;
2969                         goto err;
2970                 }
2971         }
2972         return 0;
2973 err:
2974         dev_err(dev, "VF setup failed\n");
2975         be_vf_clear(adapter);
2976         return status;
2977 }
2978
2979 static void be_setup_init(struct be_adapter *adapter)
2980 {
2981         adapter->vlan_prio_bmap = 0xff;
2982         adapter->phy.link_speed = -1;
2983         adapter->if_handle = -1;
2984         adapter->be3_native = false;
2985         adapter->promiscuous = false;
2986         if (be_physfn(adapter))
2987                 adapter->cmd_privileges = MAX_PRIVILEGES;
2988         else
2989                 adapter->cmd_privileges = MIN_PRIVILEGES;
2990 }
2991
2992 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2993                            bool *active_mac, u32 *pmac_id)
2994 {
2995         int status = 0;
2996
2997         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2998                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2999                 if (!lancer_chip(adapter) && !be_physfn(adapter))
3000                         *active_mac = true;
3001                 else
3002                         *active_mac = false;
3003
3004                 return status;
3005         }
3006
3007         if (lancer_chip(adapter)) {
3008                 status = be_cmd_get_mac_from_list(adapter, mac,
3009                                                   active_mac, pmac_id, 0);
3010                 if (*active_mac) {
3011                         status = be_cmd_mac_addr_query(adapter, mac, false,
3012                                                        if_handle, *pmac_id);
3013                 }
3014         } else if (be_physfn(adapter)) {
3015                 /* For BE3, for PF get permanent MAC */
3016                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
3017                 *active_mac = false;
3018         } else {
3019                 /* For BE3, for VF get soft MAC assigned by PF*/
3020                 status = be_cmd_mac_addr_query(adapter, mac, false,
3021                                                if_handle, 0);
3022                 *active_mac = true;
3023         }
3024         return status;
3025 }
3026
3027 static void be_get_resources(struct be_adapter *adapter)
3028 {
3029         u16 dev_num_vfs;
3030         int pos, status;
3031         bool profile_present = false;
3032         u16 txq_count = 0;
3033
3034         if (!BEx_chip(adapter)) {
3035                 status = be_cmd_get_func_config(adapter);
3036                 if (!status)
3037                         profile_present = true;
3038         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3039                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3040         }
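     	/* With a FW resource profile the reported limits are only
     	 * sanity-capped below; without one, self-imposed per-chip
     	 * defaults are used instead.
     	 */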
3041
3042         if (profile_present) {
3043                 /* Sanity fixes for Lancer */
3044                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3045                                               BE_UC_PMAC_COUNT);
3046                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3047                                            BE_NUM_VLANS_SUPPORTED);
3048                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3049                                                BE_MAX_MC);
3050                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3051                                                MAX_TX_QS);
3052                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3053                                                 BE3_MAX_RSS_QS);
3054                 adapter->max_event_queues = min_t(u16,
3055                                                   adapter->max_event_queues,
3056                                                   BE3_MAX_RSS_QS);
3057
3058                 if (adapter->max_rss_queues &&
3059                     adapter->max_rss_queues == adapter->max_rx_queues)
3060                         adapter->max_rss_queues -= 1;
3061
3062                 if (adapter->max_event_queues < adapter->max_rss_queues)
3063                         adapter->max_rss_queues = adapter->max_event_queues;
3064
3065         } else {
3066                 if (be_physfn(adapter))
3067                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3068                 else
3069                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3070
3071                 if (adapter->function_mode & FLEX10_MODE)
3072                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3073                 else
3074                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3075
3076                 adapter->max_mcast_mac = BE_MAX_MC;
3077                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3078                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3079                                                MAX_TX_QS);
3080                 adapter->max_rss_queues = (adapter->be3_native) ?
3081                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3082                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3083
3084                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3085                                         BE_IF_FLAGS_BROADCAST |
3086                                         BE_IF_FLAGS_MULTICAST |
3087                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3088                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3089                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3090                                         BE_IF_FLAGS_PROMISCUOUS;
3091
3092                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3093                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3094         }
3095
3096         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3097         if (pos) {
3098                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3099                                      &dev_num_vfs);
3100                 if (BE3_chip(adapter))
3101                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3102                 adapter->dev_num_vfs = dev_num_vfs;
3103         }
3104 }
3105
3106 /* Routine to query per function resource limits */
3107 static int be_get_config(struct be_adapter *adapter)
3108 {
3109         int status;
3110
3111         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3112                                      &adapter->function_mode,
3113                                      &adapter->function_caps,
3114                                      &adapter->asic_rev);
3115         if (status)
3116                 goto err;
3117
3118         be_get_resources(adapter);
3119
3120         /* primary mac needs 1 pmac entry */
3121         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3122                                    sizeof(u32), GFP_KERNEL);
3123         if (!adapter->pmac_id) {
3124                 status = -ENOMEM;
3125                 goto err;
3126         }
3127
3128 err:
3129         return status;
3130 }
3131
3132 static int be_setup(struct be_adapter *adapter)
3133 {
3134         struct device *dev = &adapter->pdev->dev;
3135         u32 en_flags;
3136         u32 tx_fc, rx_fc;
3137         int status;
3138         u8 mac[ETH_ALEN];
3139         bool active_mac;
3140
3141         be_setup_init(adapter);
3142
3143         if (!lancer_chip(adapter))
3144                 be_cmd_req_native_mode(adapter);
3145
3146         status = be_get_config(adapter);
3147         if (status)
3148                 goto err;
3149
3150         status = be_msix_enable(adapter);
3151         if (status)
3152                 goto err;
3153
3154         status = be_evt_queues_create(adapter);
3155         if (status)
3156                 goto err;
3157
3158         status = be_tx_cqs_create(adapter);
3159         if (status)
3160                 goto err;
3161
3162         status = be_rx_cqs_create(adapter);
3163         if (status)
3164                 goto err;
3165
3166         status = be_mcc_queues_create(adapter);
3167         if (status)
3168                 goto err;
3169
3170         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3171         /* In UMC mode FW does not return right privileges.
3172          * Override with correct privilege equivalent to PF.
3173          */
3174         if (be_is_mc(adapter))
3175                 adapter->cmd_privileges = MAX_PRIVILEGES;
3176
3177         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3178                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3179
3180         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3181                 en_flags |= BE_IF_FLAGS_RSS;
3182
3183         en_flags &= adapter->if_cap_flags;
3184
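     	/* The interface is created with every capability the function owns
     	 * (if_cap_flags), but only the en_flags subset computed above starts
     	 * out enabled.
     	 */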
3185         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3186                                   &adapter->if_handle, 0);
3187         if (status != 0)
3188                 goto err;
3189
3190         memset(mac, 0, ETH_ALEN);
3191         active_mac = false;
3192         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3193                                  &active_mac, &adapter->pmac_id[0]);
3194         if (status != 0)
3195                 goto err;
3196
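     	/* If the queried MAC is not already active on the interface,
     	 * program it as pmac entry 0.
     	 */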
3197         if (!active_mac) {
3198                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3199                                          &adapter->pmac_id[0], 0);
3200                 if (status != 0)
3201                         goto err;
3202         }
3203
3204         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3205                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3206                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3207         }
3208
3209         status = be_tx_qs_create(adapter);
3210         if (status)
3211                 goto err;
3212
3213         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3214
3215         if (adapter->vlans_added)
3216                 be_vid_config(adapter);
3217
3218         be_set_rx_mode(adapter->netdev);
3219
3220         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3221
3222         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3223                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3224                                         adapter->rx_fc);
3225
3226         if (be_physfn(adapter)) {
3227                 if (adapter->dev_num_vfs)
3228                         be_vf_setup(adapter);
3229                 else
3230                         dev_warn(dev, "device doesn't support SRIOV\n");
3231         }
3232
3233         status = be_cmd_get_phy_info(adapter);
3234         if (!status && be_pause_supported(adapter))
3235                 adapter->phy.fc_autoneg = 1;
3236
3237         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3238         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3239         return 0;
3240 err:
3241         be_clear(adapter);
3242         return status;
3243 }
3244
3245 #ifdef CONFIG_NET_POLL_CONTROLLER
3246 static void be_netpoll(struct net_device *netdev)
3247 {
3248         struct be_adapter *adapter = netdev_priv(netdev);
3249         struct be_eq_obj *eqo;
3250         int i;
3251
3252         for_all_evt_queues(adapter, eqo, i) {
3253                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3254                 napi_schedule(&eqo->napi);
3255         }
3258 }
3259 #endif
3260
3261 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3262 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3263
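     /* Decide whether the boot (redboot) section needs to be flashed: fetch the
      * CRC of the image currently in flash and compare it with the CRC trailing
      * the new image; flashing is skipped when they match.
      */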
3264 static bool be_flash_redboot(struct be_adapter *adapter,
3265                         const u8 *p, u32 img_start, int image_size,
3266                         int hdr_size)
3267 {
3268         u32 crc_offset;
3269         u8 flashed_crc[4];
3270         int status;
3271
3272         crc_offset = hdr_size + img_start + image_size - 4;
3273
3274         p += crc_offset;
3275
3276         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3277                                       image_size - 4);
3278         if (status) {
3279                 dev_err(&adapter->pdev->dev,
3280                         "could not get crc from flash, not flashing redboot\n");
3281                 return false;
3282         }
3283
3284         /* update redboot only if the crc does not match */
3285         return memcmp(flashed_crc, p, 4) != 0;
3289 }
3290
3291 static bool phy_flashing_required(struct be_adapter *adapter)
3292 {
3293         return (adapter->phy.phy_type == TN_8022 &&
3294                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3295 }
3296
3297 static bool is_comp_in_ufi(struct be_adapter *adapter,
3298                            struct flash_section_info *fsec, int type)
3299 {
3300         int i = 0, img_type = 0;
3301         struct flash_section_info_g2 *fsec_g2 = NULL;
3302
3303         if (BE2_chip(adapter))
3304                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3305
3306         for (i = 0; i < MAX_FLASH_COMP; i++) {
3307                 if (fsec_g2)
3308                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3309                 else
3310                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3311
3312                 if (img_type == type)
3313                         return true;
3314         }
3315         return false;
3317 }
3318
3319 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3320                                                 int header_size,
3321                                                 const struct firmware *fw)
3322 {
3323         struct flash_section_info *fsec = NULL;
3324         const u8 *p = fw->data;
3325
3326         p += header_size;
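     	/* Scan the words following the headers in 32-byte strides, looking
     	 * for the flash-directory cookie that marks the start of the
     	 * section table.
     	 */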
3327         while (p < (fw->data + fw->size)) {
3328                 fsec = (struct flash_section_info *)p;
3329                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3330                         return fsec;
3331                 p += 32;
3332         }
3333         return NULL;
3334 }
3335
3336 static int be_flash(struct be_adapter *adapter, const u8 *img,
3337                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3338 {
3339         u32 total_bytes = 0, flash_op, num_bytes = 0;
3340         int status = 0;
3341         struct be_cmd_write_flashrom *req = flash_cmd->va;
3342
3343         total_bytes = img_size;
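     	/* The image is sent to the FW in 32KB chunks: intermediate chunks
     	 * use a SAVE op and the final chunk a FLASH op, which commits the
     	 * accumulated data to flash (PHY FW has its own op pair).
     	 */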
3344         while (total_bytes) {
3345                 num_bytes = min_t(u32, 32*1024, total_bytes);
3346
3347                 total_bytes -= num_bytes;
3348
3349                 if (!total_bytes) {
3350                         if (optype == OPTYPE_PHY_FW)
3351                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3352                         else
3353                                 flash_op = FLASHROM_OPER_FLASH;
3354                 } else {
3355                         if (optype == OPTYPE_PHY_FW)
3356                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3357                         else
3358                                 flash_op = FLASHROM_OPER_SAVE;
3359                 }
3360
3361                 memcpy(req->data_buf, img, num_bytes);
3362                 img += num_bytes;
3363                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3364                                                 flash_op, num_bytes);
3365                 if (status) {
3366                         if (status == ILLEGAL_IOCTL_REQ &&
3367                             optype == OPTYPE_PHY_FW)
3368                                 break;
3369                         dev_err(&adapter->pdev->dev,
3370                                 "cmd to write to flash rom failed.\n");
3371                         return status;
3372                 }
3373         }
3374         return 0;
3375 }
3376
3377 /* For BE2, BE3 and BE3-R */
3378 static int be_flash_BEx(struct be_adapter *adapter,
3379                          const struct firmware *fw,
3380                          struct be_dma_mem *flash_cmd,
3381                          int num_of_images)
3382
3383 {
3384         int status = 0, i, filehdr_size = 0;
3385         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3386         const u8 *p = fw->data;
3387         const struct flash_comp *pflashcomp;
3388         int num_comp, redboot;
3389         struct flash_section_info *fsec = NULL;
3390
3391         static const struct flash_comp gen3_flash_types[] = {
3392                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3393                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3394                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3395                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3396                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3397                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3398                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3399                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3400                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3401                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3402                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3403                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3404                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3405                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3406                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3407                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3408                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3409                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3410                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3411                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3412         };
3413
3414         static const struct flash_comp gen2_flash_types[] = {
3415                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3416                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3417                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3418                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3419                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3420                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3421                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3422                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3423                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3424                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3425                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3426                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3427                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3428                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3429                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3430                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3431         };
3432
3433         if (BE3_chip(adapter)) {
3434                 pflashcomp = gen3_flash_types;
3435                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3436                 num_comp = ARRAY_SIZE(gen3_flash_types);
3437         } else {
3438                 pflashcomp = gen2_flash_types;
3439                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3440                 num_comp = ARRAY_SIZE(gen2_flash_types);
3441         }
3442
3443         /* Get flash section info*/
3444         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3445         if (!fsec) {
3446                 dev_err(&adapter->pdev->dev,
3447                         "Invalid cookie. UFI corrupted?\n");
3448                 return -1;
3449         }
3450         for (i = 0; i < num_comp; i++) {
3451                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3452                         continue;
3453
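                     /* NC-SI FW appears to be supported only from FW version
                      * 3.102.148.0 onwards, per the version gate below.
                      */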
3454                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3455                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3456                         continue;
3457
3458                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3459                     !phy_flashing_required(adapter))
3460                         continue;
3461
3462                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3463                         redboot = be_flash_redboot(adapter, fw->data,
3464                                 pflashcomp[i].offset, pflashcomp[i].size,
3465                                 filehdr_size + img_hdrs_size);
3466                         if (!redboot)
3467                                 continue;
3468                 }
3469
3470                 p = fw->data;
3471                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3472                 if (p + pflashcomp[i].size > fw->data + fw->size)
3473                         return -1;
3474
3475                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3476                                         pflashcomp[i].size);
3477                 if (status) {
3478                         dev_err(&adapter->pdev->dev,
3479                                 "Flashing section type %d failed.\n",
3480                                 pflashcomp[i].img_type);
3481                         return status;
3482                 }
3483         }
3484         return 0;
3485 }
3486
3487 static int be_flash_skyhawk(struct be_adapter *adapter,
3488                 const struct firmware *fw,
3489                 struct be_dma_mem *flash_cmd, int num_of_images)
3490 {
3491         int status = 0, i, filehdr_size = 0;
3492         int img_offset, img_size, img_optype, redboot;
3493         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3494         const u8 *p = fw->data;
3495         struct flash_section_info *fsec = NULL;
3496
3497         filehdr_size = sizeof(struct flash_file_hdr_g3);
3498         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3499         if (!fsec) {
3500                 dev_err(&adapter->pdev->dev,
3501                         "Invalid cookie. UFI corrupted?\n");
3502                 return -1;
3503         }
3504
3505         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3506                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3507                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3508
3509                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3510                 case IMAGE_FIRMWARE_iSCSI:
3511                         img_optype = OPTYPE_ISCSI_ACTIVE;
3512                         break;
3513                 case IMAGE_BOOT_CODE:
3514                         img_optype = OPTYPE_REDBOOT;
3515                         break;
3516                 case IMAGE_OPTION_ROM_ISCSI:
3517                         img_optype = OPTYPE_BIOS;
3518                         break;
3519                 case IMAGE_OPTION_ROM_PXE:
3520                         img_optype = OPTYPE_PXE_BIOS;
3521                         break;
3522                 case IMAGE_OPTION_ROM_FCoE:
3523                         img_optype = OPTYPE_FCOE_BIOS;
3524                         break;
3525                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3526                         img_optype = OPTYPE_ISCSI_BACKUP;
3527                         break;
3528                 case IMAGE_NCSI:
3529                         img_optype = OPTYPE_NCSI_FW;
3530                         break;
3531                 default:
3532                         continue;
3533                 }
3534
3535                 if (img_optype == OPTYPE_REDBOOT) {
3536                         redboot = be_flash_redboot(adapter, fw->data,
3537                                         img_offset, img_size,
3538                                         filehdr_size + img_hdrs_size);
3539                         if (!redboot)
3540                                 continue;
3541                 }
3542
3543                 p = fw->data;
3544                 p += filehdr_size + img_offset + img_hdrs_size;
3545                 if (p + img_size > fw->data + fw->size)
3546                         return -1;
3547
3548                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3549                 if (status) {
3550                         dev_err(&adapter->pdev->dev,
3551                                 "Flashing section type %d failed.\n",
3552                                 le32_to_cpu(fsec->fsec_entry[i].type));
3553                         return status;
3554                 }
3555         }
3556         return 0;
3557 }
3558
3559 static int lancer_fw_download(struct be_adapter *adapter,
3560                                 const struct firmware *fw)
3561 {
3562 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3563 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3564         struct be_dma_mem flash_cmd;
3565         const u8 *data_ptr = NULL;
3566         u8 *dest_image_ptr = NULL;
3567         size_t image_size = 0;
3568         u32 chunk_size = 0;
3569         u32 data_written = 0;
3570         u32 offset = 0;
3571         int status = 0;
3572         u8 add_status = 0;
3573         u8 change_status;
3574
3575         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3576                 dev_err(&adapter->pdev->dev,
3577                         "FW image not properly aligned. Length must be 4-byte aligned.\n");
3579                 status = -EINVAL;
3580                 goto lancer_fw_exit;
3581         }
3582
3583         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3584                                 + LANCER_FW_DOWNLOAD_CHUNK;
3585         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3586                                           &flash_cmd.dma, GFP_KERNEL);
3587         if (!flash_cmd.va) {
3588                 status = -ENOMEM;
3589                 goto lancer_fw_exit;
3590         }
3591
3592         dest_image_ptr = flash_cmd.va +
3593                                 sizeof(struct lancer_cmd_req_write_object);
3594         image_size = fw->size;
3595         data_ptr = fw->data;
3596
3597         while (image_size) {
3598                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3599
3600                 /* Copy the image chunk content. */
3601                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3602
3603                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3604                                                  chunk_size, offset,
3605                                                  LANCER_FW_DOWNLOAD_LOCATION,
3606                                                  &data_written, &change_status,
3607                                                  &add_status);
3608                 if (status)
3609                         break;
3610
3611                 offset += data_written;
3612                 data_ptr += data_written;
3613                 image_size -= data_written;
3614         }
3615
3616         if (!status) {
3617                 /* A zero-length write at the final offset commits the FW */
3618                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3619                                                  0, offset,
3620                                                  LANCER_FW_DOWNLOAD_LOCATION,
3621                                                  &data_written, &change_status,
3622                                                  &add_status);
3623         }
3624
3625         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3626                                 flash_cmd.dma);
3627         if (status) {
3628                 dev_err(&adapter->pdev->dev,
3629                         "Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
3631                         status, add_status);
3632                 goto lancer_fw_exit;
3633         }
3634
3635         if (change_status == LANCER_FW_RESET_NEEDED) {
3636                 status = lancer_physdev_ctrl(adapter,
3637                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3638                 if (status) {
3639                         dev_err(&adapter->pdev->dev,
3640                                 "Adapter busy for FW reset. New FW will not be active\n");
3642                         goto lancer_fw_exit;
3643                 }
3644         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3645                 dev_err(&adapter->pdev->dev,
3646                         "System reboot required for new FW to be active\n");
3647         }
3649
3650         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3651 lancer_fw_exit:
3652         return status;
3653 }
3654
3655 #define UFI_TYPE2               2
3656 #define UFI_TYPE3               3
3657 #define UFI_TYPE3R              10
3658 #define UFI_TYPE4               4
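     /* Infer the UFI flavour from the first character of the build string in
      * the file header and check it against the chip generation; BE3-R parts
      * (asic_type_rev 0x10) take the separate TYPE3R image.
      */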
3659 static int be_get_ufi_type(struct be_adapter *adapter,
3660                            struct flash_file_hdr_g3 *fhdr)
3661 {
3662         if (fhdr == NULL)
3663                 goto be_get_ufi_exit;
3664
3665         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3666                 return UFI_TYPE4;
3667         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3668                 if (fhdr->asic_type_rev == 0x10)
3669                         return UFI_TYPE3R;
3670                 else
3671                         return UFI_TYPE3;
3672         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3673                 return UFI_TYPE2;
3674
3675 be_get_ufi_exit:
3676         dev_err(&adapter->pdev->dev,
3677                 "UFI and Interface are not compatible for flashing\n");
3678         return -1;
3679 }
3680
3681 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3682 {
3683         struct flash_file_hdr_g3 *fhdr3;
3684         struct image_hdr *img_hdr_ptr = NULL;
3685         struct be_dma_mem flash_cmd;
3686         const u8 *p;
3687         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3688
3689         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3690         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3691                                           &flash_cmd.dma, GFP_KERNEL);
3692         if (!flash_cmd.va) {
3693                 status = -ENOMEM;
3694                 goto be_fw_exit;
3695         }
3696
3697         p = fw->data;
3698         fhdr3 = (struct flash_file_hdr_g3 *)p;
3699
3700         ufi_type = be_get_ufi_type(adapter, fhdr3);
3701
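     	/* Walk the per-image headers and flash via the handler matching
     	 * the UFI type; only headers with image-id 1 are considered.
     	 */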
3702         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3703         for (i = 0; i < num_imgs; i++) {
3704                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3705                                 (sizeof(struct flash_file_hdr_g3) +
3706                                  i * sizeof(struct image_hdr)));
3707                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3708                         switch (ufi_type) {
3709                         case UFI_TYPE4:
3710                                 status = be_flash_skyhawk(adapter, fw,
3711                                                         &flash_cmd, num_imgs);
3712                                 break;
3713                         case UFI_TYPE3R:
3714                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3715                                                       num_imgs);
3716                                 break;
3717                         case UFI_TYPE3:
3718                                 /* Do not flash this ufi on BE3-R cards */
3719                                 if (adapter->asic_rev < 0x10)
3720                                         status = be_flash_BEx(adapter, fw,
3721                                                               &flash_cmd,
3722                                                               num_imgs);
3723                                 else {
3724                                         status = -1;
3725                                         dev_err(&adapter->pdev->dev,
3726                                                 "Can't load BE3 UFI on BE3R\n");
3727                                 }
3728                         }
3729                 }
3730         }
3731
3732         if (ufi_type == UFI_TYPE2)
3733                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3734         else if (ufi_type == -1)
3735                 status = -1;
3736
3737         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3738                           flash_cmd.dma);
3739         if (status) {
3740                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3741                 goto be_fw_exit;
3742         }
3743
3744         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3745
3746 be_fw_exit:
3747         return status;
3748 }
3749
3750 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3751 {
3752         const struct firmware *fw;
3753         int status;
3754
3755         if (!netif_running(adapter->netdev)) {
3756                 dev_err(&adapter->pdev->dev,
3757                         "Firmware load not allowed (interface is down)\n");
3758                 return -1;
3759         }
3760
3761         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3762         if (status)
3763                 goto fw_exit;
3764
3765         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3766
3767         if (lancer_chip(adapter))
3768                 status = lancer_fw_download(adapter, fw);
3769         else
3770                 status = be_fw_download(adapter, fw);
3771
3772         if (!status)
3773                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3774                                   adapter->fw_on_flash);
3775
3776 fw_exit:
3777         release_firmware(fw);
3778         return status;
3779 }
3780
3781 static const struct net_device_ops be_netdev_ops = {
3782         .ndo_open               = be_open,
3783         .ndo_stop               = be_close,
3784         .ndo_start_xmit         = be_xmit,
3785         .ndo_set_rx_mode        = be_set_rx_mode,
3786         .ndo_set_mac_address    = be_mac_addr_set,
3787         .ndo_change_mtu         = be_change_mtu,
3788         .ndo_get_stats64        = be_get_stats64,
3789         .ndo_validate_addr      = eth_validate_addr,
3790         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3791         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3792         .ndo_set_vf_mac         = be_set_vf_mac,
3793         .ndo_set_vf_vlan        = be_set_vf_vlan,
3794         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3795         .ndo_get_vf_config      = be_get_vf_config,
3796 #ifdef CONFIG_NET_POLL_CONTROLLER
3797         .ndo_poll_controller    = be_netpoll,
3798 #endif
3799 };
3800
3801 static void be_netdev_init(struct net_device *netdev)
3802 {
3803         struct be_adapter *adapter = netdev_priv(netdev);
3804         struct be_eq_obj *eqo;
3805         int i;
3806
3807         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3808                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3809                 NETIF_F_HW_VLAN_CTAG_TX;
3810         if (be_multi_rxq(adapter))
3811                 netdev->hw_features |= NETIF_F_RXHASH;
3812
3813         netdev->features |= netdev->hw_features |
3814                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3815
3816         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3817                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3818
3819         netdev->priv_flags |= IFF_UNICAST_FLT;
3820
3821         netdev->flags |= IFF_MULTICAST;
3822
3823         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3824
3825         netdev->netdev_ops = &be_netdev_ops;
3826
3827         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3828
3829         for_all_evt_queues(adapter, eqo, i)
3830                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3831 }
3832
3833 static void be_unmap_pci_bars(struct be_adapter *adapter)
3834 {
3835         if (adapter->csr)
3836                 pci_iounmap(adapter->pdev, adapter->csr);
3837         if (adapter->db)
3838                 pci_iounmap(adapter->pdev, adapter->db);
3839 }
3840
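     /* Doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 on BEx PFs */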
3841 static int db_bar(struct be_adapter *adapter)
3842 {
3843         if (lancer_chip(adapter) || !be_physfn(adapter))
3844                 return 0;
3845         else
3846                 return 4;
3847 }
3848
3849 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3850 {
3851         if (skyhawk_chip(adapter)) {
3852                 adapter->roce_db.size = 4096;
3853                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3854                                                               db_bar(adapter));
3855                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3856                                                                db_bar(adapter));
3857         }
3858         return 0;
3859 }
3860
3861 static int be_map_pci_bars(struct be_adapter *adapter)
3862 {
3863         u8 __iomem *addr;
3864         u32 sli_intf;
3865
3866         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3867         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3868                                 SLI_INTF_IF_TYPE_SHIFT;
3869
3870         if (BEx_chip(adapter) && be_physfn(adapter)) {
3871                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3872                 if (adapter->csr == NULL)
3873                         return -ENOMEM;
3874         }
3875
3876         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3877         if (addr == NULL)
3878                 goto pci_map_err;
3879         adapter->db = addr;
3880
3881         be_roce_map_pci_bars(adapter);
3882         return 0;
3883
3884 pci_map_err:
3885         be_unmap_pci_bars(adapter);
3886         return -ENOMEM;
3887 }
3888
3889 static void be_ctrl_cleanup(struct be_adapter *adapter)
3890 {
3891         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3892
3893         be_unmap_pci_bars(adapter);
3894
3895         if (mem->va)
3896                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3897                                   mem->dma);
3898
3899         mem = &adapter->rx_filter;
3900         if (mem->va)
3901                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3902                                   mem->dma);
3903 }
3904
3905 static int be_ctrl_init(struct be_adapter *adapter)
3906 {
3907         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3908         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3909         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3910         u32 sli_intf;
3911         int status;
3912
3913         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3914         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3915                                  SLI_INTF_FAMILY_SHIFT;
3916         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3917
3918         status = be_map_pci_bars(adapter);
3919         if (status)
3920                 goto done;
3921
3922         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3923         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3924                                                 mbox_mem_alloc->size,
3925                                                 &mbox_mem_alloc->dma,
3926                                                 GFP_KERNEL);
3927         if (!mbox_mem_alloc->va) {
3928                 status = -ENOMEM;
3929                 goto unmap_pci_bars;
3930         }
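     	/* The FW mailbox must be 16-byte aligned; carve an aligned view
     	 * out of the size + 16 allocation made above.
     	 */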
3931         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3932         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3933         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3934         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3935
3936         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3937         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3938                                            &rx_filter->dma,
3939                                            GFP_KERNEL | __GFP_ZERO);
3940         if (rx_filter->va == NULL) {
3941                 status = -ENOMEM;
3942                 goto free_mbox;
3943         }
3944
3945         mutex_init(&adapter->mbox_lock);
3946         spin_lock_init(&adapter->mcc_lock);
3947         spin_lock_init(&adapter->mcc_cq_lock);
3948
3949         init_completion(&adapter->flash_compl);
3950         pci_save_state(adapter->pdev);
3951         return 0;
3952
3953 free_mbox:
3954         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3955                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3956
3957 unmap_pci_bars:
3958         be_unmap_pci_bars(adapter);
3959
3960 done:
3961         return status;
3962 }
3963
3964 static void be_stats_cleanup(struct be_adapter *adapter)
3965 {
3966         struct be_dma_mem *cmd = &adapter->stats_cmd;
3967
3968         if (cmd->va)
3969                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3970                                   cmd->va, cmd->dma);
3971 }
3972
3973 static int be_stats_init(struct be_adapter *adapter)
3974 {
3975         struct be_dma_mem *cmd = &adapter->stats_cmd;
3976
3977         if (lancer_chip(adapter))
3978                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3979         else if (BE2_chip(adapter))
3980                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3981         else
3982                 /* BE3 and Skyhawk */
3983                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3984
3985         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3986                                      GFP_KERNEL | __GFP_ZERO);
3987         if (cmd->va == NULL)
3988                 return -ENOMEM;
3989         return 0;
3990 }
3991
3992 static void be_remove(struct pci_dev *pdev)
3993 {
3994         struct be_adapter *adapter = pci_get_drvdata(pdev);
3995
3996         if (!adapter)
3997                 return;
3998
3999         be_roce_dev_remove(adapter);
4000         be_intr_set(adapter, false);
4001
4002         cancel_delayed_work_sync(&adapter->func_recovery_work);
4003
4004         unregister_netdev(adapter->netdev);
4005
4006         be_clear(adapter);
4007
4008         /* tell fw we're done with firing cmds */
4009         be_cmd_fw_clean(adapter);
4010
4011         be_stats_cleanup(adapter);
4012
4013         be_ctrl_cleanup(adapter);
4014
4015         pci_disable_pcie_error_reporting(pdev);
4016
4017         pci_set_drvdata(pdev, NULL);
4018         pci_release_regions(pdev);
4019         pci_disable_device(pdev);
4020
4021         free_netdev(adapter->netdev);
4022 }
4023
4024 bool be_is_wol_supported(struct be_adapter *adapter)
4025 {
4026         return (adapter->wol_cap & BE_WOL_CAP) &&
4027                !be_is_wol_excluded(adapter);
4028 }
4029
4030 u32 be_get_fw_log_level(struct be_adapter *adapter)
4031 {
4032         struct be_dma_mem extfat_cmd;
4033         struct be_fat_conf_params *cfgs;
4034         int status;
4035         u32 level = 0;
4036         int j;
4037
4038         if (lancer_chip(adapter))
4039                 return 0;
4040
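     	/* Query the extended FAT capabilities and return the UART-mode
     	 * trace level of module 0, which serves as the FW log level.
     	 */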
4041         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4042         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4043         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4044                                              &extfat_cmd.dma);
4045
4046         if (!extfat_cmd.va) {
4047                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4048                         __func__);
4049                 goto err;
4050         }
4051
4052         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4053         if (!status) {
4054                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4055                                                 sizeof(struct be_cmd_resp_hdr));
4056                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4057                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4058                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4059                 }
4060         }
4061         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4062                             extfat_cmd.dma);
4063 err:
4064         return level;
4065 }
4066
4067 static int be_get_initial_config(struct be_adapter *adapter)
4068 {
4069         int status;
4070         u32 level;
4071
4072         status = be_cmd_get_cntl_attributes(adapter);
4073         if (status)
4074                 return status;
4075
4076         status = be_cmd_get_acpi_wol_cap(adapter);
4077         if (status) {
4078                 /* in case of a failure to get WOL capabilities,
4079                  * check the exclusion list to determine WOL capability */
4080                 if (!be_is_wol_excluded(adapter))
4081                         adapter->wol_cap |= BE_WOL_CAP;
4082         }
4083
4084         if (be_is_wol_supported(adapter))
4085                 adapter->wol = true;
4086
4087         /* Must be a power of 2 or else MODULO will BUG_ON */
4088         adapter->be_get_temp_freq = 64;
4089
4090         level = be_get_fw_log_level(adapter);
4091         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4092
4093         return 0;
4094 }
4095
4096 static int lancer_recover_func(struct be_adapter *adapter)
4097 {
4098         struct device *dev = &adapter->pdev->dev;
4099         int status;
4100
4101         status = lancer_test_and_set_rdy_state(adapter);
4102         if (status)
4103                 goto err;
4104
4105         if (netif_running(adapter->netdev))
4106                 be_close(adapter->netdev);
4107
4108         be_clear(adapter);
4109
4110         be_clear_all_error(adapter);
4111
4112         status = be_setup(adapter);
4113         if (status)
4114                 goto err;
4115
4116         if (netif_running(adapter->netdev)) {
4117                 status = be_open(adapter->netdev);
4118                 if (status)
4119                         goto err;
4120         }
4121
4122         dev_info(dev, "Error recovery successful\n");
4123         return 0;
4124 err:
4125         if (status == -EAGAIN)
4126                 dev_err(dev, "Waiting for resource provisioning\n");
4127         else
4128                 dev_err(dev, "Error recovery failed\n");
4129
4130         return status;
4131 }
4132
4133 static void be_func_recovery_task(struct work_struct *work)
4134 {
4135         struct be_adapter *adapter =
4136                 container_of(work, struct be_adapter,  func_recovery_work.work);
4137         int status = 0;
4138
4139         be_detect_error(adapter);
4140
4141         if (adapter->hw_error && lancer_chip(adapter)) {
4143                 rtnl_lock();
4144                 netif_device_detach(adapter->netdev);
4145                 rtnl_unlock();
4146
4147                 status = lancer_recover_func(adapter);
4148                 if (!status)
4149                         netif_device_attach(adapter->netdev);
4150         }
4151
4152         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4153          * no need to attempt further recovery.
4154          */
4155         if (!status || status == -EAGAIN)
4156                 schedule_delayed_work(&adapter->func_recovery_work,
4157                                       msecs_to_jiffies(1000));
4158 }
4159
4160 static void be_worker(struct work_struct *work)
4161 {
4162         struct be_adapter *adapter =
4163                 container_of(work, struct be_adapter, work.work);
4164         struct be_rx_obj *rxo;
4165         struct be_eq_obj *eqo;
4166         int i;
4167
4168         /* when interrupts are not yet enabled, just reap any pending
4169          * mcc completions */
4170         if (!netif_running(adapter->netdev)) {
4171                 local_bh_disable();
4172                 be_process_mcc(adapter);
4173                 local_bh_enable();
4174                 goto reschedule;
4175         }
4176
4177         if (!adapter->stats_cmd_sent) {
4178                 if (lancer_chip(adapter))
4179                         lancer_cmd_get_pport_stats(adapter,
4180                                                 &adapter->stats_cmd);
4181                 else
4182                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4183         }
4184
4185         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4186                 be_cmd_get_die_temperature(adapter);
4187
4188         for_all_rx_queues(adapter, rxo, i) {
4189                 if (rxo->rx_post_starved) {
4190                         rxo->rx_post_starved = false;
4191                         be_post_rx_frags(rxo, GFP_KERNEL);
4192                 }
4193         }
4194
4195         for_all_evt_queues(adapter, eqo, i)
4196                 be_eqd_update(adapter, eqo);
4197
4198 reschedule:
4199         adapter->work_counter++;
4200         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4201 }
4202
4203 static bool be_reset_required(struct be_adapter *adapter)
4204 {
4205         return be_find_vfs(adapter, ENABLED) <= 0;
4206 }
4207
4208 static char *mc_name(struct be_adapter *adapter)
4209 {
4210         if (adapter->function_mode & FLEX10_MODE)
4211                 return "FLEX10";
4212         else if (adapter->function_mode & VNIC_MODE)
4213                 return "vNIC";
4214         else if (adapter->function_mode & UMC_ENABLED)
4215                 return "UMC";
4216         else
4217                 return "";
4218 }
4219
4220 static inline char *func_name(struct be_adapter *adapter)
4221 {
4222         return be_physfn(adapter) ? "PF" : "VF";
4223 }
4224
4225 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4226 {
4227         int status = 0;
4228         struct be_adapter *adapter;
4229         struct net_device *netdev;
4230         char port_name;
4231
4232         status = pci_enable_device(pdev);
4233         if (status)
4234                 goto do_none;
4235
4236         status = pci_request_regions(pdev, DRV_NAME);
4237         if (status)
4238                 goto disable_dev;
4239         pci_set_master(pdev);
4240
4241         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4242         if (netdev == NULL) {
4243                 status = -ENOMEM;
4244                 goto rel_reg;
4245         }
4246         adapter = netdev_priv(netdev);
4247         adapter->pdev = pdev;
4248         pci_set_drvdata(pdev, adapter);
4249         adapter->netdev = netdev;
4250         SET_NETDEV_DEV(netdev, &pdev->dev);
4251
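     	/* Prefer a 64-bit DMA mask and fall back to 32-bit when the
     	 * platform cannot provide it; HIGHDMA is advertised only in the
     	 * 64-bit case.
     	 */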
4252         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4253         if (!status) {
4254                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4255                 if (status < 0) {
4256                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4257                         goto free_netdev;
4258                 }
4259                 netdev->features |= NETIF_F_HIGHDMA;
4260         } else {
4261                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4262                 if (!status)
4263                         status = dma_set_coherent_mask(&pdev->dev,
4264                                                        DMA_BIT_MASK(32));
4265                 if (status) {
4266                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4267                         goto free_netdev;
4268                 }
4269         }
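        /* The 64-bit-then-32-bit fallback above is the standard DMA mask
         * negotiation.  On kernels that provide it (an assumption here),
         * each mask/coherent-mask pair collapses into one helper call:
         *
         *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
         *              status = dma_set_mask_and_coherent(&pdev->dev,
         *                                                 DMA_BIT_MASK(32));
         */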
4270
4271         status = pci_enable_pcie_error_reporting(pdev);
4272         if (status)
4273                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4274
4275         status = be_ctrl_init(adapter);
4276         if (status)
4277                 goto free_netdev;
4278
4279         /* sync up with fw's ready state */
4280         if (be_physfn(adapter)) {
4281                 status = be_fw_wait_ready(adapter);
4282                 if (status)
4283                         goto ctrl_clean;
4284         }
4285
4286         if (be_reset_required(adapter)) {
4287                 status = be_cmd_reset_function(adapter);
4288                 if (status)
4289                         goto ctrl_clean;
4290
4291                 /* Wait for interrupts to quiesce after an FLR */
4292                 msleep(100);
4293         }
4294
4295         /* Allow interrupts for other ULPs running on NIC function */
4296         be_intr_set(adapter, true);
4297
4298         /* tell fw we're ready to fire cmds */
4299         status = be_cmd_fw_init(adapter);
4300         if (status)
4301                 goto ctrl_clean;
4302
4303         status = be_stats_init(adapter);
4304         if (status)
4305                 goto ctrl_clean;
4306
4307         status = be_get_initial_config(adapter);
4308         if (status)
4309                 goto stats_clean;
4310
4311         INIT_DELAYED_WORK(&adapter->work, be_worker);
4312         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4313         adapter->rx_fc = adapter->tx_fc = true;
4314
4315         status = be_setup(adapter);
4316         if (status)
4317                 goto stats_clean;
4318
4319         be_netdev_init(netdev);
4320         status = register_netdev(netdev);
4321         if (status)
4322                 goto unsetup;
4323
4324         be_roce_dev_add(adapter);
4325
4326         schedule_delayed_work(&adapter->func_recovery_work,
4327                               msecs_to_jiffies(1000));
4328
4329         be_cmd_query_port_name(adapter, &port_name);
4330
4331         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4332                  func_name(adapter), mc_name(adapter), port_name);
4333
4334         return 0;
4335
4336 unsetup:
4337         be_clear(adapter);
4338 stats_clean:
4339         be_stats_cleanup(adapter);
4340 ctrl_clean:
4341         be_ctrl_cleanup(adapter);
4342 free_netdev:
4343         free_netdev(netdev);
4344         pci_set_drvdata(pdev, NULL);
4345 rel_reg:
4346         pci_release_regions(pdev);
4347 disable_dev:
4348         pci_disable_device(pdev);
4349 do_none:
4350         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4351         return status;
4352 }
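/* be_probe() unwinds with the kernel's goto-ladder idiom: each label undoes
 * exactly what had succeeded before the failure, in reverse order.  Reduced
 * to a sketch with illustrative names (step_a/step_b/undo_step_a):
 */
#if 0
static int my_probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;
	return 0;

undo_a:
	undo_step_a();	/* undo steps in reverse order of setup */
out:
	return err;
}
#endif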
4353
4354 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4355 {
4356         struct be_adapter *adapter = pci_get_drvdata(pdev);
4357         struct net_device *netdev = adapter->netdev;
4358
4359         if (adapter->wol)
4360                 be_setup_wol(adapter, true);
4361
4362         cancel_delayed_work_sync(&adapter->func_recovery_work);
4363
4364         netif_device_detach(netdev);
4365         if (netif_running(netdev)) {
4366                 rtnl_lock();
4367                 be_close(netdev);
4368                 rtnl_unlock();
4369         }
4370         be_clear(adapter);
4371
4372         pci_save_state(pdev);
4373         pci_disable_device(pdev);
4374         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4375         return 0;
4376 }
4377
4378 static int be_resume(struct pci_dev *pdev)
4379 {
4380         int status = 0;
4381         struct be_adapter *adapter = pci_get_drvdata(pdev);
4382         struct net_device *netdev = adapter->netdev;
4383
4384         netif_device_detach(netdev);
4385
4386         status = pci_enable_device(pdev);
4387         if (status)
4388                 return status;
4389
4390         pci_set_power_state(pdev, PCI_D0);
4391         pci_restore_state(pdev);
4392
4393         /* tell fw we're ready to fire cmds */
4394         status = be_cmd_fw_init(adapter);
4395         if (status)
4396                 return status;
4397
4398         be_setup(adapter);
4399         if (netif_running(netdev)) {
4400                 rtnl_lock();
4401                 be_open(netdev);
4402                 rtnl_unlock();
4403         }
4404
4405         schedule_delayed_work(&adapter->func_recovery_work,
4406                               msecs_to_jiffies(1000));
4407         netif_device_attach(netdev);
4408
4409         if (adapter->wol)
4410                 be_setup_wol(adapter, false);
4411
4412         return 0;
4413 }
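/* be_suspend()/be_resume() use the legacy pci_driver .suspend/.resume hooks.
 * The same pair can be expressed through dev_pm_ops; a sketch of that
 * alternative wiring (be_suspend_pm/be_resume_pm would be new callbacks
 * taking struct device *, not functions in this driver):
 */
#if 0
static SIMPLE_DEV_PM_OPS(be_pm_ops, be_suspend_pm, be_resume_pm);

static struct pci_driver be_driver_pm = {
	.driver.pm = &be_pm_ops,
};
#endif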
4414
4415 /*
4416  * An FLR will stop BE from DMAing any data.
4417  */
4418 static void be_shutdown(struct pci_dev *pdev)
4419 {
4420         struct be_adapter *adapter = pci_get_drvdata(pdev);
4421
4422         if (!adapter)
4423                 return;
4424
4425         cancel_delayed_work_sync(&adapter->work);
4426         cancel_delayed_work_sync(&adapter->func_recovery_work);
4427
4428         netif_device_detach(adapter->netdev);
4429
4430         be_cmd_reset_function(adapter);
4431
4432         pci_disable_device(pdev);
4433 }
4434
4435 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436                                 pci_channel_state_t state)
4437 {
4438         struct be_adapter *adapter = pci_get_drvdata(pdev);
4439         struct net_device *netdev = adapter->netdev;
4440
4441         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4442
4443         if (!adapter->eeh_error) {
4444                 adapter->eeh_error = true;
4445
4446                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4447
4448                 rtnl_lock();
4449                 netif_device_detach(netdev);
4450                 if (netif_running(netdev))
4451                         be_close(netdev);
4452                 rtnl_unlock();
4453
4454                 be_clear(adapter);
4455         }
4456
4457         if (state == pci_channel_io_perm_failure)
4458                 return PCI_ERS_RESULT_DISCONNECT;
4459
4460         pci_disable_device(pdev);
4461
4462         /* The error could cause the FW to trigger a flash debug dump.
4463          * Resetting the card while a flash dump is in progress
4464          * can prevent it from recovering; wait for the dump to finish.
4465          * Only the first function waits, as one wait per adapter
4466          * is enough.
4467          */
4468         if (pdev->devfn == 0)
4469                 ssleep(30);
4470
4471         return PCI_ERS_RESULT_NEED_RESET;
4472 }
4473
4474 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4475 {
4476         struct be_adapter *adapter = pci_get_drvdata(pdev);
4477         int status;
4478
4479         dev_info(&adapter->pdev->dev, "EEH reset\n");
4480
4481         status = pci_enable_device(pdev);
4482         if (status)
4483                 return PCI_ERS_RESULT_DISCONNECT;
4484
4485         pci_set_master(pdev);
4486         pci_set_power_state(pdev, PCI_D0);
4487         pci_restore_state(pdev);
4488
4489         /* Check if card is ok and fw is ready */
4490         dev_info(&adapter->pdev->dev,
4491                  "Waiting for FW to be ready after EEH reset\n");
4492         status = be_fw_wait_ready(adapter);
4493         if (status)
4494                 return PCI_ERS_RESULT_DISCONNECT;
4495
4496         pci_cleanup_aer_uncorrect_error_status(pdev);
4497         be_clear_all_error(adapter);
4498         return PCI_ERS_RESULT_RECOVERED;
4499 }
4500
4501 static void be_eeh_resume(struct pci_dev *pdev)
4502 {
4503         int status = 0;
4504         struct be_adapter *adapter = pci_get_drvdata(pdev);
4505         struct net_device *netdev = adapter->netdev;
4506
4507         dev_info(&adapter->pdev->dev, "EEH resume\n");
4508
4509         pci_save_state(pdev);
4510
4511         status = be_cmd_reset_function(adapter);
4512         if (status)
4513                 goto err;
4514
4515         /* tell fw we're ready to fire cmds */
4516         status = be_cmd_fw_init(adapter);
4517         if (status)
4518                 goto err;
4519
4520         status = be_setup(adapter);
4521         if (status)
4522                 goto err;
4523
4524         if (netif_running(netdev)) {
4525                 status = be_open(netdev);
4526                 if (status)
4527                         goto err;
4528         }
4529
4530         schedule_delayed_work(&adapter->func_recovery_work,
4531                               msecs_to_jiffies(1000));
4532         netif_device_attach(netdev);
4533         return;
4534 err:
4535         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4536 }
4537
4538 static const struct pci_error_handlers be_eeh_handlers = {
4539         .error_detected = be_eeh_err_detected,
4540         .slot_reset = be_eeh_reset,
4541         .resume = be_eeh_resume,
4542 };
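/* On a PCI channel error the callbacks above run in a fixed order:
 * .error_detected (quiesce, pick a verdict) -> .slot_reset (re-init after
 * the slot reset, report RECOVERED/DISCONNECT) -> .resume (restart I/O).
 * A skeletal .error_detected with illustrative names:
 */
#if 0
static pci_ers_result_t my_err_detected(struct pci_dev *pdev,
					pci_channel_state_t state)
{
	/* permanent failure: give up; otherwise ask for a slot reset */
	return state == pci_channel_io_perm_failure ?
	       PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
#endif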
4543
4544 static struct pci_driver be_driver = {
4545         .name = DRV_NAME,
4546         .id_table = be_dev_ids,
4547         .probe = be_probe,
4548         .remove = be_remove,
4549         .suspend = be_suspend,
4550         .resume = be_resume,
4551         .shutdown = be_shutdown,
4552         .err_handler = &be_eeh_handlers
4553 };
4554
4555 static int __init be_init_module(void)
4556 {
4557         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4558             rx_frag_size != 2048) {
4559                 printk(KERN_WARNING DRV_NAME
4560                         " : Module param rx_frag_size must be 2048/4096/8192."
4561                         " Using 2048\n");
4562                 rx_frag_size = 2048;
4563         }
4564
4565         return pci_register_driver(&be_driver);
4566 }
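/* The rx_frag_size check accepts exactly the power-of-two sizes the RX
 * rings support.  An equivalent, more compact form of the same check
 * (a sketch, not this driver's code) using is_power_of_2() from
 * <linux/log2.h>:
 */
#if 0
if (!is_power_of_2(rx_frag_size) ||
    rx_frag_size < 2048 || rx_frag_size > 8192)
	rx_frag_size = 2048;
#endif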
4567 module_init(be_init_module);
4568
4569 static void __exit be_exit_module(void)
4570 {
4571         pci_unregister_driver(&be_driver);
4572 }
4573 module_exit(be_exit_module);