[pandora-kernel.git] / drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static ushort rx_frag_size = 2048;
31 static unsigned int num_vfs;
32 module_param(rx_frag_size, ushort, S_IRUGO);
33 module_param(num_vfs, uint, S_IRUGO);
34 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
39         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
43         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47 /* UE Status Low CSR */
48 static const char * const ue_status_low_desc[] = {
49         "CEV",
50         "CTX",
51         "DBUF",
52         "ERX",
53         "Host",
54         "MPU",
55         "NDMA",
56         "PTC ",
57         "RDMA ",
58         "RXF ",
59         "RXIPS ",
60         "RXULP0 ",
61         "RXULP1 ",
62         "RXULP2 ",
63         "TIM ",
64         "TPOST ",
65         "TPRE ",
66         "TXIPS ",
67         "TXULP0 ",
68         "TXULP1 ",
69         "UC ",
70         "WDMA ",
71         "TXULP2 ",
72         "HOST1 ",
73         "P0_OB_LINK ",
74         "P1_OB_LINK ",
75         "HOST_GPIO ",
76         "MBOX ",
77         "AXGMAC0",
78         "AXGMAC1",
79         "JTAG",
80         "MPU_INTPEND"
81 };
82 /* UE Status High CSR */
83 static const char * const ue_status_hi_desc[] = {
84         "LPCMEMHOST",
85         "MGMT_MAC",
86         "PCS0ONLINE",
87         "MPU_IRAM",
88         "PCS1ONLINE",
89         "PCTL0",
90         "PCTL1",
91         "PMEM",
92         "RR",
93         "TXPB",
94         "RXPP",
95         "XAUI",
96         "TXP",
97         "ARM",
98         "IPC",
99         "HOST2",
100         "HOST3",
101         "HOST4",
102         "HOST5",
103         "HOST6",
104         "HOST7",
105         "HOST8",
106         "HOST9",
107         "NETC",
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown"
116 };
117
118 /* Is BE in a multi-channel mode */
119 static inline bool be_is_mc(struct be_adapter *adapter)
120 {
121         return (adapter->function_mode & FLEX10_MODE ||
122                 adapter->function_mode & VNIC_MODE ||
123                 adapter->function_mode & UMC_ENABLED);
124 }
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127         struct be_dma_mem *mem = &q->dma_mem;
128         if (mem->va)
129                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130                                   mem->dma);
131 }
132
133 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134                 u16 len, u16 entry_size)
135 {
136         struct be_dma_mem *mem = &q->dma_mem;
137
138         memset(q, 0, sizeof(*q));
139         q->len = len;
140         q->entry_size = entry_size;
141         mem->size = len * entry_size;
142         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143                                      GFP_KERNEL);
144         if (!mem->va)
145                 return -ENOMEM;
146         memset(mem->va, 0, mem->size);
147         return 0;
148 }
149
150 static void be_intr_set(struct be_adapter *adapter, bool enable)
151 {
152         u32 reg, enabled;
153
154         if (adapter->eeh_err)
155                 return;
156
157         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158                                 &reg);
159         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
161         if (!enabled && enable)
162                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163         else if (enabled && !enable)
164                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else
166                 return;
167
168         pci_write_config_dword(adapter->pdev,
169                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
170 }
171
172 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
173 {
174         u32 val = 0;
175         val |= qid & DB_RQ_RING_ID_MASK;
176         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
177
178         wmb();
179         iowrite32(val, adapter->db + DB_RQ_OFFSET);
180 }
181
182 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
183 {
184         u32 val = 0;
185         val |= qid & DB_TXULP_RING_ID_MASK;
186         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
187
188         wmb();
189         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
190 }
191
192 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
193                 bool arm, bool clear_int, u16 num_popped)
194 {
195         u32 val = 0;
196         val |= qid & DB_EQ_RING_ID_MASK;
197         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
199
200         if (adapter->eeh_err)
201                 return;
202
203         if (arm)
204                 val |= 1 << DB_EQ_REARM_SHIFT;
205         if (clear_int)
206                 val |= 1 << DB_EQ_CLR_SHIFT;
207         val |= 1 << DB_EQ_EVNT_SHIFT;
208         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
209         iowrite32(val, adapter->db + DB_EQ_OFFSET);
210 }
211
212 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_CQ_RING_ID_MASK;
216         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_err)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_CQ_REARM_SHIFT;
224         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
225         iowrite32(val, adapter->db + DB_CQ_OFFSET);
226 }
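
/*
 * Illustrative sketch (not a separate driver path): each doorbell helper
 * above packs a ring id and a count into a single 32-bit word and posts it
 * with one iowrite32().  For example, re-arming CQ 5 after popping 3
 * completions would build roughly:
 *
 *	u32 val = 0;
 *	val |= 5 & DB_CQ_RING_ID_MASK;		(which CQ)
 *	val |= 1 << DB_CQ_REARM_SHIFT;		(re-arm the interrupt)
 *	val |= 3 << DB_CQ_NUM_POPPED_SHIFT;	(completions consumed)
 *	iowrite32(val, adapter->db + DB_CQ_OFFSET);
 *
 * (the RING_ID_EXT bits are handled the same way for larger queue ids).
 */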
227
228 static int be_mac_addr_set(struct net_device *netdev, void *p)
229 {
230         struct be_adapter *adapter = netdev_priv(netdev);
231         struct sockaddr *addr = p;
232         int status = 0;
233         u8 current_mac[ETH_ALEN];
234         u32 pmac_id = adapter->pmac_id;
235
236         if (!is_valid_ether_addr(addr->sa_data))
237                 return -EADDRNOTAVAIL;
238
239         status = be_cmd_mac_addr_query(adapter, current_mac,
240                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
241         if (status)
242                 goto err;
243
244         if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
245                 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
246                                 adapter->if_handle, &adapter->pmac_id, 0);
247                 if (status)
248                         goto err;
249
250                 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
251         }
252         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
253         return 0;
254 err:
255         dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
256         return status;
257 }
258
259 static void populate_be2_stats(struct be_adapter *adapter)
260 {
261         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
262         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
263         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
264         struct be_port_rxf_stats_v0 *port_stats =
265                                         &rxf_stats->port[adapter->port_num];
266         struct be_drv_stats *drvs = &adapter->drv_stats;
267
268         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
269         drvs->rx_pause_frames = port_stats->rx_pause_frames;
270         drvs->rx_crc_errors = port_stats->rx_crc_errors;
271         drvs->rx_control_frames = port_stats->rx_control_frames;
272         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
273         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
274         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
275         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
276         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
277         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
278         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
279         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
280         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
281         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
282         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
283         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
284         drvs->rx_dropped_header_too_small =
285                 port_stats->rx_dropped_header_too_small;
286         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
287         drvs->rx_alignment_symbol_errors =
288                 port_stats->rx_alignment_symbol_errors;
289
290         drvs->tx_pauseframes = port_stats->tx_pauseframes;
291         drvs->tx_controlframes = port_stats->tx_controlframes;
292
293         if (adapter->port_num)
294                 drvs->jabber_events = rxf_stats->port1_jabber_events;
295         else
296                 drvs->jabber_events = rxf_stats->port0_jabber_events;
297         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
298         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
299         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
300         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
301         drvs->forwarded_packets = rxf_stats->forwarded_packets;
302         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
303         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
304         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
305         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
306 }
307
308 static void populate_be3_stats(struct be_adapter *adapter)
309 {
310         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
312         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
313         struct be_port_rxf_stats_v1 *port_stats =
314                                         &rxf_stats->port[adapter->port_num];
315         struct be_drv_stats *drvs = &adapter->drv_stats;
316
317         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
318         drvs->rx_pause_frames = port_stats->rx_pause_frames;
319         drvs->rx_crc_errors = port_stats->rx_crc_errors;
320         drvs->rx_control_frames = port_stats->rx_control_frames;
321         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
322         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
323         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
324         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
325         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
326         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
327         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
328         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
329         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
330         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
331         drvs->rx_dropped_header_too_small =
332                 port_stats->rx_dropped_header_too_small;
333         drvs->rx_input_fifo_overflow_drop =
334                 port_stats->rx_input_fifo_overflow_drop;
335         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
336         drvs->rx_alignment_symbol_errors =
337                 port_stats->rx_alignment_symbol_errors;
338         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
339         drvs->tx_pauseframes = port_stats->tx_pauseframes;
340         drvs->tx_controlframes = port_stats->tx_controlframes;
341         drvs->jabber_events = port_stats->jabber_events;
342         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
343         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
344         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
345         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
346         drvs->forwarded_packets = rxf_stats->forwarded_packets;
347         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
348         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
349         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
350         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
351 }
352
353 static void populate_lancer_stats(struct be_adapter *adapter)
354 {
356         struct be_drv_stats *drvs = &adapter->drv_stats;
357         struct lancer_pport_stats *pport_stats =
358                                         pport_stats_from_cmd(adapter);
359
360         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
361         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
362         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
363         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
364         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
365         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
366         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
367         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
368         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
369         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
370         drvs->rx_dropped_tcp_length =
371                                 pport_stats->rx_dropped_invalid_tcp_length;
372         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
373         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
374         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
375         drvs->rx_dropped_header_too_small =
376                                 pport_stats->rx_dropped_header_too_small;
377         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
378         drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
379         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
380         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
381         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
382         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
383         drvs->jabber_events = pport_stats->rx_jabbers;
384         drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
385         drvs->forwarded_packets = pport_stats->num_forwards_lo;
386         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
387         drvs->rx_drops_too_many_frags =
388                                 pport_stats->rx_drops_too_many_frags_lo;
389 }
390
391 static void accumulate_16bit_val(u32 *acc, u16 val)
392 {
393 #define lo(x)                   (x & 0xFFFF)
394 #define hi(x)                   (x & 0xFFFF0000)
395         bool wrapped = val < lo(*acc);
396         u32 newacc = hi(*acc) + val;
397
398         if (wrapped)
399                 newacc += 65536;
400         ACCESS_ONCE(*acc) = newacc;
401 }
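
/*
 * Worked example: if *acc is 0x0001FFFE (lo = 0xFFFE) and the HW register
 * now reads val = 0x0003, the 16-bit counter must have wrapped, since
 * 0x0003 < 0xFFFE.  Then newacc = hi(*acc) + val + 65536 =
 * 0x00010000 + 0x00000003 + 0x00010000 = 0x00020003, i.e. the 32-bit
 * accumulator regains the 0x10000 that the 16-bit register lost on wrap.
 */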
402
403 void be_parse_stats(struct be_adapter *adapter)
404 {
405         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
406         struct be_rx_obj *rxo;
407         int i;
408
409         if (adapter->generation == BE_GEN3) {
410                 if (lancer_chip(adapter))
411                         populate_lancer_stats(adapter);
412                 else
413                         populate_be3_stats(adapter);
414         } else {
415                 populate_be2_stats(adapter);
416         }
417
418         /* as erx_v1 is longer than v0, OK to use v1 defn for v0 access */
419         for_all_rx_queues(adapter, rxo, i) {
420                 /* the erx HW counter below can wrap around after 65535;
421                  * the driver accumulates it into a 32-bit value
422                  */
423                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
424                                 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
425         }
426 }
427
428 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
429                                         struct rtnl_link_stats64 *stats)
430 {
431         struct be_adapter *adapter = netdev_priv(netdev);
432         struct be_drv_stats *drvs = &adapter->drv_stats;
433         struct be_rx_obj *rxo;
434         struct be_tx_obj *txo;
435         u64 pkts, bytes;
436         unsigned int start;
437         int i;
438
439         for_all_rx_queues(adapter, rxo, i) {
440                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
441                 do {
442                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
443                         pkts = rx_stats(rxo)->rx_pkts;
444                         bytes = rx_stats(rxo)->rx_bytes;
445                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
446                 stats->rx_packets += pkts;
447                 stats->rx_bytes += bytes;
448                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
449                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
450                                         rx_stats(rxo)->rx_drops_no_frags;
451         }
452
453         for_all_tx_queues(adapter, txo, i) {
454                 const struct be_tx_stats *tx_stats = tx_stats(txo);
455                 do {
456                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
457                         pkts = tx_stats(txo)->tx_pkts;
458                         bytes = tx_stats(txo)->tx_bytes;
459                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
460                 stats->tx_packets += pkts;
461                 stats->tx_bytes += bytes;
462         }
463
464         /* bad pkts received */
465         stats->rx_errors = drvs->rx_crc_errors +
466                 drvs->rx_alignment_symbol_errors +
467                 drvs->rx_in_range_errors +
468                 drvs->rx_out_range_errors +
469                 drvs->rx_frame_too_long +
470                 drvs->rx_dropped_too_small +
471                 drvs->rx_dropped_too_short +
472                 drvs->rx_dropped_header_too_small +
473                 drvs->rx_dropped_tcp_length +
474                 drvs->rx_dropped_runt;
475
476         /* detailed rx errors */
477         stats->rx_length_errors = drvs->rx_in_range_errors +
478                 drvs->rx_out_range_errors +
479                 drvs->rx_frame_too_long;
480
481         stats->rx_crc_errors = drvs->rx_crc_errors;
482
483         /* frame alignment errors */
484         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
485
486         /* receiver fifo overrun */
487         /* drops_no_pbuf is not per i/f, it's per BE card */
488         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
489                                 drvs->rx_input_fifo_overflow_drop +
490                                 drvs->rx_drops_no_pbuf;
491         return stats;
492 }
493
494 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
495 {
496         struct net_device *netdev = adapter->netdev;
497
498         /* when link status changes, link speed must be re-queried from card */
499         adapter->link_speed = -1;
500         if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
501                 netif_carrier_on(netdev);
502                 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
503         } else {
504                 netif_carrier_off(netdev);
505                 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
506         }
507 }
508
509 static void be_tx_stats_update(struct be_tx_obj *txo,
510                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
511 {
512         struct be_tx_stats *stats = tx_stats(txo);
513
514         u64_stats_update_begin(&stats->sync);
515         stats->tx_reqs++;
516         stats->tx_wrbs += wrb_cnt;
517         stats->tx_bytes += copied;
518         stats->tx_pkts += (gso_segs ? gso_segs : 1);
519         if (stopped)
520                 stats->tx_stops++;
521         u64_stats_update_end(&stats->sync);
522 }
523
524 /* Determine number of WRB entries needed to xmit data in an skb */
525 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
526                                                                 bool *dummy)
527 {
528         int cnt = (skb->len > skb->data_len);
529
530         cnt += skb_shinfo(skb)->nr_frags;
531
532         /* to account for hdr wrb */
533         cnt++;
534         if (lancer_chip(adapter) || !(cnt & 1)) {
535                 *dummy = false;
536         } else {
537                 /* add a dummy to make it an even number */
538                 cnt++;
539                 *dummy = true;
540         }
541         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
542         return cnt;
543 }
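
/*
 * Example: a linear skb with two page frags needs 1 (headlen) + 2 (frags)
 * + 1 (hdr wrb) = 4 WRBs; 4 is even, so no dummy is added.  With three
 * frags the count would be 5, and on non-Lancer chips a dummy WRB is
 * appended to keep the number of ring entries consumed even.
 */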
544
545 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
546 {
547         wrb->frag_pa_hi = upper_32_bits(addr);
548         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
549         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
550 }
551
552 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
553                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
554 {
555         u8 vlan_prio = 0;
556         u16 vlan_tag = 0;
557
558         memset(hdr, 0, sizeof(*hdr));
559
560         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
561
562         if (skb_is_gso(skb)) {
563                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
564                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
565                         hdr, skb_shinfo(skb)->gso_size);
566                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
567                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
568                 if (lancer_chip(adapter) && adapter->sli_family  ==
569                                                         LANCER_A0_SLI_FAMILY) {
570                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
571                         if (is_tcp_pkt(skb))
572                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
573                                                                 tcpcs, hdr, 1);
574                         else if (is_udp_pkt(skb))
575                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
576                                                                 udpcs, hdr, 1);
577                 }
578         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
579                 if (is_tcp_pkt(skb))
580                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
581                 else if (is_udp_pkt(skb))
582                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
583         }
584
585         if (vlan_tx_tag_present(skb)) {
586                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
587                 vlan_tag = vlan_tx_tag_get(skb);
588                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
589                 /* If vlan priority provided by OS is NOT in available bmap */
590                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
591                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
592                                         adapter->recommended_prio;
593                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
594         }
595
596         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
597         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
598         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
599         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
600 }
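
/*
 * Example (illustrative values): if the stack hands down VLAN tag 0x6064
 * (priority 3, vid 100) but bit 3 is clear in adapter->vlan_prio_bmap,
 * the priority bits are replaced with adapter->recommended_prio while the
 * vid bits (0x064) are preserved in the tag written to the hdr wrb.
 */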
601
602 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
603                 bool unmap_single)
604 {
605         dma_addr_t dma;
606
607         be_dws_le_to_cpu(wrb, sizeof(*wrb));
608
609         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
610         if (wrb->frag_len) {
611                 if (unmap_single)
612                         dma_unmap_single(dev, dma, wrb->frag_len,
613                                          DMA_TO_DEVICE);
614                 else
615                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
616         }
617 }
618
619 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
620                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
621 {
622         dma_addr_t busaddr;
623         int i, copied = 0;
624         struct device *dev = &adapter->pdev->dev;
625         struct sk_buff *first_skb = skb;
626         struct be_eth_wrb *wrb;
627         struct be_eth_hdr_wrb *hdr;
628         bool map_single = false;
629         u16 map_head;
630
631         hdr = queue_head_node(txq);
632         queue_head_inc(txq);
633         map_head = txq->head;
634
635         if (skb->len > skb->data_len) {
636                 int len = skb_headlen(skb);
637                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
638                 if (dma_mapping_error(dev, busaddr))
639                         goto dma_err;
640                 map_single = true;
641                 wrb = queue_head_node(txq);
642                 wrb_fill(wrb, busaddr, len);
643                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
644                 queue_head_inc(txq);
645                 copied += len;
646         }
647
648         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
649                 const struct skb_frag_struct *frag =
650                         &skb_shinfo(skb)->frags[i];
651                 busaddr = skb_frag_dma_map(dev, frag, 0,
652                                            skb_frag_size(frag), DMA_TO_DEVICE);
653                 if (dma_mapping_error(dev, busaddr))
654                         goto dma_err;
655                 wrb = queue_head_node(txq);
656                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
657                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
658                 queue_head_inc(txq);
659                 copied += skb_frag_size(frag);
660         }
661
662         if (dummy_wrb) {
663                 wrb = queue_head_node(txq);
664                 wrb_fill(wrb, 0, 0);
665                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
666                 queue_head_inc(txq);
667         }
668
669         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
670         be_dws_cpu_to_le(hdr, sizeof(*hdr));
671
672         return copied;
673 dma_err:
674         txq->head = map_head;
675         while (copied) {
676                 wrb = queue_head_node(txq);
677                 unmap_tx_frag(dev, wrb, map_single);
678                 map_single = false;
679                 copied -= wrb->frag_len;
680                 queue_head_inc(txq);
681         }
682         return 0;
683 }
684
685 static netdev_tx_t be_xmit(struct sk_buff *skb,
686                         struct net_device *netdev)
687 {
688         struct be_adapter *adapter = netdev_priv(netdev);
689         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
690         struct be_queue_info *txq = &txo->q;
691         u32 wrb_cnt = 0, copied = 0;
692         u32 start = txq->head;
693         bool dummy_wrb, stopped = false;
694
695         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
696
697         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
698         if (copied) {
699                 int gso_segs = skb_shinfo(skb)->gso_segs;
700
701                 /* record the sent skb in the sent_skb table */
702                 BUG_ON(txo->sent_skb_list[start]);
703                 txo->sent_skb_list[start] = skb;
704
705                 /* Ensure txq has space for the next skb; else stop the queue
706                  * *BEFORE* ringing the tx doorbell, so that we serialize the
707                  * tx compls of the current transmit, which will wake the queue
708                  */
709                 atomic_add(wrb_cnt, &txq->used);
710                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
711                                                                 txq->len) {
712                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
713                         stopped = true;
714                 }
715
716                 be_txq_notify(adapter, txq->id, wrb_cnt);
717
718                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
719         } else {
720                 txq->head = start;
721                 dev_kfree_skb_any(skb);
722         }
723         return NETDEV_TX_OK;
724 }
725
726 static int be_change_mtu(struct net_device *netdev, int new_mtu)
727 {
728         struct be_adapter *adapter = netdev_priv(netdev);
729         if (new_mtu < BE_MIN_MTU ||
730                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
731                                         (ETH_HLEN + ETH_FCS_LEN))) {
732                 dev_info(&adapter->pdev->dev,
733                         "MTU must be between %d and %d bytes\n",
734                         BE_MIN_MTU,
735                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
736                 return -EINVAL;
737         }
738         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
739                         netdev->mtu, new_mtu);
740         netdev->mtu = new_mtu;
741         return 0;
742 }
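
/*
 * Example (assuming the be.h values BE_MAX_JUMBO_FRAME_SIZE = 9018 and
 * BE_MIN_MTU = 256): the largest MTU accepted is 9018 - (14 + 4) = 9000
 * once the Ethernet header and FCS are subtracted, so valid requests fall
 * in the range 256..9000.
 */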
743
744 /*
745  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
746  * If the user configures more, place BE in vlan promiscuous mode.
747  */
748 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
749 {
750         u16 vtag[BE_NUM_VLANS_SUPPORTED];
751         u16 ntags = 0, i;
752         int status = 0;
753         u32 if_handle;
754
755         if (vf) {
756                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
757                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
758                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
759         }
760
761         /* No need to further configure vids if in promiscuous mode */
762         if (adapter->promiscuous)
763                 return 0;
764
765         if (adapter->vlans_added <= adapter->max_vlans)  {
766                 /* Construct VLAN Table to give to HW */
767                 for (i = 0; i < VLAN_N_VID; i++) {
768                         if (adapter->vlan_tag[i]) {
769                                 vtag[ntags] = cpu_to_le16(i);
770                                 ntags++;
771                         }
772                 }
773                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
774                                         vtag, ntags, 1, 0);
775         } else {
776                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
777                                         NULL, 0, 1, 1);
778         }
779
780         return status;
781 }
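
/*
 * Usage sketch (hypothetical count): with max_vlans = 64, adding a 65th
 * vid makes vlans_added exceed max_vlans, so the table-based path above is
 * skipped and be_cmd_vlan_config(adapter, adapter->if_handle, NULL, 0, 1, 1)
 * places the interface in vlan promiscuous mode instead of programming
 * individual tags.
 */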
782
783 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
784 {
785         struct be_adapter *adapter = netdev_priv(netdev);
786
787         adapter->vlans_added++;
788         if (!be_physfn(adapter))
789                 return;
790
791         adapter->vlan_tag[vid] = 1;
792         if (adapter->vlans_added <= (adapter->max_vlans + 1))
793                 be_vid_config(adapter, false, 0);
794 }
795
796 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
797 {
798         struct be_adapter *adapter = netdev_priv(netdev);
799
800         adapter->vlans_added--;
801
802         if (!be_physfn(adapter))
803                 return;
804
805         adapter->vlan_tag[vid] = 0;
806         if (adapter->vlans_added <= adapter->max_vlans)
807                 be_vid_config(adapter, false, 0);
808 }
809
810 static void be_set_rx_mode(struct net_device *netdev)
811 {
812         struct be_adapter *adapter = netdev_priv(netdev);
813
814         if (netdev->flags & IFF_PROMISC) {
815                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
816                 adapter->promiscuous = true;
817                 goto done;
818         }
819
820         /* BE was previously in promiscuous mode; disable it */
821         if (adapter->promiscuous) {
822                 adapter->promiscuous = false;
823                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
824
825                 if (adapter->vlans_added)
826                         be_vid_config(adapter, false, 0);
827         }
828
829         /* Enable multicast promisc if num configured exceeds what we support */
830         if (netdev->flags & IFF_ALLMULTI ||
831                         netdev_mc_count(netdev) > BE_MAX_MC) {
832                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
833                 goto done;
834         }
835
836         be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
837 done:
838         return;
839 }
840
841 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
842 {
843         struct be_adapter *adapter = netdev_priv(netdev);
844         int status;
845
846         if (!adapter->sriov_enabled)
847                 return -EPERM;
848
849         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
850                 return -EINVAL;
851
852         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
853                 status = be_cmd_pmac_del(adapter,
854                                         adapter->vf_cfg[vf].vf_if_handle,
855                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
856
857         status = be_cmd_pmac_add(adapter, mac,
858                                 adapter->vf_cfg[vf].vf_if_handle,
859                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
860
861         if (status)
862                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
863                                 mac, vf);
864         else
865                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
866
867         return status;
868 }
869
870 static int be_get_vf_config(struct net_device *netdev, int vf,
871                         struct ifla_vf_info *vi)
872 {
873         struct be_adapter *adapter = netdev_priv(netdev);
874
875         if (!adapter->sriov_enabled)
876                 return -EPERM;
877
878         if (vf >= num_vfs)
879                 return -EINVAL;
880
881         vi->vf = vf;
882         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
883         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
884         vi->qos = 0;
885         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
886
887         return 0;
888 }
889
890 static int be_set_vf_vlan(struct net_device *netdev,
891                         int vf, u16 vlan, u8 qos)
892 {
893         struct be_adapter *adapter = netdev_priv(netdev);
894         int status = 0;
895
896         if (!adapter->sriov_enabled)
897                 return -EPERM;
898
899         if ((vf >= num_vfs) || (vlan > 4095))
900                 return -EINVAL;
901
902         if (vlan) {
903                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
904                 adapter->vlans_added++;
905         } else {
906                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
907                 adapter->vlans_added--;
908         }
909
910         status = be_vid_config(adapter, true, vf);
911
912         if (status)
913                 dev_info(&adapter->pdev->dev,
914                                 "VLAN %d config on VF %d failed\n", vlan, vf);
915         return status;
916 }
917
918 static int be_set_vf_tx_rate(struct net_device *netdev,
919                         int vf, int rate)
920 {
921         struct be_adapter *adapter = netdev_priv(netdev);
922         int status = 0;
923
924         if (!adapter->sriov_enabled)
925                 return -EPERM;
926
927         if ((vf >= num_vfs) || (rate < 0))
928                 return -EINVAL;
929
930         if (rate > 10000)
931                 rate = 10000;
932
933         adapter->vf_cfg[vf].vf_tx_rate = rate;
934         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
935
936         if (status)
937                 dev_info(&adapter->pdev->dev,
938                                 "tx rate %d on VF %d failed\n", rate, vf);
939         return status;
940 }
941
942 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
943 {
944         struct be_eq_obj *rx_eq = &rxo->rx_eq;
945         struct be_rx_stats *stats = rx_stats(rxo);
946         ulong now = jiffies;
947         ulong delta = now - stats->rx_jiffies;
948         u64 pkts;
949         unsigned int start, eqd;
950
951         if (!rx_eq->enable_aic)
952                 return;
953
954         /* Wrapped around */
955         if (time_before(now, stats->rx_jiffies)) {
956                 stats->rx_jiffies = now;
957                 return;
958         }
959
960         /* Update once a second */
961         if (delta < HZ)
962                 return;
963
964         do {
965                 start = u64_stats_fetch_begin_bh(&stats->sync);
966                 pkts = stats->rx_pkts;
967         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
968
969         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
970         stats->rx_pkts_prev = pkts;
971         stats->rx_jiffies = now;
972         eqd = stats->rx_pps / 110000;
973         eqd = eqd << 3;
974         if (eqd > rx_eq->max_eqd)
975                 eqd = rx_eq->max_eqd;
976         if (eqd < rx_eq->min_eqd)
977                 eqd = rx_eq->min_eqd;
978         if (eqd < 10)
979                 eqd = 0;
980         if (eqd != rx_eq->cur_eqd) {
981                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
982                 rx_eq->cur_eqd = eqd;
983         }
984 }
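
/*
 * Worked example (ignoring the min/max clamp): at 550,000 pkts/s measured
 * over one second, eqd = (550000 / 110000) << 3 = 40, so the EQ delay is
 * reprogrammed to 40.  Below 220,000 pkts/s the shifted value is at most 8,
 * which falls under the threshold of 10, so interrupt delay is disabled
 * (eqd = 0) unless min_eqd raises it first.
 */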
985
986 static void be_rx_stats_update(struct be_rx_obj *rxo,
987                 struct be_rx_compl_info *rxcp)
988 {
989         struct be_rx_stats *stats = rx_stats(rxo);
990
991         u64_stats_update_begin(&stats->sync);
992         stats->rx_compl++;
993         stats->rx_bytes += rxcp->pkt_size;
994         stats->rx_pkts++;
995         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
996                 stats->rx_mcast_pkts++;
997         if (rxcp->err)
998                 stats->rx_compl_err++;
999         u64_stats_update_end(&stats->sync);
1000 }
1001
1002 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1003 {
1004         /* L4 checksum is not reliable for non-TCP/UDP packets.
1005          * Also ignore ipcksm for IPv6 pkts */
1006         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1007                                 (rxcp->ip_csum || rxcp->ipv6);
1008 }
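
/*
 * Example: a TCP/IPv4 frame with both l4_csum and ip_csum set passes, so
 * be_rx_compl_process() marks the skb CHECKSUM_UNNECESSARY; a non-TCP/UDP
 * frame (tcpf and udpf both clear) never passes, leaving verification to
 * the stack; for IPv6 only the L4 result matters, as ipcksm is
 * meaningless there.
 */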
1009
1010 static struct be_rx_page_info *
1011 get_rx_page_info(struct be_adapter *adapter,
1012                 struct be_rx_obj *rxo,
1013                 u16 frag_idx)
1014 {
1015         struct be_rx_page_info *rx_page_info;
1016         struct be_queue_info *rxq = &rxo->q;
1017
1018         rx_page_info = &rxo->page_info_tbl[frag_idx];
1019         BUG_ON(!rx_page_info->page);
1020
1021         if (rx_page_info->last_page_user) {
1022                 dma_unmap_page(&adapter->pdev->dev,
1023                                dma_unmap_addr(rx_page_info, bus),
1024                                adapter->big_page_size, DMA_FROM_DEVICE);
1025                 rx_page_info->last_page_user = false;
1026         }
1027
1028         atomic_dec(&rxq->used);
1029         return rx_page_info;
1030 }
1031
1032 /* Throw away the data in the Rx completion */
1033 static void be_rx_compl_discard(struct be_adapter *adapter,
1034                 struct be_rx_obj *rxo,
1035                 struct be_rx_compl_info *rxcp)
1036 {
1037         struct be_queue_info *rxq = &rxo->q;
1038         struct be_rx_page_info *page_info;
1039         u16 i, num_rcvd = rxcp->num_rcvd;
1040
1041         for (i = 0; i < num_rcvd; i++) {
1042                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1043                 put_page(page_info->page);
1044                 memset(page_info, 0, sizeof(*page_info));
1045                 index_inc(&rxcp->rxq_idx, rxq->len);
1046         }
1047 }
1048
1049 /*
1050  * skb_fill_rx_data forms a complete skb for an ether frame
1051  * indicated by rxcp.
1052  */
1053 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1054                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1055 {
1056         struct be_queue_info *rxq = &rxo->q;
1057         struct be_rx_page_info *page_info;
1058         u16 i, j;
1059         u16 hdr_len, curr_frag_len, remaining;
1060         u8 *start;
1061
1062         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1063         start = page_address(page_info->page) + page_info->page_offset;
1064         prefetch(start);
1065
1066         /* Copy data in the first descriptor of this completion */
1067         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1068
1069         /* Copy the header portion into skb_data */
1070         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1071         memcpy(skb->data, start, hdr_len);
1072         skb->len = curr_frag_len;
1073         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1074                 /* Complete packet has now been moved to data */
1075                 put_page(page_info->page);
1076                 skb->data_len = 0;
1077                 skb->tail += curr_frag_len;
1078         } else {
1079                 skb_shinfo(skb)->nr_frags = 1;
1080                 skb_frag_set_page(skb, 0, page_info->page);
1081                 skb_shinfo(skb)->frags[0].page_offset =
1082                                         page_info->page_offset + hdr_len;
1083                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1084                 skb->data_len = curr_frag_len - hdr_len;
1085                 skb->truesize += rx_frag_size;
1086                 skb->tail += hdr_len;
1087         }
1088         page_info->page = NULL;
1089
1090         if (rxcp->pkt_size <= rx_frag_size) {
1091                 BUG_ON(rxcp->num_rcvd != 1);
1092                 return;
1093         }
1094
1095         /* More frags present for this completion */
1096         index_inc(&rxcp->rxq_idx, rxq->len);
1097         remaining = rxcp->pkt_size - curr_frag_len;
1098         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1099                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1100                 curr_frag_len = min(remaining, rx_frag_size);
1101
1102                 /* Coalesce all frags from the same physical page in one slot */
1103                 if (page_info->page_offset == 0) {
1104                         /* Fresh page */
1105                         j++;
1106                         skb_frag_set_page(skb, j, page_info->page);
1107                         skb_shinfo(skb)->frags[j].page_offset =
1108                                                         page_info->page_offset;
1109                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1110                         skb_shinfo(skb)->nr_frags++;
1111                 } else {
1112                         put_page(page_info->page);
1113                 }
1114
1115                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1116                 skb->len += curr_frag_len;
1117                 skb->data_len += curr_frag_len;
1118                 skb->truesize += rx_frag_size;
1119                 remaining -= curr_frag_len;
1120                 index_inc(&rxcp->rxq_idx, rxq->len);
1121                 page_info->page = NULL;
1122         }
1123         BUG_ON(j > MAX_SKB_FRAGS);
1124 }
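
/*
 * Example: for a 1500-byte frame with the default rx_frag_size of 2048,
 * the whole packet fits in one fragment; only the first BE_HDR_LEN bytes
 * are copied into skb->data for cheap header parsing, and the remaining
 * payload stays in the receive page, attached as frags[0] without a copy.
 */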
1125
1126 /* Process the RX completion indicated by rxcp when GRO is disabled */
1127 static void be_rx_compl_process(struct be_adapter *adapter,
1128                         struct be_rx_obj *rxo,
1129                         struct be_rx_compl_info *rxcp)
1130 {
1131         struct net_device *netdev = adapter->netdev;
1132         struct sk_buff *skb;
1133
1134         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1135         if (unlikely(!skb)) {
1136                 rx_stats(rxo)->rx_drops_no_skbs++;
1137                 be_rx_compl_discard(adapter, rxo, rxcp);
1138                 return;
1139         }
1140
1141         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1142
1143         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1144                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1145         else
1146                 skb_checksum_none_assert(skb);
1147
1148         skb->protocol = eth_type_trans(skb, netdev);
1149         if (adapter->netdev->features & NETIF_F_RXHASH)
1150                 skb->rxhash = rxcp->rss_hash;
1151
1153         if (rxcp->vlanf)
1154                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1155
1156         netif_receive_skb(skb);
1157 }
1158
1159 /* Process the RX completion indicated by rxcp when GRO is enabled */
1160 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1161                 struct be_rx_obj *rxo,
1162                 struct be_rx_compl_info *rxcp)
1163 {
1164         struct be_rx_page_info *page_info;
1165         struct sk_buff *skb = NULL;
1166         struct be_queue_info *rxq = &rxo->q;
1167         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1168         u16 remaining, curr_frag_len;
1169         u16 i, j;
1170
1171         skb = napi_get_frags(&eq_obj->napi);
1172         if (!skb) {
1173                 be_rx_compl_discard(adapter, rxo, rxcp);
1174                 return;
1175         }
1176
1177         remaining = rxcp->pkt_size;
1178         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1179                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1180
1181                 curr_frag_len = min(remaining, rx_frag_size);
1182
1183                 /* Coalesce all frags from the same physical page in one slot */
1184                 if (i == 0 || page_info->page_offset == 0) {
1185                         /* First frag or Fresh page */
1186                         j++;
1187                         skb_frag_set_page(skb, j, page_info->page);
1188                         skb_shinfo(skb)->frags[j].page_offset =
1189                                                         page_info->page_offset;
1190                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1191                 } else {
1192                         put_page(page_info->page);
1193                 }
1194                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1195                 skb->truesize += rx_frag_size;
1196                 remaining -= curr_frag_len;
1197                 index_inc(&rxcp->rxq_idx, rxq->len);
1198                 memset(page_info, 0, sizeof(*page_info));
1199         }
1200         BUG_ON(j > MAX_SKB_FRAGS);
1201
1202         skb_shinfo(skb)->nr_frags = j + 1;
1203         skb->len = rxcp->pkt_size;
1204         skb->data_len = rxcp->pkt_size;
1205         skb->ip_summed = CHECKSUM_UNNECESSARY;
1206         if (adapter->netdev->features & NETIF_F_RXHASH)
1207                 skb->rxhash = rxcp->rss_hash;
1208
1209         if (rxcp->vlanf)
1210                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1211
1212         napi_gro_frags(&eq_obj->napi);
1213 }
1214
1215 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1216                                 struct be_eth_rx_compl *compl,
1217                                 struct be_rx_compl_info *rxcp)
1218 {
1219         rxcp->pkt_size =
1220                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1221         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1222         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1223         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1224         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1225         rxcp->ip_csum =
1226                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1227         rxcp->l4_csum =
1228                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1229         rxcp->ipv6 =
1230                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1231         rxcp->rxq_idx =
1232                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1233         rxcp->num_rcvd =
1234                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1235         rxcp->pkt_type =
1236                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1237         rxcp->rss_hash =
1238                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1239         if (rxcp->vlanf) {
1240                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1241                                           compl);
1242                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1243                                                compl);
1244         }
1245         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1246 }
1247
1248 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1249                                 struct be_eth_rx_compl *compl,
1250                                 struct be_rx_compl_info *rxcp)
1251 {
1252         rxcp->pkt_size =
1253                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1254         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1255         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1256         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1257         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1258         rxcp->ip_csum =
1259                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1260         rxcp->l4_csum =
1261                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1262         rxcp->ipv6 =
1263                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1264         rxcp->rxq_idx =
1265                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1266         rxcp->num_rcvd =
1267                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1268         rxcp->pkt_type =
1269                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1270         rxcp->rss_hash =
1271                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1272         if (rxcp->vlanf) {
1273                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1274                                           compl);
1275                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1276                                                compl);
1277         }
1278         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1279 }
1280
1281 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1282 {
1283         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1284         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1285         struct be_adapter *adapter = rxo->adapter;
1286
1287         /* For checking the valid bit it is OK to use either definition as the
1288          * valid bit is at the same position in both v0 and v1 Rx compl */
1289         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1290                 return NULL;
1291
1292         rmb();
1293         be_dws_le_to_cpu(compl, sizeof(*compl));
1294
1295         if (adapter->be3_native)
1296                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1297         else
1298                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1299
1300         if (rxcp->vlanf) {
1301                 /* vlanf could be wrongly set in some cards.
1302                  * Ignore it if vtm is not set */
1303                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1304                         rxcp->vlanf = 0;
1305
1306                 if (!lancer_chip(adapter))
1307                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1308
1309                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1310                     !adapter->vlan_tag[rxcp->vlan_tag])
1311                         rxcp->vlanf = 0;
1312         }
1313
1314         /* As the compl has been parsed, reset it; we won't touch it again */
1315         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1316
1317         queue_tail_inc(&rxo->cq);
1318         return rxcp;
1319 }
1320
1321 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1322 {
1323         u32 order = get_order(size);
1324
1325         if (order > 0)
1326                 gfp |= __GFP_COMP;
1327         return  alloc_pages(gfp, order);
1328 }
1329
1330 /*
1331  * Allocate a page, split it to fragments of size rx_frag_size and post as
1332  * receive buffers to BE
1333  */
1334 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1335 {
1336         struct be_adapter *adapter = rxo->adapter;
1337         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1338         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1339         struct be_queue_info *rxq = &rxo->q;
1340         struct page *pagep = NULL;
1341         struct be_eth_rx_d *rxd;
1342         u64 page_dmaaddr = 0, frag_dmaaddr;
1343         u32 posted, page_offset = 0;
1344
1345         page_info = &rxo->page_info_tbl[rxq->head];
1346         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1347                 if (!pagep) {
1348                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1349                         if (unlikely(!pagep)) {
1350                                 rx_stats(rxo)->rx_post_fail++;
1351                                 break;
1352                         }
1353                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1354                                                     0, adapter->big_page_size,
1355                                                     DMA_FROM_DEVICE);
1356                         page_info->page_offset = 0;
1357                 } else {
1358                         get_page(pagep);
1359                         page_info->page_offset = page_offset + rx_frag_size;
1360                 }
1361                 page_offset = page_info->page_offset;
1362                 page_info->page = pagep;
1363                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1364                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1365
1366                 rxd = queue_head_node(rxq);
1367                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1368                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1369
1370                 /* Any space left in the current big page for another frag? */
1371                 if ((page_offset + rx_frag_size + rx_frag_size) >
1372                                         adapter->big_page_size) {
1373                         pagep = NULL;
1374                         page_info->last_page_user = true;
1375                 }
1376
1377                 prev_page_info = page_info;
1378                 queue_head_inc(rxq);
1379                 page_info = &page_info_tbl[rxq->head];
1380         }
1381         if (pagep)
1382                 prev_page_info->last_page_user = true;
1383
1384         if (posted) {
1385                 atomic_add(posted, &rxq->used);
1386                 be_rxq_notify(adapter, rxq->id, posted);
1387         } else if (atomic_read(&rxq->used) == 0) {
1388                 /* Let be_worker replenish when memory is available */
1389                 rxo->rx_post_starved = true;
1390         }
1391 }
1392
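/* Fetch the next valid TX completion from the CQ, or NULL if none is
 * pending; the valid bit is cleared so the entry is not seen twice.
 */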
1393 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1394 {
1395         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1396
1397         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1398                 return NULL;
1399
1400         rmb();
1401         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1402
1403         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1404
1405         queue_tail_inc(tx_cq);
1406         return txcp;
1407 }
1408
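/* Unmap and free the skb of the TX request ending at last_index; returns
 * the number of WRBs (including the header WRB) that the request used.
 */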
1409 static u16 be_tx_compl_process(struct be_adapter *adapter,
1410                 struct be_tx_obj *txo, u16 last_index)
1411 {
1412         struct be_queue_info *txq = &txo->q;
1413         struct be_eth_wrb *wrb;
1414         struct sk_buff **sent_skbs = txo->sent_skb_list;
1415         struct sk_buff *sent_skb;
1416         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1417         bool unmap_skb_hdr = true;
1418
1419         sent_skb = sent_skbs[txq->tail];
1420         BUG_ON(!sent_skb);
1421         sent_skbs[txq->tail] = NULL;
1422
1423         /* skip header wrb */
1424         queue_tail_inc(txq);
1425
1426         do {
1427                 cur_index = txq->tail;
1428                 wrb = queue_tail_node(txq);
1429                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1430                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1431                 unmap_skb_hdr = false;
1432
1433                 num_wrbs++;
1434                 queue_tail_inc(txq);
1435         } while (cur_index != last_index);
1436
1437         dev_kfree_skb_any(sent_skb);
1438         return num_wrbs;
1439 }
1440
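/* Fetch the next posted entry from an event queue, or NULL if none */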
1441 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1442 {
1443         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1444
1445         if (!eqe->evt)
1446                 return NULL;
1447
1448         rmb();
1449         eqe->evt = le32_to_cpu(eqe->evt);
1450         queue_tail_inc(&eq_obj->q);
1451         return eqe;
1452 }
1453
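/* Drain all posted events, notify the EQ and schedule NAPI if any events
 * were found; spurious interrupts (no events) only re-arm the EQ.
 */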
1454 static int event_handle(struct be_adapter *adapter,
1455                         struct be_eq_obj *eq_obj,
1456                         bool rearm)
1457 {
1458         struct be_eq_entry *eqe;
1459         u16 num = 0;
1460
1461         while ((eqe = event_get(eq_obj)) != NULL) {
1462                 eqe->evt = 0;
1463                 num++;
1464         }
1465
1466         /* Deal with any spurious interrupts that come
1467          * without events
1468          */
1469         if (!num)
1470                 rearm = true;
1471
1472         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1473         if (num)
1474                 napi_schedule(&eq_obj->napi);
1475
1476         return num;
1477 }
1478
1479 /* Just read and notify events without processing them.
1480  * Used at the time of destroying event queues */
1481 static void be_eq_clean(struct be_adapter *adapter,
1482                         struct be_eq_obj *eq_obj)
1483 {
1484         struct be_eq_entry *eqe;
1485         u16 num = 0;
1486
1487         while ((eqe = event_get(eq_obj)) != NULL) {
1488                 eqe->evt = 0;
1489                 num++;
1490         }
1491
1492         if (num)
1493                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1494 }
1495
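/* Discard pending RX completions and release all posted RX buffers */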
1496 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1497 {
1498         struct be_rx_page_info *page_info;
1499         struct be_queue_info *rxq = &rxo->q;
1500         struct be_queue_info *rx_cq = &rxo->cq;
1501         struct be_rx_compl_info *rxcp;
1502         u16 tail;
1503
1504         /* First cleanup pending rx completions */
1505         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1506                 be_rx_compl_discard(adapter, rxo, rxcp);
1507                 be_cq_notify(adapter, rx_cq->id, false, 1);
1508         }
1509
1510         /* Then free posted rx buffers that were not used */
1511         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1512         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1513                 page_info = get_rx_page_info(adapter, rxo, tail);
1514                 put_page(page_info->page);
1515                 memset(page_info, 0, sizeof(*page_info));
1516         }
1517         BUG_ON(atomic_read(&rxq->used));
1518         rxq->tail = rxq->head = 0;
1519 }
1520
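/* Reap TX completions for up to 200ms; any posted TX requests whose
 * completions never arrive are then unmapped and freed directly.
 */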
1521 static void be_tx_compl_clean(struct be_adapter *adapter,
1522                                 struct be_tx_obj *txo)
1523 {
1524         struct be_queue_info *tx_cq = &txo->cq;
1525         struct be_queue_info *txq = &txo->q;
1526         struct be_eth_tx_compl *txcp;
1527         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1528         struct sk_buff **sent_skbs = txo->sent_skb_list;
1529         struct sk_buff *sent_skb;
1530         bool dummy_wrb;
1531
1532         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1533         do {
1534                 while ((txcp = be_tx_compl_get(tx_cq))) {
1535                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1536                                         wrb_index, txcp);
1537                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1538                         cmpl++;
1539                 }
1540                 if (cmpl) {
1541                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1542                         atomic_sub(num_wrbs, &txq->used);
1543                         cmpl = 0;
1544                         num_wrbs = 0;
1545                 }
1546
1547                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1548                         break;
1549
1550                 mdelay(1);
1551         } while (true);
1552
1553         if (atomic_read(&txq->used))
1554                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1555                         atomic_read(&txq->used));
1556
1557         /* free posted tx for which compls will never arrive */
1558         while (atomic_read(&txq->used)) {
1559                 sent_skb = sent_skbs[txq->tail];
1560                 end_idx = txq->tail;
1561                 index_adv(&end_idx,
1562                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1563                         txq->len);
1564                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1565                 atomic_sub(num_wrbs, &txq->used);
1566         }
1567 }
1568
1569 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1570 {
1571         struct be_queue_info *q;
1572
1573         q = &adapter->mcc_obj.q;
1574         if (q->created)
1575                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1576         be_queue_free(adapter, q);
1577
1578         q = &adapter->mcc_obj.cq;
1579         if (q->created)
1580                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1581         be_queue_free(adapter, q);
1582 }
1583
1584 /* Must be called only after TX qs are created as MCC shares TX EQ */
1585 static int be_mcc_queues_create(struct be_adapter *adapter)
1586 {
1587         struct be_queue_info *q, *cq;
1588
1589         /* Alloc MCC compl queue */
1590         cq = &adapter->mcc_obj.cq;
1591         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1592                         sizeof(struct be_mcc_compl)))
1593                 goto err;
1594
1595         /* Ask BE to create MCC compl queue; share TX's eq */
1596         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1597                 goto mcc_cq_free;
1598
1599         /* Alloc MCC queue */
1600         q = &adapter->mcc_obj.q;
1601         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1602                 goto mcc_cq_destroy;
1603
1604         /* Ask BE to create MCC queue */
1605         if (be_cmd_mccq_create(adapter, q, cq))
1606                 goto mcc_q_free;
1607
1608         return 0;
1609
1610 mcc_q_free:
1611         be_queue_free(adapter, q);
1612 mcc_cq_destroy:
1613         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1614 mcc_cq_free:
1615         be_queue_free(adapter, cq);
1616 err:
1617         return -1;
1618 }
1619
1620 static void be_tx_queues_destroy(struct be_adapter *adapter)
1621 {
1622         struct be_queue_info *q;
1623         struct be_tx_obj *txo;
1624         u8 i;
1625
1626         for_all_tx_queues(adapter, txo, i) {
1627                 q = &txo->q;
1628                 if (q->created)
1629                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1630                 be_queue_free(adapter, q);
1631
1632                 q = &txo->cq;
1633                 if (q->created)
1634                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1635                 be_queue_free(adapter, q);
1636         }
1637
1638         /* Clear any residual events */
1639         be_eq_clean(adapter, &adapter->tx_eq);
1640
1641         q = &adapter->tx_eq.q;
1642         if (q->created)
1643                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1644         be_queue_free(adapter, q);
1645 }
1646
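/* Multiple TX queues are used only on a BE3 PF that has no VFs enabled
 * and is not in multi-channel mode; everything else gets one TX queue.
 */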
1647 static int be_num_txqs_want(struct be_adapter *adapter)
1648 {
1649         if ((num_vfs && adapter->sriov_enabled) ||
1650                 be_is_mc(adapter) ||
1651                 lancer_chip(adapter) || !be_physfn(adapter) ||
1652                 adapter->generation == BE_GEN2)
1653                 return 1;
1654         else
1655                 return MAX_TX_QS;
1656 }
1657
1658 /* One TX event queue is shared by all TX compl qs */
1659 static int be_tx_queues_create(struct be_adapter *adapter)
1660 {
1661         struct be_queue_info *eq, *q, *cq;
1662         struct be_tx_obj *txo;
1663         u8 i;
1664
1665         adapter->num_tx_qs = be_num_txqs_want(adapter);
1666         if (adapter->num_tx_qs != MAX_TX_QS)
1667                 netif_set_real_num_tx_queues(adapter->netdev,
1668                         adapter->num_tx_qs);
1669
1670         adapter->tx_eq.max_eqd = 0;
1671         adapter->tx_eq.min_eqd = 0;
1672         adapter->tx_eq.cur_eqd = 96;
1673         adapter->tx_eq.enable_aic = false;
1674
1675         eq = &adapter->tx_eq.q;
1676         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1677                 sizeof(struct be_eq_entry)))
1678                 return -1;
1679
1680         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1681                 goto err;
1682         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1683
1684         for_all_tx_queues(adapter, txo, i) {
1685                 cq = &txo->cq;
1686                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1687                         sizeof(struct be_eth_tx_compl)))
1688                         goto err;
1689
1690                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1691                         goto err;
1692
1693                 q = &txo->q;
1694                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1695                         sizeof(struct be_eth_wrb)))
1696                         goto err;
1697
1698                 if (be_cmd_txq_create(adapter, q, cq))
1699                         goto err;
1700         }
1701         return 0;
1702
1703 err:
1704         be_tx_queues_destroy(adapter);
1705         return -1;
1706 }
1707
1708 static void be_rx_queues_destroy(struct be_adapter *adapter)
1709 {
1710         struct be_queue_info *q;
1711         struct be_rx_obj *rxo;
1712         int i;
1713
1714         for_all_rx_queues(adapter, rxo, i) {
1715                 be_queue_free(adapter, &rxo->q);
1716
1717                 q = &rxo->cq;
1718                 if (q->created)
1719                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1720                 be_queue_free(adapter, q);
1721
1722                 q = &rxo->rx_eq.q;
1723                 if (q->created)
1724                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1725                 be_queue_free(adapter, q);
1726         }
1727 }
1728
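/* RSS RX queues are used only on an RSS-capable PF that is not in
 * SR-IOV or multi-channel mode; otherwise a single RX queue is used.
 */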
1729 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1730 {
1731         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1732                 !adapter->sriov_enabled && be_physfn(adapter) &&
1733                 !be_is_mc(adapter)) {
1734                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1735         } else {
1736                 dev_warn(&adapter->pdev->dev,
1737                         "No support for multiple RX queues\n");
1738                 return 1;
1739         }
1740 }
1741
1742 static int be_rx_queues_create(struct be_adapter *adapter)
1743 {
1744         struct be_queue_info *eq, *q, *cq;
1745         struct be_rx_obj *rxo;
1746         int rc, i;
1747
1748         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1749                                 msix_enabled(adapter) ?
1750                                         adapter->num_msix_vec - 1 : 1);
1751         if (adapter->num_rx_qs != MAX_RX_QS)
1752                 dev_warn(&adapter->pdev->dev,
1753                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1754
1755         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1756         for_all_rx_queues(adapter, rxo, i) {
1757                 rxo->adapter = adapter;
1758                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1759                 rxo->rx_eq.enable_aic = true;
1760
1761                 /* EQ */
1762                 eq = &rxo->rx_eq.q;
1763                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1764                                         sizeof(struct be_eq_entry));
1765                 if (rc)
1766                         goto err;
1767
1768                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1769                 if (rc)
1770                         goto err;
1771
1772                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1773
1774                 /* CQ */
1775                 cq = &rxo->cq;
1776                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1777                                 sizeof(struct be_eth_rx_compl));
1778                 if (rc)
1779                         goto err;
1780
1781                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1782                 if (rc)
1783                         goto err;
1784
1785                 /* Rx Q - will be created in be_open() */
1786                 q = &rxo->q;
1787                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1788                                 sizeof(struct be_eth_rx_d));
1789                 if (rc)
1790                         goto err;
1791
1792         }
1793
1794         return 0;
1795 err:
1796         be_rx_queues_destroy(adapter);
1797         return -1;
1798 }
1799
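/* Peek at the EQ tail without consuming the entry */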
1800 static bool event_peek(struct be_eq_obj *eq_obj)
1801 {
1802         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1803         return eqe->evt != 0;
1807 }
1808
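/* Legacy INTx handler. Lancer has no CEV_ISR register, so pending events
 * are peeked directly; on BE the ISR register identifies the EQs that
 * have events pending.
 */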
1809 static irqreturn_t be_intx(int irq, void *dev)
1810 {
1811         struct be_adapter *adapter = dev;
1812         struct be_rx_obj *rxo;
1813         int isr, i, tx = 0, rx = 0;
1814
1815         if (lancer_chip(adapter)) {
1816                 if (event_peek(&adapter->tx_eq))
1817                         tx = event_handle(adapter, &adapter->tx_eq, false);
1818                 for_all_rx_queues(adapter, rxo, i) {
1819                         if (event_peek(&rxo->rx_eq))
1820                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1821                 }
1822
1823                 if (!(tx || rx))
1824                         return IRQ_NONE;
1825
1826         } else {
1827                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1828                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1829                 if (!isr)
1830                         return IRQ_NONE;
1831
1832                 if ((1 << adapter->tx_eq.eq_idx & isr))
1833                         event_handle(adapter, &adapter->tx_eq, false);
1834
1835                 for_all_rx_queues(adapter, rxo, i) {
1836                         if ((1 << rxo->rx_eq.eq_idx & isr))
1837                                 event_handle(adapter, &rxo->rx_eq, true);
1838                 }
1839         }
1840
1841         return IRQ_HANDLED;
1842 }
1843
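/* MSI-X handler for an RX event queue */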
1844 static irqreturn_t be_msix_rx(int irq, void *dev)
1845 {
1846         struct be_rx_obj *rxo = dev;
1847         struct be_adapter *adapter = rxo->adapter;
1848
1849         event_handle(adapter, &rxo->rx_eq, true);
1850
1851         return IRQ_HANDLED;
1852 }
1853
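/* MSI-X handler for the event queue shared by TX and MCC */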
1854 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1855 {
1856         struct be_adapter *adapter = dev;
1857
1858         event_handle(adapter, &adapter->tx_eq, false);
1859
1860         return IRQ_HANDLED;
1861 }
1862
1863 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1864 {
1865         return rxcp->tcpf && !rxcp->err;
1866 }
1867
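/* NAPI poll handler for RX: process up to "budget" completions, refill
 * the RX queue when it runs low and re-arm the CQ once all pending
 * completions have been consumed.
 */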
1868 static int be_poll_rx(struct napi_struct *napi, int budget)
1869 {
1870         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1871         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1872         struct be_adapter *adapter = rxo->adapter;
1873         struct be_queue_info *rx_cq = &rxo->cq;
1874         struct be_rx_compl_info *rxcp;
1875         u32 work_done;
1876
1877         rx_stats(rxo)->rx_polls++;
1878         for (work_done = 0; work_done < budget; work_done++) {
1879                 rxcp = be_rx_compl_get(rxo);
1880                 if (!rxcp)
1881                         break;
1882
1883                 /* Is it a flush compl that has no data */
1884                 if (unlikely(rxcp->num_rcvd == 0))
1885                         goto loop_continue;
1886
1887                 /* Discard compl with partial DMA Lancer B0 */
1888                 if (unlikely(!rxcp->pkt_size)) {
1889                         be_rx_compl_discard(adapter, rxo, rxcp);
1890                         goto loop_continue;
1891                 }
1892
1893                 /* On BE, drop pkts that arrive due to imperfect filtering in
1894                  * promiscuous mode on some skews
1895                  */
1896                 if (unlikely(rxcp->port != adapter->port_num &&
1897                                 !lancer_chip(adapter))) {
1898                         be_rx_compl_discard(adapter, rxo, rxcp);
1899                         goto loop_continue;
1900                 }
1901
1902                 if (do_gro(rxcp))
1903                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1904                 else
1905                         be_rx_compl_process(adapter, rxo, rxcp);
1906 loop_continue:
1907                 be_rx_stats_update(rxo, rxcp);
1908         }
1909
1910         be_cq_notify(adapter, rx_cq->id, false, work_done);
1911
1912         /* Refill the queue */
1913         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1914                 be_post_rx_frags(rxo, GFP_ATOMIC);
1915
1916         /* All consumed */
1917         if (work_done < budget) {
1918                 napi_complete(napi);
1919                 /* Arm CQ */
1920                 be_cq_notify(adapter, rx_cq->id, true, 0);
1921         }
1922         return work_done;
1923 }
1924
1925 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1926  * For TX/MCC we don't honour the budget; consume everything.
1927  */
1928 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1929 {
1930         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1931         struct be_adapter *adapter =
1932                 container_of(tx_eq, struct be_adapter, tx_eq);
1933         struct be_tx_obj *txo;
1934         struct be_eth_tx_compl *txcp;
1935         int tx_compl, mcc_compl, status = 0;
1936         u8 i;
1937         u16 num_wrbs;
1938
1939         for_all_tx_queues(adapter, txo, i) {
1940                 tx_compl = 0;
1941                 num_wrbs = 0;
1942                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1943                         num_wrbs += be_tx_compl_process(adapter, txo,
1944                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1945                                         wrb_index, txcp));
1946                         tx_compl++;
1947                 }
1948                 if (tx_compl) {
1949                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1950
1951                         atomic_sub(num_wrbs, &txo->q.used);
1952
1953                         /* As Tx wrbs have been freed up, wake up netdev queue
1954                          * if it was stopped due to lack of tx wrbs.  */
1955                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1956                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1957                                 netif_wake_subqueue(adapter->netdev, i);
1958                         }
1959
1960                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1961                         tx_stats(txo)->tx_compl += tx_compl;
1962                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1963                 }
1964         }
1965
1966         mcc_compl = be_process_mcc(adapter, &status);
1967
1968         if (mcc_compl) {
1969                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1970                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1971         }
1972
1973         napi_complete(napi);
1974
1975         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1976         adapter->drv_stats.tx_events++;
1977         return 1;
1978 }
1979
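/* Detect an unrecoverable error (UE registers on BE, SLIPORT status on
 * Lancer) and log the functional blocks that reported it.
 */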
1980 void be_detect_dump_ue(struct be_adapter *adapter)
1981 {
1982         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1983         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1984         u32 i;
1985
1986         if (lancer_chip(adapter)) {
1987                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1988                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1989                         sliport_err1 = ioread32(adapter->db +
1990                                         SLIPORT_ERROR1_OFFSET);
1991                         sliport_err2 = ioread32(adapter->db +
1992                                         SLIPORT_ERROR2_OFFSET);
1993                 }
1994         } else {
1995                 pci_read_config_dword(adapter->pdev,
1996                                 PCICFG_UE_STATUS_LOW, &ue_lo);
1997                 pci_read_config_dword(adapter->pdev,
1998                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
1999                 pci_read_config_dword(adapter->pdev,
2000                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2001                 pci_read_config_dword(adapter->pdev,
2002                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2003
2004                 ue_lo &= ~ue_lo_mask;
2005                 ue_hi &= ~ue_hi_mask;
2006         }
2007
2008         if (ue_lo || ue_hi ||
2009                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2010                 adapter->ue_detected = true;
2011                 adapter->eeh_err = true;
2012                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2013         }
2014
2015         if (ue_lo) {
2016                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2017                         if (ue_lo & 1)
2018                                 dev_err(&adapter->pdev->dev,
2019                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2020                 }
2021         }
2022         if (ue_hi) {
2023                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2024                         if (ue_hi & 1)
2025                                 dev_err(&adapter->pdev->dev,
2026                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2027                 }
2028         }
2029
2030         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2031                 dev_err(&adapter->pdev->dev,
2032                         "sliport status 0x%x\n", sliport_status);
2033                 dev_err(&adapter->pdev->dev,
2034                         "sliport error1 0x%x\n", sliport_err1);
2035                 dev_err(&adapter->pdev->dev,
2036                         "sliport error2 0x%x\n", sliport_err2);
2037         }
2038 }
2039
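/* Periodic (1 second) housekeeping: UE detection, stats refresh, EQ
 * delay updates and replenishing of RX queues that ran out of memory.
 */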
2040 static void be_worker(struct work_struct *work)
2041 {
2042         struct be_adapter *adapter =
2043                 container_of(work, struct be_adapter, work.work);
2044         struct be_rx_obj *rxo;
2045         int i;
2046
2047         if (!adapter->ue_detected)
2048                 be_detect_dump_ue(adapter);
2049
2050         /* When interrupts are not yet enabled, just reap any pending
2051          * MCC completions */
2052         if (!netif_running(adapter->netdev)) {
2053                 int mcc_compl, status = 0;
2054
2055                 mcc_compl = be_process_mcc(adapter, &status);
2056
2057                 if (mcc_compl) {
2058                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2059                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2060                 }
2061
2062                 goto reschedule;
2063         }
2064
2065         if (!adapter->stats_cmd_sent) {
2066                 if (lancer_chip(adapter))
2067                         lancer_cmd_get_pport_stats(adapter,
2068                                                 &adapter->stats_cmd);
2069                 else
2070                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2071         }
2072
2073         for_all_rx_queues(adapter, rxo, i) {
2074                 be_rx_eqd_update(adapter, rxo);
2075
2076                 if (rxo->rx_post_starved) {
2077                         rxo->rx_post_starved = false;
2078                         be_post_rx_frags(rxo, GFP_KERNEL);
2079                 }
2080         }
2081
2082 reschedule:
2083         adapter->work_counter++;
2084         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2085 }
2086
2087 static void be_msix_disable(struct be_adapter *adapter)
2088 {
2089         if (msix_enabled(adapter)) {
2090                 pci_disable_msix(adapter->pdev);
2091                 adapter->num_msix_vec = 0;
2092         }
2093 }
2094
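/* Ask for one vector per desired RX queue plus one for TX/MCC; if that
 * fails, retry with the number of vectors the system can grant.
 */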
2095 static void be_msix_enable(struct be_adapter *adapter)
2096 {
2097 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2098         int i, status, num_vec;
2099
2100         num_vec = be_num_rxqs_want(adapter) + 1;
2101
2102         for (i = 0; i < num_vec; i++)
2103                 adapter->msix_entries[i].entry = i;
2104
2105         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2106         if (status == 0) {
2107                 goto done;
2108         } else if (status >= BE_MIN_MSIX_VECTORS) {
2109                 num_vec = status;
2110                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2111                                 num_vec) == 0)
2112                         goto done;
2113         }
2114         return;
2115 done:
2116         adapter->num_msix_vec = num_vec;
2117         return;
2118 }
2119
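/* Enable SR-IOV on the PF, capping num_vfs at the device limit, and
 * allocate the per-VF config array.
 */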
2120 static int be_sriov_enable(struct be_adapter *adapter)
2121 {
2122         be_check_sriov_fn_type(adapter);
2123 #ifdef CONFIG_PCI_IOV
2124         if (be_physfn(adapter) && num_vfs) {
2125                 int status, pos;
2126                 u16 nvfs;
2127
2128                 pos = pci_find_ext_capability(adapter->pdev,
2129                                                 PCI_EXT_CAP_ID_SRIOV);
2130                 pci_read_config_word(adapter->pdev,
2131                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2132
2133                 if (num_vfs > nvfs) {
2134                         dev_info(&adapter->pdev->dev,
2135                                         "Device supports %d VFs and not %d\n",
2136                                         nvfs, num_vfs);
2137                         num_vfs = nvfs;
2138                 }
2139
2140                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2141                 adapter->sriov_enabled = status ? false : true;
2142
2143                 if (adapter->sriov_enabled) {
2144                         adapter->vf_cfg = kcalloc(num_vfs,
2145                                                 sizeof(struct be_vf_cfg),
2146                                                 GFP_KERNEL);
2147                         if (!adapter->vf_cfg)
2148                                 return -ENOMEM;
2149                 }
2150         }
2151 #endif
2152         return 0;
2153 }
2154
2155 static void be_sriov_disable(struct be_adapter *adapter)
2156 {
2157 #ifdef CONFIG_PCI_IOV
2158         if (adapter->sriov_enabled) {
2159                 pci_disable_sriov(adapter->pdev);
2160                 kfree(adapter->vf_cfg);
2161                 adapter->sriov_enabled = false;
2162         }
2163 #endif
2164 }
2165
2166 static inline int be_msix_vec_get(struct be_adapter *adapter,
2167                                         struct be_eq_obj *eq_obj)
2168 {
2169         return adapter->msix_entries[eq_obj->eq_idx].vector;
2170 }
2171
2172 static int be_request_irq(struct be_adapter *adapter,
2173                 struct be_eq_obj *eq_obj,
2174                 void *handler, char *desc, void *context)
2175 {
2176         struct net_device *netdev = adapter->netdev;
2177         int vec;
2178
2179         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2180         vec = be_msix_vec_get(adapter, eq_obj);
2181         return request_irq(vec, handler, 0, eq_obj->desc, context);
2182 }
2183
2184 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2185                         void *context)
2186 {
2187         int vec = be_msix_vec_get(adapter, eq_obj);
2188         free_irq(vec, context);
2189 }
2190
2191 static int be_msix_register(struct be_adapter *adapter)
2192 {
2193         struct be_rx_obj *rxo;
2194         int status, i;
2195         char qname[10];
2196
2197         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2198                                 adapter);
2199         if (status)
2200                 goto err;
2201
2202         for_all_rx_queues(adapter, rxo, i) {
2203                 sprintf(qname, "rxq%d", i);
2204                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2205                                 qname, rxo);
2206                 if (status)
2207                         goto err_msix;
2208         }
2209
2210         return 0;
2211
2212 err_msix:
2213         be_free_irq(adapter, &adapter->tx_eq, adapter);
2214
2215         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2216                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2217
2218 err:
2219         dev_warn(&adapter->pdev->dev,
2220                 "MSIX Request IRQ failed - err %d\n", status);
2221         be_msix_disable(adapter);
2222         return status;
2223 }
2224
2225 static int be_irq_register(struct be_adapter *adapter)
2226 {
2227         struct net_device *netdev = adapter->netdev;
2228         int status;
2229
2230         if (msix_enabled(adapter)) {
2231                 status = be_msix_register(adapter);
2232                 if (status == 0)
2233                         goto done;
2234                 /* INTx is not supported for VF */
2235                 if (!be_physfn(adapter))
2236                         return status;
2237         }
2238
2239         /* INTx */
2240         netdev->irq = adapter->pdev->irq;
2241         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2242                         adapter);
2243         if (status) {
2244                 dev_err(&adapter->pdev->dev,
2245                         "INTx request IRQ failed - err %d\n", status);
2246                 return status;
2247         }
2248 done:
2249         adapter->isr_registered = true;
2250         return 0;
2251 }
2252
2253 static void be_irq_unregister(struct be_adapter *adapter)
2254 {
2255         struct net_device *netdev = adapter->netdev;
2256         struct be_rx_obj *rxo;
2257         int i;
2258
2259         if (!adapter->isr_registered)
2260                 return;
2261
2262         /* INTx */
2263         if (!msix_enabled(adapter)) {
2264                 free_irq(netdev->irq, adapter);
2265                 goto done;
2266         }
2267
2268         /* MSIx */
2269         be_free_irq(adapter, &adapter->tx_eq, adapter);
2270
2271         for_all_rx_queues(adapter, rxo, i)
2272                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2273
2274 done:
2275         adapter->isr_registered = false;
2276 }
2277
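/* Destroy the RX rings in hw, free the posted buffers and clear any
 * residual events.
 */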
2278 static void be_rx_queues_clear(struct be_adapter *adapter)
2279 {
2280         struct be_queue_info *q;
2281         struct be_rx_obj *rxo;
2282         int i;
2283
2284         for_all_rx_queues(adapter, rxo, i) {
2285                 q = &rxo->q;
2286                 if (q->created) {
2287                         be_cmd_rxq_destroy(adapter, q);
2288                         /* After the rxq is invalidated, wait for a grace time
2289                          * of 1ms for all dma to end and the flush compl to
2290                          * arrive
2291                          */
2292                         mdelay(1);
2293                         be_rx_q_clean(adapter, rxo);
2294                 }
2295
2296                 /* Clear any residual events */
2297                 q = &rxo->rx_eq.q;
2298                 if (q->created)
2299                         be_eq_clean(adapter, &rxo->rx_eq);
2300         }
2301 }
2302
2303 static int be_close(struct net_device *netdev)
2304 {
2305         struct be_adapter *adapter = netdev_priv(netdev);
2306         struct be_rx_obj *rxo;
2307         struct be_tx_obj *txo;
2308         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2309         int vec, i;
2310
2311         be_async_mcc_disable(adapter);
2312
2313         if (!lancer_chip(adapter))
2314                 be_intr_set(adapter, false);
2315
2316         for_all_rx_queues(adapter, rxo, i)
2317                 napi_disable(&rxo->rx_eq.napi);
2318
2319         napi_disable(&tx_eq->napi);
2320
2321         if (lancer_chip(adapter)) {
2322                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2323                 for_all_rx_queues(adapter, rxo, i)
2324                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2325                 for_all_tx_queues(adapter, txo, i)
2326                         be_cq_notify(adapter, txo->cq.id, false, 0);
2327         }
2328
2329         if (msix_enabled(adapter)) {
2330                 vec = be_msix_vec_get(adapter, tx_eq);
2331                 synchronize_irq(vec);
2332
2333                 for_all_rx_queues(adapter, rxo, i) {
2334                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2335                         synchronize_irq(vec);
2336                 }
2337         } else {
2338                 synchronize_irq(netdev->irq);
2339         }
2340         be_irq_unregister(adapter);
2341
2342         /* Wait for all pending tx completions to arrive so that
2343          * all tx skbs are freed.
2344          */
2345         for_all_tx_queues(adapter, txo, i)
2346                 be_tx_compl_clean(adapter, txo);
2347
2348         be_rx_queues_clear(adapter);
2349         return 0;
2350 }
2351
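/* Create the RX rings in hw, program the RSS table if multiple RX
 * queues are in use and do the initial posting of RX buffers.
 */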
2352 static int be_rx_queues_setup(struct be_adapter *adapter)
2353 {
2354         struct be_rx_obj *rxo;
2355         int rc, i;
2356         u8 rsstable[MAX_RSS_QS];
2357
2358         for_all_rx_queues(adapter, rxo, i) {
2359                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2360                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2361                         adapter->if_handle,
2362                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2363                 if (rc)
2364                         return rc;
2365         }
2366
2367         if (be_multi_rxq(adapter)) {
2368                 for_all_rss_queues(adapter, rxo, i)
2369                         rsstable[i] = rxo->rss_id;
2370
2371                 rc = be_cmd_rss_config(adapter, rsstable,
2372                         adapter->num_rx_qs - 1);
2373                 if (rc)
2374                         return rc;
2375         }
2376
2377         /* First time posting */
2378         for_all_rx_queues(adapter, rxo, i) {
2379                 be_post_rx_frags(rxo, GFP_KERNEL);
2380                 napi_enable(&rxo->rx_eq.napi);
2381         }
2382         return 0;
2383 }
2384
2385 static int be_open(struct net_device *netdev)
2386 {
2387         struct be_adapter *adapter = netdev_priv(netdev);
2388         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2389         struct be_rx_obj *rxo;
2390         int status, i;
2391
2392         status = be_rx_queues_setup(adapter);
2393         if (status)
2394                 goto err;
2395
2396         napi_enable(&tx_eq->napi);
2397
2398         be_irq_register(adapter);
2399
2400         if (!lancer_chip(adapter))
2401                 be_intr_set(adapter, true);
2402
2403         /* The evt queues are created in unarmed state; arm them */
2404         for_all_rx_queues(adapter, rxo, i) {
2405                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2406                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2407         }
2408         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2409
2410         /* Now that interrupts are on we can process async mcc */
2411         be_async_mcc_enable(adapter);
2412
2413         return 0;
2414 err:
2415         be_close(adapter->netdev);
2416         return -EIO;
2417 }
2418
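/* Program (or clear) the magic-packet WoL filter in the hw and set the
 * PCI wake state to match.
 */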
2419 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2420 {
2421         struct be_dma_mem cmd;
2422         int status = 0;
2423         u8 mac[ETH_ALEN];
2424
2425         memset(mac, 0, ETH_ALEN);
2426
2427         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2428         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2429                                     GFP_KERNEL);
2430         if (cmd.va == NULL)
2431                 return -1;
2432         memset(cmd.va, 0, cmd.size);
2433
2434         if (enable) {
2435                 status = pci_write_config_dword(adapter->pdev,
2436                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2437                 if (status) {
2438                         dev_err(&adapter->pdev->dev,
2439                                 "Could not enable Wake-on-LAN\n");
2440                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2441                                           cmd.dma);
2442                         return status;
2443                 }
2444                 status = be_cmd_enable_magic_wol(adapter,
2445                                 adapter->netdev->dev_addr, &cmd);
2446                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2447                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2448         } else {
2449                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2450                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2451                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2452         }
2453
2454         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2455         return status;
2456 }
2457
2458 /*
2459  * Generate a seed MAC address from the PF MAC address using jhash.
2460  * MAC addresses for VFs are assigned incrementally starting from the seed.
2461  * These addresses are programmed in the ASIC by the PF and the VF driver
2462  * queries for the MAC address during its probe.
2463  */
2464 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2465 {
2466         u32 vf;
2467         int status = 0;
2468         u8 mac[ETH_ALEN];
2469
2470         be_vf_eth_addr_generate(adapter, mac);
2471
2472         for (vf = 0; vf < num_vfs; vf++) {
2473                 status = be_cmd_pmac_add(adapter, mac,
2474                                         adapter->vf_cfg[vf].vf_if_handle,
2475                                         &adapter->vf_cfg[vf].vf_pmac_id,
2476                                         vf + 1);
2477                 if (status)
2478                         dev_err(&adapter->pdev->dev,
2479                                 "Mac address add failed for VF %d\n", vf);
2480                 else
2481                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2482
2483                 mac[5] += 1;
2484         }
2485         return status;
2486 }
2487
2488 static void be_vf_clear(struct be_adapter *adapter)
2489 {
2490         u32 vf;
2491
2492         for (vf = 0; vf < num_vfs; vf++) {
2493                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2494                         be_cmd_pmac_del(adapter,
2495                                         adapter->vf_cfg[vf].vf_if_handle,
2496                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2497         }
2498
2499         for (vf = 0; vf < num_vfs; vf++)
2500                 if (adapter->vf_cfg[vf].vf_if_handle)
2501                         be_cmd_if_destroy(adapter,
2502                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2503 }
2504
2505 static int be_clear(struct be_adapter *adapter)
2506 {
2507         if (be_physfn(adapter) && adapter->sriov_enabled)
2508                 be_vf_clear(adapter);
2509
2510         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2511
2512         be_mcc_queues_destroy(adapter);
2513         be_rx_queues_destroy(adapter);
2514         be_tx_queues_destroy(adapter);
2515         adapter->eq_next_idx = 0;
2516
2517         adapter->be3_native = false;
2518         adapter->promiscuous = false;
2519
2520         /* tell fw we're done with firing cmds */
2521         be_cmd_fw_clean(adapter);
2522         return 0;
2523 }
2524
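/* Create an interface for each VF, program its MAC and cache its link
 * speed for TX-rate configuration.
 */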
2525 static int be_vf_setup(struct be_adapter *adapter)
2526 {
2527         u32 cap_flags, en_flags, vf;
2528         u16 lnk_speed;
2529         int status;
2530
2531         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2532         for (vf = 0; vf < num_vfs; vf++) {
2533                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2534                                         &adapter->vf_cfg[vf].vf_if_handle,
2535                                         NULL, vf+1);
2536                 if (status)
2537                         goto err;
2538                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2539         }
2540
2541         if (!lancer_chip(adapter)) {
2542                 status = be_vf_eth_addr_config(adapter);
2543                 if (status)
2544                         goto err;
2545         }
2546
2547         for (vf = 0; vf < num_vfs; vf++) {
2548                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2549                                 vf + 1);
2550                 if (status)
2551                         goto err;
2552                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2553         }
2554         return 0;
2555 err:
2556         return status;
2557 }
2558
2559 static int be_setup(struct be_adapter *adapter)
2560 {
2561         struct net_device *netdev = adapter->netdev;
2562         u32 cap_flags, en_flags;
2563         u32 tx_fc, rx_fc;
2564         int status;
2565         u8 mac[ETH_ALEN];
2566
2567         /* Allow all priorities by default. A GRP5 evt may modify this */
2568         adapter->vlan_prio_bmap = 0xff;
2569         adapter->link_speed = -1;
2570
2571         be_cmd_req_native_mode(adapter);
2572
2573         status = be_tx_queues_create(adapter);
2574         if (status != 0)
2575                 goto err;
2576
2577         status = be_rx_queues_create(adapter);
2578         if (status != 0)
2579                 goto err;
2580
2581         status = be_mcc_queues_create(adapter);
2582         if (status != 0)
2583                 goto err;
2584
2585         memset(mac, 0, ETH_ALEN);
2586         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2587                         true /* permanent */, 0);
2588         if (status)
2589                 return status;
2590         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2591         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2592
2593         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2594                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2595         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2596                         BE_IF_FLAGS_PROMISCUOUS;
2597         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2598                 cap_flags |= BE_IF_FLAGS_RSS;
2599                 en_flags |= BE_IF_FLAGS_RSS;
2600         }
2601         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2602                         netdev->dev_addr, &adapter->if_handle,
2603                         &adapter->pmac_id, 0);
2604         if (status != 0)
2605                 goto err;
2606
2607         /* For BEx, the VF's permanent mac queried from the card is incorrect.
2608          * Query the mac configured by the PF using the if_handle
2609          */
2610         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2611                 status = be_cmd_mac_addr_query(adapter, mac,
2612                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2613                 if (!status) {
2614                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2615                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2616                 }
2617         }
2618
2619         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2620
2621         status = be_vid_config(adapter, false, 0);
2622         if (status)
2623                 goto err;
2624
2625         be_set_rx_mode(adapter->netdev);
2626
2627         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2628         if (status)
2629                 goto err;
2630         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2631                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2632                                         adapter->rx_fc);
2633                 if (status)
2634                         goto err;
2635         }
2636
2637         pcie_set_readrq(adapter->pdev, 4096);
2638
2639         if (be_physfn(adapter) && adapter->sriov_enabled) {
2640                 status = be_vf_setup(adapter);
2641                 if (status)
2642                         goto err;
2643         }
2644
2645         return 0;
2646 err:
2647         be_clear(adapter);
2648         return status;
2649 }
2650
2651 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
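/* Returns true only if the redboot image in the FW file differs (by CRC)
 * from what is already in flash.
 */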
2652 static bool be_flash_redboot(struct be_adapter *adapter,
2653                         const u8 *p, u32 img_start, int image_size,
2654                         int hdr_size)
2655 {
2656         u32 crc_offset;
2657         u8 flashed_crc[4];
2658         int status;
2659
2660         crc_offset = hdr_size + img_start + image_size - 4;
2661
2662         p += crc_offset;
2663
2664         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2665                         (image_size - 4));
2666         if (status) {
2667                 dev_err(&adapter->pdev->dev,
2668                 "could not get crc from flash, not flashing redboot\n");
2669                 return false;
2670         }
2671
2672         /* update redboot only if crc does not match */
2673         return memcmp(flashed_crc, p, 4) != 0;
2677 }
2678
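/* PHY firmware is flashed only for the TN_8022 10GBase-T PHY */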
2679 static bool phy_flashing_required(struct be_adapter *adapter)
2680 {
2681         int status = 0;
2682         struct be_phy_info phy_info;
2683
2684         status = be_cmd_get_phy_info(adapter, &phy_info);
2685         if (status)
2686                 return false;
2687         return (phy_info.phy_type == TN_8022) &&
2688                 (phy_info.interface_type == PHY_TYPE_BASET_10GB);
2692 }
2693
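/* Write each firmware component in the UFI file to its flash region in
 * 32KB chunks, skipping components that don't need (or support) an
 * update on this adapter.
 */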
2694 static int be_flash_data(struct be_adapter *adapter,
2695                         const struct firmware *fw,
2696                         struct be_dma_mem *flash_cmd, int num_of_images)
2697
2698 {
2699         int status = 0, i, filehdr_size = 0;
2700         u32 total_bytes = 0, flash_op;
2701         int num_bytes;
2702         const u8 *p = fw->data;
2703         struct be_cmd_write_flashrom *req = flash_cmd->va;
2704         const struct flash_comp *pflashcomp;
2705         int num_comp;
2706
2707         static const struct flash_comp gen3_flash_types[10] = {
2708                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2709                         FLASH_IMAGE_MAX_SIZE_g3},
2710                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2711                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2712                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2713                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2714                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2715                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2716                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2717                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2718                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2719                         FLASH_IMAGE_MAX_SIZE_g3},
2720                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2721                         FLASH_IMAGE_MAX_SIZE_g3},
2722                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2723                         FLASH_IMAGE_MAX_SIZE_g3},
2724                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2725                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2726                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2727                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2728         };
2729         static const struct flash_comp gen2_flash_types[8] = {
2730                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2731                         FLASH_IMAGE_MAX_SIZE_g2},
2732                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2733                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2734                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2735                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2736                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2737                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2738                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2739                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2740                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2741                         FLASH_IMAGE_MAX_SIZE_g2},
2742                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2743                         FLASH_IMAGE_MAX_SIZE_g2},
2744                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2745                          FLASH_IMAGE_MAX_SIZE_g2}
2746         };
2747
2748         if (adapter->generation == BE_GEN3) {
2749                 pflashcomp = gen3_flash_types;
2750                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2751                 num_comp = ARRAY_SIZE(gen3_flash_types);
2752         } else {
2753                 pflashcomp = gen2_flash_types;
2754                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2755                 num_comp = ARRAY_SIZE(gen2_flash_types);
2756         }
2757         for (i = 0; i < num_comp; i++) {
2758                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2759                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2760                         continue;
2761                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2762                         if (!phy_flashing_required(adapter))
2763                                 continue;
2764                 }
2765                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2766                         (!be_flash_redboot(adapter, fw->data,
2767                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2768                         (num_of_images * sizeof(struct image_hdr)))))
2769                         continue;
2770                 p = fw->data;
2771                 p += filehdr_size + pflashcomp[i].offset
2772                         + (num_of_images * sizeof(struct image_hdr));
2773                 if (p + pflashcomp[i].size > fw->data + fw->size)
2774                         return -1;
2775                 total_bytes = pflashcomp[i].size;
2776                 while (total_bytes) {
2777                         if (total_bytes > 32*1024)
2778                                 num_bytes = 32*1024;
2779                         else
2780                                 num_bytes = total_bytes;
2781                         total_bytes -= num_bytes;
2782                         if (!total_bytes) {
2783                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2784                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2785                                 else
2786                                         flash_op = FLASHROM_OPER_FLASH;
2787                         } else {
2788                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2789                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2790                                 else
2791                                         flash_op = FLASHROM_OPER_SAVE;
2792                         }
2793                         memcpy(req->params.data_buf, p, num_bytes);
2794                         p += num_bytes;
2795                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2796                                 pflashcomp[i].optype, flash_op, num_bytes);
2797                         if (status) {
2798                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2799                                         (pflashcomp[i].optype ==
2800                                                 IMG_TYPE_PHY_FW))
2801                                         break;
2802                                 dev_err(&adapter->pdev->dev,
2803                                         "cmd to write to flash rom failed.\n");
2804                                 return -1;
2805                         }
2806                 }
2807         }
2808         return 0;
2809 }
2810
2811 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2812 {
2813         if (fhdr == NULL)
2814                 return 0;
2815         if (fhdr->build[0] == '3')
2816                 return BE_GEN3;
2817         else if (fhdr->build[0] == '2')
2818                 return BE_GEN2;
2819         else
2820                 return 0;
2821 }
2822
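/* Download FW to Lancer in 32KB chunks via the write_object cmd; a final
 * zero-length write commits the image.
 */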
2823 static int lancer_fw_download(struct be_adapter *adapter,
2824                                 const struct firmware *fw)
2825 {
2826 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2827 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2828         struct be_dma_mem flash_cmd;
2829         const u8 *data_ptr = NULL;
2830         u8 *dest_image_ptr = NULL;
2831         size_t image_size = 0;
2832         u32 chunk_size = 0;
2833         u32 data_written = 0;
2834         u32 offset = 0;
2835         int status = 0;
2836         u8 add_status = 0;
2837
2838         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2839                 dev_err(&adapter->pdev->dev,
2840                         "FW image not properly aligned. "
2841                         "Length must be 4-byte aligned.\n");
2842                 status = -EINVAL;
2843                 goto lancer_fw_exit;
2844         }
2845
2846         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2847                                 + LANCER_FW_DOWNLOAD_CHUNK;
2848         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2849                                                 &flash_cmd.dma, GFP_KERNEL);
2850         if (!flash_cmd.va) {
2851                 status = -ENOMEM;
2852                 dev_err(&adapter->pdev->dev,
2853                         "Memory allocation failure while flashing\n");
2854                 goto lancer_fw_exit;
2855         }
2856
2857         dest_image_ptr = flash_cmd.va +
2858                                 sizeof(struct lancer_cmd_req_write_object);
2859         image_size = fw->size;
2860         data_ptr = fw->data;
2861
2862         while (image_size) {
2863                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2864
2865                 /* Copy the image chunk content. */
2866                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2867
2868                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2869                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2870                                 &data_written, &add_status);
2871
2872                 if (status)
2873                         break;
2874
2875                 offset += data_written;
2876                 data_ptr += data_written;
2877                 image_size -= data_written;
2878         }
2879
2880         if (!status) {
2881                 /* Commit the FW written so far with a zero-length write */
2882                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2883                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2884                                         &data_written, &add_status);
2885         }
2886
2887         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2888                                 flash_cmd.dma);
2889         if (status) {
2890                 dev_err(&adapter->pdev->dev,
2891                         "Firmware load error. "
2892                         "Status code: 0x%x Additional Status: 0x%x\n",
2893                         status, add_status);
2894                 goto lancer_fw_exit;
2895         }
2896
2897         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2898 lancer_fw_exit:
2899         return status;
2900 }
2901
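/* BE2/BE3 firmware download: the UFI generation (from the file header)
 * must match the adapter generation. GEN3 UFIs may carry several images;
 * only those with imageid 1 are flashed via be_flash_data().
 */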
2902 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2903 {
2904         struct flash_file_hdr_g2 *fhdr;
2905         struct flash_file_hdr_g3 *fhdr3;
2906         struct image_hdr *img_hdr_ptr = NULL;
2907         struct be_dma_mem flash_cmd;
2908         const u8 *p;
2909         int status = 0, i = 0, num_imgs = 0;
2910
2911         p = fw->data;
2912         fhdr = (struct flash_file_hdr_g2 *) p;
2913
2914         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2915         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2916                                           &flash_cmd.dma, GFP_KERNEL);
2917         if (!flash_cmd.va) {
2918                 status = -ENOMEM;
2919                 dev_err(&adapter->pdev->dev,
2920                         "Memory allocation failure while flashing\n");
2921                 goto be_fw_exit;
2922         }
2923
2924         if ((adapter->generation == BE_GEN3) &&
2925                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2926                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2927                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2928                 for (i = 0; i < num_imgs; i++) {
2929                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2930                                         (sizeof(struct flash_file_hdr_g3) +
2931                                          i * sizeof(struct image_hdr)));
2932                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2933                                 status = be_flash_data(adapter, fw, &flash_cmd,
2934                                                         num_imgs);
2935                 }
2936         } else if ((adapter->generation == BE_GEN2) &&
2937                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2938                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2939         } else {
2940                 dev_err(&adapter->pdev->dev,
2941                         "UFI and interface are not compatible for flashing\n");
2942                 status = -1;
2943         }
2944
2945         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2946                           flash_cmd.dma);
2947         if (status) {
2948                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2949                 goto be_fw_exit;
2950         }
2951
2952         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2953
2954 be_fw_exit:
2955         return status;
2956 }
2957
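/* Entry point for user-initiated flashing, typically reached through
 * ethtool's flash-device path (the exact caller lives outside this
 * file). The interface must be up so FW commands can be issued.
 */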
2958 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2959 {
2960         const struct firmware *fw;
2961         int status;
2962
2963         if (!netif_running(adapter->netdev)) {
2964                 dev_err(&adapter->pdev->dev,
2965                         "Firmware load not allowed (interface is down)\n");
2966                 return -1;
2967         }
2968
2969         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2970         if (status)
2971                 goto fw_exit;
2972
2973         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2974
2975         if (lancer_chip(adapter))
2976                 status = lancer_fw_download(adapter, fw);
2977         else
2978                 status = be_fw_download(adapter, fw);
2979
2980 fw_exit:
2981         release_firmware(fw);
2982         return status;
2983 }
2984
2985 static const struct net_device_ops be_netdev_ops = {
2986         .ndo_open               = be_open,
2987         .ndo_stop               = be_close,
2988         .ndo_start_xmit         = be_xmit,
2989         .ndo_set_rx_mode        = be_set_rx_mode,
2990         .ndo_set_mac_address    = be_mac_addr_set,
2991         .ndo_change_mtu         = be_change_mtu,
2992         .ndo_get_stats64        = be_get_stats64,
2993         .ndo_validate_addr      = eth_validate_addr,
2994         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2995         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2996         .ndo_set_vf_mac         = be_set_vf_mac,
2997         .ndo_set_vf_vlan        = be_set_vf_vlan,
2998         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2999         .ndo_get_vf_config      = be_get_vf_config
3000 };
3001
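/* Populate netdev feature flags and ops, then register one NAPI context
 * per RX queue plus a shared context for TX and MCC completions.
 */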
3002 static void be_netdev_init(struct net_device *netdev)
3003 {
3004         struct be_adapter *adapter = netdev_priv(netdev);
3005         struct be_rx_obj *rxo;
3006         int i;
3007
3008         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3009                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3010                 NETIF_F_HW_VLAN_TX;
3011         if (be_multi_rxq(adapter))
3012                 netdev->hw_features |= NETIF_F_RXHASH;
3013
3014         netdev->features |= netdev->hw_features |
3015                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3016
3017         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3018                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3019
3020         netdev->flags |= IFF_MULTICAST;
3021
3022         netif_set_gso_max_size(netdev, 65535);
3023
3024         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3025
3026         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3027
3028         for_all_rx_queues(adapter, rxo, i)
3029                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3030                                 BE_NAPI_WEIGHT);
3031
3032         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3033                 BE_NAPI_WEIGHT);
3034 }
3035
3036 static void be_unmap_pci_bars(struct be_adapter *adapter)
3037 {
3038         if (adapter->csr)
3039                 iounmap(adapter->csr);
3040         if (adapter->db)
3041                 iounmap(adapter->db);
3042 }
3043
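/* BAR layout as used below: Lancer exposes its doorbells on BAR 0. On
 * BE2/BE3 the PF maps CSR space from BAR 2; the doorbell BAR is 4,
 * except for GEN3 VFs, which use BAR 0.
 */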
3044 static int be_map_pci_bars(struct be_adapter *adapter)
3045 {
3046         u8 __iomem *addr;
3047         int db_reg;
3048
3049         if (lancer_chip(adapter)) {
3050                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3051                         pci_resource_len(adapter->pdev, 0));
3052                 if (addr == NULL)
3053                         return -ENOMEM;
3054                 adapter->db = addr;
3055                 return 0;
3056         }
3057
3058         if (be_physfn(adapter)) {
3059                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3060                                 pci_resource_len(adapter->pdev, 2));
3061                 if (addr == NULL)
3062                         return -ENOMEM;
3063                 adapter->csr = addr;
3064         }
3065
3066         if (adapter->generation == BE_GEN2) {
3067                 db_reg = 4;
3068         } else {
3069                 if (be_physfn(adapter))
3070                         db_reg = 4;
3071                 else
3072                         db_reg = 0;
3073         }
3074         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3075                                 pci_resource_len(adapter->pdev, db_reg));
3076         if (addr == NULL)
3077                 goto pci_map_err;
3078         adapter->db = addr;
3079
3080         return 0;
3081 pci_map_err:
3082         be_unmap_pci_bars(adapter);
3083         return -ENOMEM;
3084 }
3085
3086
3087 static void be_ctrl_cleanup(struct be_adapter *adapter)
3088 {
3089         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3090
3091         be_unmap_pci_bars(adapter);
3092
3093         if (mem->va)
3094                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3095                                   mem->dma);
3096
3097         mem = &adapter->rx_filter;
3098         if (mem->va)
3099                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3100                                   mem->dma);
3101 }
3102
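/* Map the PCI BARs and allocate the control-path DMA memory: a mailbox
 * aligned to 16 bytes (hence the 16 bytes of slack in the allocation)
 * and an rx_filter command buffer.
 */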
3103 static int be_ctrl_init(struct be_adapter *adapter)
3104 {
3105         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3106         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3107         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3108         int status;
3109
3110         status = be_map_pci_bars(adapter);
3111         if (status)
3112                 goto done;
3113
3114         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3115         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3116                                                 mbox_mem_alloc->size,
3117                                                 &mbox_mem_alloc->dma,
3118                                                 GFP_KERNEL);
3119         if (!mbox_mem_alloc->va) {
3120                 status = -ENOMEM;
3121                 goto unmap_pci_bars;
3122         }
3123         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3124         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3125         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3126         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3127
3128         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3129         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3130                                         &rx_filter->dma, GFP_KERNEL);
3131         if (rx_filter->va == NULL) {
3132                 status = -ENOMEM;
3133                 goto free_mbox;
3134         }
3135         memset(rx_filter->va, 0, rx_filter->size);
3136
3137         mutex_init(&adapter->mbox_lock);
3138         spin_lock_init(&adapter->mcc_lock);
3139         spin_lock_init(&adapter->mcc_cq_lock);
3140
3141         init_completion(&adapter->flash_compl);
3142         pci_save_state(adapter->pdev);
3143         return 0;
3144
3145 free_mbox:
3146         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3147                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3148
3149 unmap_pci_bars:
3150         be_unmap_pci_bars(adapter);
3151
3152 done:
3153         return status;
3154 }
3155
3156 static void be_stats_cleanup(struct be_adapter *adapter)
3157 {
3158         struct be_dma_mem *cmd = &adapter->stats_cmd;
3159
3160         if (cmd->va)
3161                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3162                                   cmd->va, cmd->dma);
3163 }
3164
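/* The stats command buffer size depends on the adapter: v0 for BE2,
 * pport stats for Lancer, and v1 for the remaining GEN3 devices.
 */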
3165 static int be_stats_init(struct be_adapter *adapter)
3166 {
3167         struct be_dma_mem *cmd = &adapter->stats_cmd;
3168
3169         if (adapter->generation == BE_GEN2) {
3170                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3171         } else {
3172                 if (lancer_chip(adapter))
3173                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3174                 else
3175                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3176         }
3177         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3178                                      GFP_KERNEL);
3179         if (cmd->va == NULL)
3180                 return -1;
3181         memset(cmd->va, 0, cmd->size);
3182         return 0;
3183 }
3184
3185 static void __devexit be_remove(struct pci_dev *pdev)
3186 {
3187         struct be_adapter *adapter = pci_get_drvdata(pdev);
3188
3189         if (!adapter)
3190                 return;
3191
3192         cancel_delayed_work_sync(&adapter->work);
3193
3194         unregister_netdev(adapter->netdev);
3195
3196         be_clear(adapter);
3197
3198         be_stats_cleanup(adapter);
3199
3200         be_ctrl_cleanup(adapter);
3201
3202         be_sriov_disable(adapter);
3203
3204         be_msix_disable(adapter);
3205
3206         pci_set_drvdata(pdev, NULL);
3207         pci_release_regions(pdev);
3208         pci_disable_device(pdev);
3209
3210         free_netdev(adapter->netdev);
3211 }
3212
3213 static int be_get_config(struct be_adapter *adapter)
3214 {
3215         int status;
3216
3217         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3218                         &adapter->function_mode, &adapter->function_caps);
3219         if (status)
3220                 return status;
3221
3222         if (adapter->function_mode & FLEX10_MODE)
3223                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3224         else
3225                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3226
3227         status = be_cmd_get_cntl_attributes(adapter);
3228         if (status)
3229                 return status;
3230
3231         return 0;
3232 }
3233
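/* Derive the adapter generation from the PCI device ID. For
 * OC_DEVICE_ID3/4 the SLI_INTF register is additionally validated and
 * the SLI family extracted before the device is treated as GEN3.
 */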
3234 static int be_dev_family_check(struct be_adapter *adapter)
3235 {
3236         struct pci_dev *pdev = adapter->pdev;
3237         u32 sli_intf = 0, if_type;
3238
3239         switch (pdev->device) {
3240         case BE_DEVICE_ID1:
3241         case OC_DEVICE_ID1:
3242                 adapter->generation = BE_GEN2;
3243                 break;
3244         case BE_DEVICE_ID2:
3245         case OC_DEVICE_ID2:
3246                 adapter->generation = BE_GEN3;
3247                 break;
3248         case OC_DEVICE_ID3:
3249         case OC_DEVICE_ID4:
3250                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3251                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3252                                                 SLI_INTF_IF_TYPE_SHIFT;
3253
3254                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3255                         if_type != 0x02) {
3256                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3257                         return -EINVAL;
3258                 }
3259                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3260                                          SLI_INTF_FAMILY_SHIFT);
3261                 adapter->generation = BE_GEN3;
3262                 break;
3263         default:
3264                 adapter->generation = 0;
3265         }
3266         return 0;
3267 }
3268
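/* Poll the SLIPORT_STATUS ready bit every 20ms, for up to 500
 * iterations (roughly 10 seconds in total).
 */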
3269 static int lancer_wait_ready(struct be_adapter *adapter)
3270 {
3271 #define SLIPORT_READY_TIMEOUT 500
3272         u32 sliport_status;
3273         int status = 0, i;
3274
3275         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3276                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3277                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3278                         break;
3279
3280                 msleep(20);
3281         }
3282
3283         if (i == SLIPORT_READY_TIMEOUT)
3284                 status = -1;
3285
3286         return status;
3287 }
3288
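/* If SLIPORT_STATUS reports an error together with "reset needed",
 * request a port reset through SLIPORT_CONTROL and re-poll; any
 * error/reset flag still set afterwards is treated as fatal.
 */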
3289 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3290 {
3291         int status;
3292         u32 sliport_status, err, reset_needed;
3293         status = lancer_wait_ready(adapter);
3294         if (!status) {
3295                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3296                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3297                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3298                 if (err && reset_needed) {
3299                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3300                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3301
3302                         /* check if the adapter has corrected the error */
3303                         status = lancer_wait_ready(adapter);
3304                         sliport_status = ioread32(adapter->db +
3305                                                         SLIPORT_STATUS_OFFSET);
3306                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3307                                                 SLIPORT_STATUS_RN_MASK);
3308                         if (status || sliport_status)
3309                                 status = -1;
3310                 } else if (err || reset_needed) {
3311                         status = -1;
3312                 }
3313         }
3314         return status;
3315 }
3316
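/* Probe sequence, in sketch: enable the PCI device, set the DMA mask,
 * enable SR-IOV, map BARs and control structures, sync with FW
 * (POST/fw_init/reset_function), then be_setup() and netdev
 * registration. The labels at the end unwind in reverse order.
 */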
3317 static int __devinit be_probe(struct pci_dev *pdev,
3318                         const struct pci_device_id *pdev_id)
3319 {
3320         int status = 0;
3321         struct be_adapter *adapter;
3322         struct net_device *netdev;
3323
3324         status = pci_enable_device(pdev);
3325         if (status)
3326                 goto do_none;
3327
3328         status = pci_request_regions(pdev, DRV_NAME);
3329         if (status)
3330                 goto disable_dev;
3331         pci_set_master(pdev);
3332
3333         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3334         if (netdev == NULL) {
3335                 status = -ENOMEM;
3336                 goto rel_reg;
3337         }
3338         adapter = netdev_priv(netdev);
3339         adapter->pdev = pdev;
3340         pci_set_drvdata(pdev, adapter);
3341
3342         status = be_dev_family_check(adapter);
3343         if (status)
3344                 goto free_netdev;
3345
3346         adapter->netdev = netdev;
3347         SET_NETDEV_DEV(netdev, &pdev->dev);
3348
3349         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3350         if (!status) {
3351                 netdev->features |= NETIF_F_HIGHDMA;
3352         } else {
3353                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3354                 if (status) {
3355                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3356                         goto free_netdev;
3357                 }
3358         }
3359
3360         status = be_sriov_enable(adapter);
3361         if (status)
3362                 goto free_netdev;
3363
3364         status = be_ctrl_init(adapter);
3365         if (status)
3366                 goto disable_sriov;
3367
3368         if (lancer_chip(adapter)) {
3369                 status = lancer_test_and_set_rdy_state(adapter);
3370                 if (status) {
3371                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3372                         goto ctrl_clean;
3373                 }
3374         }
3375
3376         /* sync up with fw's ready state */
3377         if (be_physfn(adapter)) {
3378                 status = be_cmd_POST(adapter);
3379                 if (status)
3380                         goto ctrl_clean;
3381         }
3382
3383         /* tell fw we're ready to fire cmds */
3384         status = be_cmd_fw_init(adapter);
3385         if (status)
3386                 goto ctrl_clean;
3387
3388         status = be_cmd_reset_function(adapter);
3389         if (status)
3390                 goto ctrl_clean;
3391
3392         status = be_stats_init(adapter);
3393         if (status)
3394                 goto ctrl_clean;
3395
3396         status = be_get_config(adapter);
3397         if (status)
3398                 goto stats_clean;
3399
3400         /* The INTR bit may be set in the card when probed by a kdump kernel
3401          * after a crash; clear it so interrupts stay off until setup is done.
3402          */
3403         if (!lancer_chip(adapter))
3404                 be_intr_set(adapter, false);
3405
3406         be_msix_enable(adapter);
3407
3408         INIT_DELAYED_WORK(&adapter->work, be_worker);
3409         adapter->rx_fc = adapter->tx_fc = true;
3410
3411         status = be_setup(adapter);
3412         if (status)
3413                 goto msix_disable;
3414
3415         be_netdev_init(netdev);
3416         status = register_netdev(netdev);
3417         if (status != 0)
3418                 goto unsetup;
3419
3420         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3421
3422         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3423         return 0;
3424
3425 unsetup:
3426         be_clear(adapter);
3427 msix_disable:
3428         be_msix_disable(adapter);
3429 stats_clean:
3430         be_stats_cleanup(adapter);
3431 ctrl_clean:
3432         be_ctrl_cleanup(adapter);
3433 disable_sriov:
3434         be_sriov_disable(adapter);
3435 free_netdev:
3436         free_netdev(netdev);
3437         pci_set_drvdata(pdev, NULL);
3438 rel_reg:
3439         pci_release_regions(pdev);
3440 disable_dev:
3441         pci_disable_device(pdev);
3442 do_none:
3443         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3444         return status;
3445 }
3446
3447 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3448 {
3449         struct be_adapter *adapter = pci_get_drvdata(pdev);
3450         struct net_device *netdev =  adapter->netdev;
3451
3452         cancel_delayed_work_sync(&adapter->work);
3453         if (adapter->wol)
3454                 be_setup_wol(adapter, true);
3455
3456         netif_device_detach(netdev);
3457         if (netif_running(netdev)) {
3458                 rtnl_lock();
3459                 be_close(netdev);
3460                 rtnl_unlock();
3461         }
3462         be_clear(adapter);
3463
3464         be_msix_disable(adapter);
3465         pci_save_state(pdev);
3466         pci_disable_device(pdev);
3467         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3468         return 0;
3469 }
3470
3471 static int be_resume(struct pci_dev *pdev)
3472 {
3473         int status = 0;
3474         struct be_adapter *adapter = pci_get_drvdata(pdev);
3475         struct net_device *netdev =  adapter->netdev;
3476
3477         netif_device_detach(netdev);
3478
3479         status = pci_enable_device(pdev);
3480         if (status)
3481                 return status;
3482
3483         pci_set_power_state(pdev, PCI_D0);
3484         pci_restore_state(pdev);
3485
3486         be_msix_enable(adapter);
3487         /* tell fw we're ready to fire cmds */
3488         status = be_cmd_fw_init(adapter);
3489         if (status)
3490                 return status;
3491
3492         be_setup(adapter);
3493         if (netif_running(netdev)) {
3494                 rtnl_lock();
3495                 be_open(netdev);
3496                 rtnl_unlock();
3497         }
3498         netif_device_attach(netdev);
3499
3500         if (adapter->wol)
3501                 be_setup_wol(adapter, false);
3502
3503         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3504         return 0;
3505 }
3506
3507 /*
3508  * An FLR, triggered via be_cmd_reset_function(), stops BE from DMAing any data.
3509  */
3510 static void be_shutdown(struct pci_dev *pdev)
3511 {
3512         struct be_adapter *adapter = pci_get_drvdata(pdev);
3513
3514         if (!adapter)
3515                 return;
3516
3517         cancel_delayed_work_sync(&adapter->work);
3518
3519         netif_device_detach(adapter->netdev);
3520
3521         if (adapter->wol)
3522                 be_setup_wol(adapter, true);
3523
3524         be_cmd_reset_function(adapter);
3525
3526         pci_disable_device(pdev);
3527 }
3528
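/* EEH recovery flow: error_detected quiesces and disables the device,
 * slot_reset re-enables it and re-runs POST, and resume re-initializes
 * FW state and reopens the interface.
 */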
3529 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3530                                 pci_channel_state_t state)
3531 {
3532         struct be_adapter *adapter = pci_get_drvdata(pdev);
3533         struct net_device *netdev =  adapter->netdev;
3534
3535         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3536
3537         adapter->eeh_err = true;
3538
3539         netif_device_detach(netdev);
3540
3541         if (netif_running(netdev)) {
3542                 rtnl_lock();
3543                 be_close(netdev);
3544                 rtnl_unlock();
3545         }
3546         be_clear(adapter);
3547
3548         if (state == pci_channel_io_perm_failure)
3549                 return PCI_ERS_RESULT_DISCONNECT;
3550
3551         pci_disable_device(pdev);
3552
3553         return PCI_ERS_RESULT_NEED_RESET;
3554 }
3555
3556 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3557 {
3558         struct be_adapter *adapter = pci_get_drvdata(pdev);
3559         int status;
3560
3561         dev_info(&adapter->pdev->dev, "EEH reset\n");
3562         adapter->eeh_err = false;
3563
3564         status = pci_enable_device(pdev);
3565         if (status)
3566                 return PCI_ERS_RESULT_DISCONNECT;
3567
3568         pci_set_master(pdev);
3569         pci_set_power_state(pdev, PCI_D0);
3570         pci_restore_state(pdev);
3571
3572         /* Check if card is ok and fw is ready */
3573         status = be_cmd_POST(adapter);
3574         if (status)
3575                 return PCI_ERS_RESULT_DISCONNECT;
3576
3577         return PCI_ERS_RESULT_RECOVERED;
3578 }
3579
3580 static void be_eeh_resume(struct pci_dev *pdev)
3581 {
3582         int status = 0;
3583         struct be_adapter *adapter = pci_get_drvdata(pdev);
3584         struct net_device *netdev =  adapter->netdev;
3585
3586         dev_info(&adapter->pdev->dev, "EEH resume\n");
3587
3588         pci_save_state(pdev);
3589
3590         /* tell fw we're ready to fire cmds */
3591         status = be_cmd_fw_init(adapter);
3592         if (status)
3593                 goto err;
3594
3595         status = be_setup(adapter);
3596         if (status)
3597                 goto err;
3598
3599         if (netif_running(netdev)) {
3600                 status = be_open(netdev);
3601                 if (status)
3602                         goto err;
3603         }
3604         netif_device_attach(netdev);
3605         return;
3606 err:
3607         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3608 }
3609
3610 static struct pci_error_handlers be_eeh_handlers = {
3611         .error_detected = be_eeh_err_detected,
3612         .slot_reset = be_eeh_reset,
3613         .resume = be_eeh_resume,
3614 };
3615
3616 static struct pci_driver be_driver = {
3617         .name = DRV_NAME,
3618         .id_table = be_dev_ids,
3619         .probe = be_probe,
3620         .remove = be_remove,
3621         .suspend = be_suspend,
3622         .resume = be_resume,
3623         .shutdown = be_shutdown,
3624         .err_handler = &be_eeh_handlers
3625 };
3626
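/* Module init: rx_frag_size accepts only 2048/4096/8192 and falls back
 * to 2048 otherwise. A typical load (module name assumed from DRV_NAME)
 * might look like: modprobe be2net rx_frag_size=4096 num_vfs=2
 */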
3627 static int __init be_init_module(void)
3628 {
3629         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3630             rx_frag_size != 2048) {
3631                 printk(KERN_WARNING DRV_NAME
3632                         " : Module param rx_frag_size must be 2048/4096/8192."
3633                         " Using 2048\n");
3634                 rx_frag_size = 2048;
3635         }
3636
3637         return pci_register_driver(&be_driver);
3638 }
3639 module_init(be_init_module);
3640
3641 static void __exit be_exit_module(void)
3642 {
3643         pci_unregister_driver(&be_driver);
3644 }
3645 module_exit(be_exit_module);