drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
22
23 MODULE_VERSION(DRV_VER);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
28
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43         { 0 }
44 };
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48         "CEV",
49         "CTX",
50         "DBUF",
51         "ERX",
52         "Host",
53         "MPU",
54         "NDMA",
55         "PTC ",
56         "RDMA ",
57         "RXF ",
58         "RXIPS ",
59         "RXULP0 ",
60         "RXULP1 ",
61         "RXULP2 ",
62         "TIM ",
63         "TPOST ",
64         "TPRE ",
65         "TXIPS ",
66         "TXULP0 ",
67         "TXULP1 ",
68         "UC ",
69         "WDMA ",
70         "TXULP2 ",
71         "HOST1 ",
72         "P0_OB_LINK ",
73         "P1_OB_LINK ",
74         "HOST_GPIO ",
75         "MBOX ",
76         "AXGMAC0",
77         "AXGMAC1",
78         "JTAG",
79         "MPU_INTPEND"
80 };
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83         "LPCMEMHOST",
84         "MGMT_MAC",
85         "PCS0ONLINE",
86         "MPU_IRAM",
87         "PCS1ONLINE",
88         "PCTL0",
89         "PCTL1",
90         "PMEM",
91         "RR",
92         "TXPB",
93         "RXPP",
94         "XAUI",
95         "TXP",
96         "ARM",
97         "IPC",
98         "HOST2",
99         "HOST3",
100         "HOST4",
101         "HOST5",
102         "HOST6",
103         "HOST7",
104         "HOST8",
105         "HOST9",
106         "NETC",
107         "Unknown",
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown"
115 };
116
117 /* Is BE in a multi-channel mode */
118 static inline bool be_is_mc(struct be_adapter *adapter) {
119         return (adapter->function_mode & FLEX10_MODE ||
120                 adapter->function_mode & VNIC_MODE ||
121                 adapter->function_mode & UMC_ENABLED);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126         struct be_dma_mem *mem = &q->dma_mem;
127         if (mem->va)
128                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129                                   mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133                 u16 len, u16 entry_size)
134 {
135         struct be_dma_mem *mem = &q->dma_mem;
136
137         memset(q, 0, sizeof(*q));
138         q->len = len;
139         q->entry_size = entry_size;
140         mem->size = len * entry_size;
141         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142                                      GFP_KERNEL);
143         if (!mem->va)
144                 return -ENOMEM;
145         memset(mem->va, 0, mem->size);
146         return 0;
147 }
148
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151         u32 reg, enabled;
152
153         if (adapter->eeh_err)
154                 return;
155
156         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157                                 &reg);
158         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
160         if (!enabled && enable)
161                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else if (enabled && !enable)
163                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164         else
165                 return;
166
167         pci_write_config_dword(adapter->pdev,
168                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169 }
170
171 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
172 {
173         u32 val = 0;
174         val |= qid & DB_RQ_RING_ID_MASK;
175         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
176
177         wmb();
178         iowrite32(val, adapter->db + DB_RQ_OFFSET);
179 }
180
181 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
182 {
183         u32 val = 0;
184         val |= qid & DB_TXULP_RING_ID_MASK;
185         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
186
187         wmb();
188         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
189 }
190
191 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
192                 bool arm, bool clear_int, u16 num_popped)
193 {
194         u32 val = 0;
195         val |= qid & DB_EQ_RING_ID_MASK;
196         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
197                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
198
199         if (adapter->eeh_err)
200                 return;
201
202         if (arm)
203                 val |= 1 << DB_EQ_REARM_SHIFT;
204         if (clear_int)
205                 val |= 1 << DB_EQ_CLR_SHIFT;
206         val |= 1 << DB_EQ_EVNT_SHIFT;
207         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
208         iowrite32(val, adapter->db + DB_EQ_OFFSET);
209 }
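/*
 * Doorbell example (illustrative values only): arming EQ 5 after popping
 * 3 events builds, roughly,
 *
 *   val = (5 & DB_EQ_RING_ID_MASK) | (1 << DB_EQ_REARM_SHIFT) |
 *         (1 << DB_EQ_EVNT_SHIFT) | (3 << DB_EQ_NUM_POPPED_SHIFT);
 *
 * and writes it to the EQ doorbell at adapter->db + DB_EQ_OFFSET.
 */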
210
211 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
212 {
213         u32 val = 0;
214         val |= qid & DB_CQ_RING_ID_MASK;
215         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
216                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
217
218         if (adapter->eeh_err)
219                 return;
220
221         if (arm)
222                 val |= 1 << DB_CQ_REARM_SHIFT;
223         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
224         iowrite32(val, adapter->db + DB_CQ_OFFSET);
225 }
226
227 static int be_mac_addr_set(struct net_device *netdev, void *p)
228 {
229         struct be_adapter *adapter = netdev_priv(netdev);
230         struct sockaddr *addr = p;
231         int status = 0;
232         u8 current_mac[ETH_ALEN];
233         u32 pmac_id = adapter->pmac_id;
234
235         if (!is_valid_ether_addr(addr->sa_data))
236                 return -EADDRNOTAVAIL;
237
238         status = be_cmd_mac_addr_query(adapter, current_mac,
239                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
240         if (status)
241                 goto err;
242
243         if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
244                 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245                                 adapter->if_handle, &adapter->pmac_id, 0);
246                 if (status)
247                         goto err;
248
249                 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
250         }
251         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
252         return 0;
253 err:
254         dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
255         return status;
256 }
257
258 static void populate_be2_stats(struct be_adapter *adapter)
259 {
260         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
261         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
262         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
263         struct be_port_rxf_stats_v0 *port_stats =
264                                         &rxf_stats->port[adapter->port_num];
265         struct be_drv_stats *drvs = &adapter->drv_stats;
266
267         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
268         drvs->rx_pause_frames = port_stats->rx_pause_frames;
269         drvs->rx_crc_errors = port_stats->rx_crc_errors;
270         drvs->rx_control_frames = port_stats->rx_control_frames;
271         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
272         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
273         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
274         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
275         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
276         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
277         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
278         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
279         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
280         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
281         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
282         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
283         drvs->rx_dropped_header_too_small =
284                 port_stats->rx_dropped_header_too_small;
285         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
286         drvs->rx_alignment_symbol_errors =
287                 port_stats->rx_alignment_symbol_errors;
288
289         drvs->tx_pauseframes = port_stats->tx_pauseframes;
290         drvs->tx_controlframes = port_stats->tx_controlframes;
291
292         if (adapter->port_num)
293                 drvs->jabber_events = rxf_stats->port1_jabber_events;
294         else
295                 drvs->jabber_events = rxf_stats->port0_jabber_events;
296         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
297         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
298         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
299         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
300         drvs->forwarded_packets = rxf_stats->forwarded_packets;
301         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
302         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
303         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
304         drvs->eth_red_drops = pmem_sts->eth_red_drops;
305 }
306
307 static void populate_be3_stats(struct be_adapter *adapter)
308 {
309         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
310         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
311         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
312         struct be_port_rxf_stats_v1 *port_stats =
313                                         &rxf_stats->port[adapter->port_num];
314         struct be_drv_stats *drvs = &adapter->drv_stats;
315
316         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
317         drvs->rx_pause_frames = port_stats->rx_pause_frames;
318         drvs->rx_crc_errors = port_stats->rx_crc_errors;
319         drvs->rx_control_frames = port_stats->rx_control_frames;
320         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
321         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
322         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
323         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
324         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
325         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
326         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
327         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
328         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
329         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
330         drvs->rx_dropped_header_too_small =
331                 port_stats->rx_dropped_header_too_small;
332         drvs->rx_input_fifo_overflow_drop =
333                 port_stats->rx_input_fifo_overflow_drop;
334         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
335         drvs->rx_alignment_symbol_errors =
336                 port_stats->rx_alignment_symbol_errors;
337         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
338         drvs->tx_pauseframes = port_stats->tx_pauseframes;
339         drvs->tx_controlframes = port_stats->tx_controlframes;
340         drvs->jabber_events = port_stats->jabber_events;
341         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
342         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
343         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
344         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
345         drvs->forwarded_packets = rxf_stats->forwarded_packets;
346         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
347         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
348         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
349         drvs->eth_red_drops = pmem_sts->eth_red_drops;
350 }
351
352 static void populate_lancer_stats(struct be_adapter *adapter)
353 {
354
355         struct be_drv_stats *drvs = &adapter->drv_stats;
356         struct lancer_pport_stats *pport_stats =
357                                         pport_stats_from_cmd(adapter);
358
359         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
360         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
361         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
362         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
363         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
364         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
365         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
366         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
367         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
368         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
369         drvs->rx_dropped_tcp_length =
370                                 pport_stats->rx_dropped_invalid_tcp_length;
371         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
372         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
373         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
374         drvs->rx_dropped_header_too_small =
375                                 pport_stats->rx_dropped_header_too_small;
376         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
377         drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
378         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
379         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
380         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
381         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
382         drvs->jabber_events = pport_stats->rx_jabbers;
383         drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
384         drvs->forwarded_packets = pport_stats->num_forwards_lo;
385         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
386         drvs->rx_drops_too_many_frags =
387                                 pport_stats->rx_drops_too_many_frags_lo;
388 }
389
390 static void accumulate_16bit_val(u32 *acc, u16 val)
391 {
392 #define lo(x)                   ((x) & 0xFFFF)
393 #define hi(x)                   ((x) & 0xFFFF0000)
394         bool wrapped = val < lo(*acc);
395         u32 newacc = hi(*acc) + val;
396
397         if (wrapped)
398                 newacc += 65536;
399         ACCESS_ONCE(*acc) = newacc;
400 }
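/*
 * Worked example (illustrative): if *acc == 0x0001FFF0 (one prior wrap,
 * last HW reading 0xFFF0) and the 16-bit HW counter now reads
 * val == 0x0005, then val < lo(*acc), so the counter wrapped again:
 *
 *   newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005
 *
 * i.e. the high half counts wraps of the HW counter while the low half
 * tracks its current value.
 */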
401
402 void be_parse_stats(struct be_adapter *adapter)
403 {
404         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
405         struct be_rx_obj *rxo;
406         int i;
407
408         if (adapter->generation == BE_GEN3) {
409                 if (lancer_chip(adapter))
410                         populate_lancer_stats(adapter);
411                 else
412                         populate_be3_stats(adapter);
413         } else {
414                 populate_be2_stats(adapter);
415         }
416
417         /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
418         for_all_rx_queues(adapter, rxo, i) {
419                 /* below erx HW counter can actually wrap around after
420                  * 65535. Driver accumulates a 32-bit value
421                  */
422                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
423                                 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
424         }
425 }
426
427 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
428                                         struct rtnl_link_stats64 *stats)
429 {
430         struct be_adapter *adapter = netdev_priv(netdev);
431         struct be_drv_stats *drvs = &adapter->drv_stats;
432         struct be_rx_obj *rxo;
433         struct be_tx_obj *txo;
434         u64 pkts, bytes;
435         unsigned int start;
436         int i;
437
438         for_all_rx_queues(adapter, rxo, i) {
439                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
440                 do {
441                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
442                         pkts = rx_stats(rxo)->rx_pkts;
443                         bytes = rx_stats(rxo)->rx_bytes;
444                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
445                 stats->rx_packets += pkts;
446                 stats->rx_bytes += bytes;
447                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
448                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
449                                         rx_stats(rxo)->rx_drops_no_frags;
450         }
451
452         for_all_tx_queues(adapter, txo, i) {
453                 const struct be_tx_stats *tx_stats = tx_stats(txo);
454                 do {
455                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
456                         pkts = tx_stats(txo)->tx_pkts;
457                         bytes = tx_stats(txo)->tx_bytes;
458                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
459                 stats->tx_packets += pkts;
460                 stats->tx_bytes += bytes;
461         }
462
463         /* bad pkts received */
464         stats->rx_errors = drvs->rx_crc_errors +
465                 drvs->rx_alignment_symbol_errors +
466                 drvs->rx_in_range_errors +
467                 drvs->rx_out_range_errors +
468                 drvs->rx_frame_too_long +
469                 drvs->rx_dropped_too_small +
470                 drvs->rx_dropped_too_short +
471                 drvs->rx_dropped_header_too_small +
472                 drvs->rx_dropped_tcp_length +
473                 drvs->rx_dropped_runt;
474
475         /* detailed rx errors */
476         stats->rx_length_errors = drvs->rx_in_range_errors +
477                 drvs->rx_out_range_errors +
478                 drvs->rx_frame_too_long;
479
480         stats->rx_crc_errors = drvs->rx_crc_errors;
481
482         /* frame alignment errors */
483         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
484
485         /* receiver fifo overrun */
486         /* drops_no_pbuf is not per i/f, it's per BE card */
487         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
488                                 drvs->rx_input_fifo_overflow_drop +
489                                 drvs->rx_drops_no_pbuf;
490         return stats;
491 }
492
493 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
494 {
495         struct net_device *netdev = adapter->netdev;
496
497         /* when link status changes, link speed must be re-queried from card */
498         adapter->link_speed = -1;
499         if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
500                 netif_carrier_on(netdev);
501                 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
502         } else {
503                 netif_carrier_off(netdev);
504                 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
505         }
506 }
507
508 static void be_tx_stats_update(struct be_tx_obj *txo,
509                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
510 {
511         struct be_tx_stats *stats = tx_stats(txo);
512
513         u64_stats_update_begin(&stats->sync);
514         stats->tx_reqs++;
515         stats->tx_wrbs += wrb_cnt;
516         stats->tx_bytes += copied;
517         stats->tx_pkts += (gso_segs ? gso_segs : 1);
518         if (stopped)
519                 stats->tx_stops++;
520         u64_stats_update_end(&stats->sync);
521 }
522
523 /* Determine number of WRB entries needed to xmit data in an skb */
524 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
525                                                                 bool *dummy)
526 {
527         int cnt = (skb->len > skb->data_len);
528
529         cnt += skb_shinfo(skb)->nr_frags;
530
531         /* to account for hdr wrb */
532         cnt++;
533         if (lancer_chip(adapter) || !(cnt & 1)) {
534                 *dummy = false;
535         } else {
536                 /* add a dummy to make it an even num */
537                 cnt++;
538                 *dummy = true;
539         }
540         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
541         return cnt;
542 }
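/*
 * Example (illustrative): an skb with linear data and one page frag needs
 * 1 (linear) + 1 (frag) + 1 (hdr) = 3 WRBs; on non-Lancer chips a dummy
 * WRB rounds this up to an even 4. The same skb with two page frags needs
 * 4 WRBs and no dummy.
 */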
543
544 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
545 {
546         wrb->frag_pa_hi = upper_32_bits(addr);
547         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
548         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
549 }
550
551 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
552                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
553 {
554         u8 vlan_prio = 0;
555         u16 vlan_tag = 0;
556
557         memset(hdr, 0, sizeof(*hdr));
558
559         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
560
561         if (skb_is_gso(skb)) {
562                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
563                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
564                         hdr, skb_shinfo(skb)->gso_size);
565                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
566                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
567                 if (lancer_chip(adapter) && adapter->sli_family  ==
568                                                         LANCER_A0_SLI_FAMILY) {
569                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
570                         if (is_tcp_pkt(skb))
571                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
572                                                                 tcpcs, hdr, 1);
573                         else if (is_udp_pkt(skb))
574                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
575                                                                 udpcs, hdr, 1);
576                 }
577         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
578                 if (is_tcp_pkt(skb))
579                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
580                 else if (is_udp_pkt(skb))
581                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
582         }
583
584         if (vlan_tx_tag_present(skb)) {
585                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
586                 vlan_tag = vlan_tx_tag_get(skb);
587                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
588                 /* If vlan priority provided by OS is NOT in available bmap */
589                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
590                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
591                                         adapter->recommended_prio;
592                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
593         }
594
595         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
596         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
597         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
598         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
599 }
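/*
 * VLAN priority remap example (illustrative): with vlan_prio_bmap == 0x0F
 * only priorities 0-3 are available, so a tag carrying priority 5 fails
 * the bitmap test and its PCP bits are rewritten with
 * adapter->recommended_prio before the tag is placed in the header WRB.
 */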
600
601 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
602                 bool unmap_single)
603 {
604         dma_addr_t dma;
605
606         be_dws_le_to_cpu(wrb, sizeof(*wrb));
607
608         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
609         if (wrb->frag_len) {
610                 if (unmap_single)
611                         dma_unmap_single(dev, dma, wrb->frag_len,
612                                          DMA_TO_DEVICE);
613                 else
614                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
615         }
616 }
617
618 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
619                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
620 {
621         dma_addr_t busaddr;
622         int i, copied = 0;
623         struct device *dev = &adapter->pdev->dev;
624         struct sk_buff *first_skb = skb;
625         struct be_eth_wrb *wrb;
626         struct be_eth_hdr_wrb *hdr;
627         bool map_single = false;
628         u16 map_head;
629
630         hdr = queue_head_node(txq);
631         queue_head_inc(txq);
632         map_head = txq->head;
633
634         if (skb->len > skb->data_len) {
635                 int len = skb_headlen(skb);
636                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
637                 if (dma_mapping_error(dev, busaddr))
638                         goto dma_err;
639                 map_single = true;
640                 wrb = queue_head_node(txq);
641                 wrb_fill(wrb, busaddr, len);
642                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
643                 queue_head_inc(txq);
644                 copied += len;
645         }
646
647         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
648                 const struct skb_frag_struct *frag =
649                         &skb_shinfo(skb)->frags[i];
650                 busaddr = skb_frag_dma_map(dev, frag, 0,
651                                            skb_frag_size(frag), DMA_TO_DEVICE);
652                 if (dma_mapping_error(dev, busaddr))
653                         goto dma_err;
654                 wrb = queue_head_node(txq);
655                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
656                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
657                 queue_head_inc(txq);
658                 copied += skb_frag_size(frag);
659         }
660
661         if (dummy_wrb) {
662                 wrb = queue_head_node(txq);
663                 wrb_fill(wrb, 0, 0);
664                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665                 queue_head_inc(txq);
666         }
667
668         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
669         be_dws_cpu_to_le(hdr, sizeof(*hdr));
670
671         return copied;
672 dma_err:
673         txq->head = map_head;
674         while (copied) {
675                 wrb = queue_head_node(txq);
676                 unmap_tx_frag(dev, wrb, map_single);
677                 map_single = false;
678                 copied -= wrb->frag_len;
679                 queue_head_inc(txq);
680         }
681         return 0;
682 }
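/*
 * Resulting TX ring layout (sketch):
 *
 *   [hdr wrb][linear-data wrb][frag wrbs ...][dummy wrb, if count is odd]
 *
 * The header WRB slot is reserved first but filled last, once the total
 * wrb_cnt and copied byte count are known.
 */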
683
684 static netdev_tx_t be_xmit(struct sk_buff *skb,
685                         struct net_device *netdev)
686 {
687         struct be_adapter *adapter = netdev_priv(netdev);
688         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
689         struct be_queue_info *txq = &txo->q;
690         u32 wrb_cnt = 0, copied = 0;
691         u32 start = txq->head;
692         bool dummy_wrb, stopped = false;
693
694         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
695
696         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
697         if (copied) {
698                 /* record the sent skb in the sent_skb table */
699                 BUG_ON(txo->sent_skb_list[start]);
700                 txo->sent_skb_list[start] = skb;
701
702                 /* Ensure txq has space for the next skb; else stop the queue
703                  * *BEFORE* ringing the tx doorbell, so that we serialize the
704                  * tx compls of the current transmit which'll wake up the queue
705                  */
706                 atomic_add(wrb_cnt, &txq->used);
707                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
708                                                                 txq->len) {
709                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
710                         stopped = true;
711                 }
712
713                 be_txq_notify(adapter, txq->id, wrb_cnt);
714
715                 be_tx_stats_update(txo, wrb_cnt, copied,
716                                 skb_shinfo(skb)->gso_segs, stopped);
717         } else {
718                 txq->head = start;
719                 dev_kfree_skb_any(skb);
720         }
721         return NETDEV_TX_OK;
722 }
723
724 static int be_change_mtu(struct net_device *netdev, int new_mtu)
725 {
726         struct be_adapter *adapter = netdev_priv(netdev);
727         if (new_mtu < BE_MIN_MTU ||
728                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
729                                         (ETH_HLEN + ETH_FCS_LEN))) {
730                 dev_info(&adapter->pdev->dev,
731                         "MTU must be between %d and %d bytes\n",
732                         BE_MIN_MTU,
733                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
734                 return -EINVAL;
735         }
736         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
737                         netdev->mtu, new_mtu);
738         netdev->mtu = new_mtu;
739         return 0;
740 }
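/*
 * Bounds example (assuming BE_MAX_JUMBO_FRAME_SIZE is 9018): the largest
 * accepted MTU is 9018 - (14 + 4) = 9000 bytes, i.e. the jumbo frame size
 * minus the Ethernet header and FCS.
 */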
741
742 /*
743  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
744  * If the user configures more, place BE in vlan promiscuous mode.
745  */
746 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
747 {
748         u16 vtag[BE_NUM_VLANS_SUPPORTED];
749         u16 ntags = 0, i;
750         int status = 0;
751         u32 if_handle;
752
753         if (vf) {
754                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
755                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
756                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
757         }
758
759         /* No need to further configure vids if in promiscuous mode */
760         if (adapter->promiscuous)
761                 return 0;
762
763         if (adapter->vlans_added <= adapter->max_vlans)  {
764                 /* Construct VLAN Table to give to HW */
765                 for (i = 0; i < VLAN_N_VID; i++) {
766                         if (adapter->vlan_tag[i]) {
767                                 vtag[ntags] = cpu_to_le16(i);
768                                 ntags++;
769                         }
770                 }
771                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
772                                         vtag, ntags, 1, 0);
773         } else {
774                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
775                                         NULL, 0, 1, 1);
776         }
777
778         return status;
779 }
780
781 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
782 {
783         struct be_adapter *adapter = netdev_priv(netdev);
784
785         adapter->vlans_added++;
786         if (!be_physfn(adapter))
787                 return;
788
789         adapter->vlan_tag[vid] = 1;
790         if (adapter->vlans_added <= (adapter->max_vlans + 1))
791                 be_vid_config(adapter, false, 0);
792 }
793
794 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
795 {
796         struct be_adapter *adapter = netdev_priv(netdev);
797
798         adapter->vlans_added--;
799
800         if (!be_physfn(adapter))
801                 return;
802
803         adapter->vlan_tag[vid] = 0;
804         if (adapter->vlans_added <= adapter->max_vlans)
805                 be_vid_config(adapter, false, 0);
806 }
807
808 static void be_set_rx_mode(struct net_device *netdev)
809 {
810         struct be_adapter *adapter = netdev_priv(netdev);
811
812         if (netdev->flags & IFF_PROMISC) {
813                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
814                 adapter->promiscuous = true;
815                 goto done;
816         }
817
818         /* BE was previously in promiscuous mode; disable it */
819         if (adapter->promiscuous) {
820                 adapter->promiscuous = false;
821                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
822
823                 if (adapter->vlans_added)
824                         be_vid_config(adapter, false, 0);
825         }
826
827         /* Enable multicast promisc if num configured exceeds what we support */
828         if (netdev->flags & IFF_ALLMULTI ||
829                         netdev_mc_count(netdev) > BE_MAX_MC) {
830                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
831                 goto done;
832         }
833
834         be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
835 done:
836         return;
837 }
838
839 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
840 {
841         struct be_adapter *adapter = netdev_priv(netdev);
842         int status;
843
844         if (!adapter->sriov_enabled)
845                 return -EPERM;
846
847         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
848                 return -EINVAL;
849
850         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
851                 status = be_cmd_pmac_del(adapter,
852                                         adapter->vf_cfg[vf].vf_if_handle,
853                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
854
855         status = be_cmd_pmac_add(adapter, mac,
856                                 adapter->vf_cfg[vf].vf_if_handle,
857                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
858
859         if (status)
860                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
861                                 mac, vf);
862         else
863                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
864
865         return status;
866 }
867
868 static int be_get_vf_config(struct net_device *netdev, int vf,
869                         struct ifla_vf_info *vi)
870 {
871         struct be_adapter *adapter = netdev_priv(netdev);
872
873         if (!adapter->sriov_enabled)
874                 return -EPERM;
875
876         if (vf >= num_vfs)
877                 return -EINVAL;
878
879         vi->vf = vf;
880         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
881         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
882         vi->qos = 0;
883         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
884
885         return 0;
886 }
887
888 static int be_set_vf_vlan(struct net_device *netdev,
889                         int vf, u16 vlan, u8 qos)
890 {
891         struct be_adapter *adapter = netdev_priv(netdev);
892         int status = 0;
893
894         if (!adapter->sriov_enabled)
895                 return -EPERM;
896
897         if ((vf >= num_vfs) || (vlan > 4095))
898                 return -EINVAL;
899
900         if (vlan) {
901                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
902                 adapter->vlans_added++;
903         } else {
904                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
905                 adapter->vlans_added--;
906         }
907
908         status = be_vid_config(adapter, true, vf);
909
910         if (status)
911                 dev_info(&adapter->pdev->dev,
912                                 "VLAN %d config on VF %d failed\n", vlan, vf);
913         return status;
914 }
915
916 static int be_set_vf_tx_rate(struct net_device *netdev,
917                         int vf, int rate)
918 {
919         struct be_adapter *adapter = netdev_priv(netdev);
920         int status = 0;
921
922         if (!adapter->sriov_enabled)
923                 return -EPERM;
924
925         if ((vf >= num_vfs) || (rate < 0))
926                 return -EINVAL;
927
928         if (rate > 10000)
929                 rate = 10000;
930
931         adapter->vf_cfg[vf].vf_tx_rate = rate;
932         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
933
934         if (status)
935                 dev_info(&adapter->pdev->dev,
936                                 "tx rate %d on VF %d failed\n", rate, vf);
937         return status;
938 }
939
940 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
941 {
942         struct be_eq_obj *rx_eq = &rxo->rx_eq;
943         struct be_rx_stats *stats = rx_stats(rxo);
944         ulong now = jiffies;
945         ulong delta = now - stats->rx_jiffies;
946         u64 pkts;
947         unsigned int start, eqd;
948
949         if (!rx_eq->enable_aic)
950                 return;
951
952         /* Wrapped around */
953         if (time_before(now, stats->rx_jiffies)) {
954                 stats->rx_jiffies = now;
955                 return;
956         }
957
958         /* Update once a second */
959         if (delta < HZ)
960                 return;
961
962         do {
963                 start = u64_stats_fetch_begin_bh(&stats->sync);
964                 pkts = stats->rx_pkts;
965         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
966
967         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
968         stats->rx_pkts_prev = pkts;
969         stats->rx_jiffies = now;
970         eqd = stats->rx_pps / 110000;
971         eqd = eqd << 3;
972         if (eqd > rx_eq->max_eqd)
973                 eqd = rx_eq->max_eqd;
974         if (eqd < rx_eq->min_eqd)
975                 eqd = rx_eq->min_eqd;
976         if (eqd < 10)
977                 eqd = 0;
978         if (eqd != rx_eq->cur_eqd) {
979                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
980                 rx_eq->cur_eqd = eqd;
981         }
982 }
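/*
 * EQD sizing example (illustrative numbers): at rx_pps == 440000 the
 * interim value is (440000 / 110000) << 3 == 32, which is then clamped to
 * [min_eqd, max_eqd]; any result still below 10 is forced to 0 so that
 * low-rate traffic is not delayed by interrupt moderation.
 */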
983
984 static void be_rx_stats_update(struct be_rx_obj *rxo,
985                 struct be_rx_compl_info *rxcp)
986 {
987         struct be_rx_stats *stats = rx_stats(rxo);
988
989         u64_stats_update_begin(&stats->sync);
990         stats->rx_compl++;
991         stats->rx_bytes += rxcp->pkt_size;
992         stats->rx_pkts++;
993         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
994                 stats->rx_mcast_pkts++;
995         if (rxcp->err)
996                 stats->rx_compl_err++;
997         u64_stats_update_end(&stats->sync);
998 }
999
1000 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1001 {
1002         /* L4 checksum is not reliable for non TCP/UDP packets.
1003          * Also ignore ipcksm for ipv6 pkts */
1004         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1005                                 (rxcp->ip_csum || rxcp->ipv6);
1006 }
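/*
 * In other words (sketch of the truth table): a TCP or UDP frame whose L4
 * checksum passed is accepted if the IP checksum also passed, or if the
 * frame is IPv6 (which carries no IP header checksum). Everything else
 * falls back to software checksumming.
 */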
1007
1008 static struct be_rx_page_info *
1009 get_rx_page_info(struct be_adapter *adapter,
1010                 struct be_rx_obj *rxo,
1011                 u16 frag_idx)
1012 {
1013         struct be_rx_page_info *rx_page_info;
1014         struct be_queue_info *rxq = &rxo->q;
1015
1016         rx_page_info = &rxo->page_info_tbl[frag_idx];
1017         BUG_ON(!rx_page_info->page);
1018
1019         if (rx_page_info->last_page_user) {
1020                 dma_unmap_page(&adapter->pdev->dev,
1021                                dma_unmap_addr(rx_page_info, bus),
1022                                adapter->big_page_size, DMA_FROM_DEVICE);
1023                 rx_page_info->last_page_user = false;
1024         }
1025
1026         atomic_dec(&rxq->used);
1027         return rx_page_info;
1028 }
1029
1030 /* Throw away the data in the Rx completion */
1031 static void be_rx_compl_discard(struct be_adapter *adapter,
1032                 struct be_rx_obj *rxo,
1033                 struct be_rx_compl_info *rxcp)
1034 {
1035         struct be_queue_info *rxq = &rxo->q;
1036         struct be_rx_page_info *page_info;
1037         u16 i, num_rcvd = rxcp->num_rcvd;
1038
1039         for (i = 0; i < num_rcvd; i++) {
1040                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1041                 put_page(page_info->page);
1042                 memset(page_info, 0, sizeof(*page_info));
1043                 index_inc(&rxcp->rxq_idx, rxq->len);
1044         }
1045 }
1046
1047 /*
1048  * skb_fill_rx_data forms a complete skb for an ether frame
1049  * indicated by rxcp.
1050  */
1051 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1052                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1053 {
1054         struct be_queue_info *rxq = &rxo->q;
1055         struct be_rx_page_info *page_info;
1056         u16 i, j;
1057         u16 hdr_len, curr_frag_len, remaining;
1058         u8 *start;
1059
1060         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1061         start = page_address(page_info->page) + page_info->page_offset;
1062         prefetch(start);
1063
1064         /* Copy data in the first descriptor of this completion */
1065         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1066
1067         /* Copy the header portion into skb_data */
1068         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1069         memcpy(skb->data, start, hdr_len);
1070         skb->len = curr_frag_len;
1071         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1072                 /* Complete packet has now been moved to data */
1073                 put_page(page_info->page);
1074                 skb->data_len = 0;
1075                 skb->tail += curr_frag_len;
1076         } else {
1077                 skb_shinfo(skb)->nr_frags = 1;
1078                 skb_frag_set_page(skb, 0, page_info->page);
1079                 skb_shinfo(skb)->frags[0].page_offset =
1080                                         page_info->page_offset + hdr_len;
1081                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1082                 skb->data_len = curr_frag_len - hdr_len;
1083                 skb->truesize += rx_frag_size;
1084                 skb->tail += hdr_len;
1085         }
1086         page_info->page = NULL;
1087
1088         if (rxcp->pkt_size <= rx_frag_size) {
1089                 BUG_ON(rxcp->num_rcvd != 1);
1090                 return;
1091         }
1092
1093         /* More frags present for this completion */
1094         index_inc(&rxcp->rxq_idx, rxq->len);
1095         remaining = rxcp->pkt_size - curr_frag_len;
1096         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1097                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1098                 curr_frag_len = min(remaining, rx_frag_size);
1099
1100                 /* Coalesce all frags from the same physical page in one slot */
1101                 if (page_info->page_offset == 0) {
1102                         /* Fresh page */
1103                         j++;
1104                         skb_frag_set_page(skb, j, page_info->page);
1105                         skb_shinfo(skb)->frags[j].page_offset =
1106                                                         page_info->page_offset;
1107                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1108                         skb_shinfo(skb)->nr_frags++;
1109                 } else {
1110                         put_page(page_info->page);
1111                 }
1112
1113                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1114                 skb->len += curr_frag_len;
1115                 skb->data_len += curr_frag_len;
1116                 skb->truesize += rx_frag_size;
1117                 remaining -= curr_frag_len;
1118                 index_inc(&rxcp->rxq_idx, rxq->len);
1119                 page_info->page = NULL;
1120         }
1121         BUG_ON(j > MAX_SKB_FRAGS);
1122 }
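/*
 * Example (assuming BE_HDR_LEN is 64 and rx_frag_size is 2048): for a
 * 2500-byte frame spanning two RX frags, the first 64 bytes are copied
 * into the skb linear area, the remaining 1984 bytes of frag 0 become skb
 * frag[0], and the 452-byte second frag is appended (coalesced into the
 * same slot when it comes from the same physical page).
 */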
1123
1124 /* Process the RX completion indicated by rxcp when GRO is disabled */
1125 static void be_rx_compl_process(struct be_adapter *adapter,
1126                         struct be_rx_obj *rxo,
1127                         struct be_rx_compl_info *rxcp)
1128 {
1129         struct net_device *netdev = adapter->netdev;
1130         struct sk_buff *skb;
1131
1132         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1133         if (unlikely(!skb)) {
1134                 rx_stats(rxo)->rx_drops_no_skbs++;
1135                 be_rx_compl_discard(adapter, rxo, rxcp);
1136                 return;
1137         }
1138
1139         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1140
1141         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1142                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1143         else
1144                 skb_checksum_none_assert(skb);
1145
1146         skb->protocol = eth_type_trans(skb, netdev);
1147         if (adapter->netdev->features & NETIF_F_RXHASH)
1148                 skb->rxhash = rxcp->rss_hash;
1149
1150
1151         if (rxcp->vlanf)
1152                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1153
1154         netif_receive_skb(skb);
1155 }
1156
1157 /* Process the RX completion indicated by rxcp when GRO is enabled */
1158 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1159                 struct be_rx_obj *rxo,
1160                 struct be_rx_compl_info *rxcp)
1161 {
1162         struct be_rx_page_info *page_info;
1163         struct sk_buff *skb = NULL;
1164         struct be_queue_info *rxq = &rxo->q;
1165         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1166         u16 remaining, curr_frag_len;
1167         u16 i, j;
1168
1169         skb = napi_get_frags(&eq_obj->napi);
1170         if (!skb) {
1171                 be_rx_compl_discard(adapter, rxo, rxcp);
1172                 return;
1173         }
1174
1175         remaining = rxcp->pkt_size;
1176         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1177                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1178
1179                 curr_frag_len = min(remaining, rx_frag_size);
1180
1181                 /* Coalesce all frags from the same physical page in one slot */
1182                 if (i == 0 || page_info->page_offset == 0) {
1183                         /* First frag or Fresh page */
1184                         j++;
1185                         skb_frag_set_page(skb, j, page_info->page);
1186                         skb_shinfo(skb)->frags[j].page_offset =
1187                                                         page_info->page_offset;
1188                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1189                 } else {
1190                         put_page(page_info->page);
1191                 }
1192                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1193                 skb->truesize += rx_frag_size;
1194                 remaining -= curr_frag_len;
1195                 index_inc(&rxcp->rxq_idx, rxq->len);
1196                 memset(page_info, 0, sizeof(*page_info));
1197         }
1198         BUG_ON(j > MAX_SKB_FRAGS);
1199
1200         skb_shinfo(skb)->nr_frags = j + 1;
1201         skb->len = rxcp->pkt_size;
1202         skb->data_len = rxcp->pkt_size;
1203         skb->ip_summed = CHECKSUM_UNNECESSARY;
1204         if (adapter->netdev->features & NETIF_F_RXHASH)
1205                 skb->rxhash = rxcp->rss_hash;
1206
1207         if (rxcp->vlanf)
1208                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1209
1210         napi_gro_frags(&eq_obj->napi);
1211 }
1212
1213 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1214                                 struct be_eth_rx_compl *compl,
1215                                 struct be_rx_compl_info *rxcp)
1216 {
1217         rxcp->pkt_size =
1218                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1219         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1220         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1221         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1222         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1223         rxcp->ip_csum =
1224                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1225         rxcp->l4_csum =
1226                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1227         rxcp->ipv6 =
1228                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1229         rxcp->rxq_idx =
1230                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1231         rxcp->num_rcvd =
1232                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1233         rxcp->pkt_type =
1234                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1235         rxcp->rss_hash =
1236                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1237         if (rxcp->vlanf) {
1238                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1239                                           compl);
1240                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1241                                                compl);
1242         }
1243         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1244 }
1245
1246 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1247                                 struct be_eth_rx_compl *compl,
1248                                 struct be_rx_compl_info *rxcp)
1249 {
1250         rxcp->pkt_size =
1251                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1252         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1253         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1254         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1255         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1256         rxcp->ip_csum =
1257                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1258         rxcp->l4_csum =
1259                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1260         rxcp->ipv6 =
1261                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1262         rxcp->rxq_idx =
1263                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1264         rxcp->num_rcvd =
1265                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1266         rxcp->pkt_type =
1267                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1268         rxcp->rss_hash =
1269                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1270         if (rxcp->vlanf) {
1271                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1272                                           compl);
1273                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1274                                                compl);
1275         }
1276         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1277 }
1278
1279 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1280 {
1281         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1282         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1283         struct be_adapter *adapter = rxo->adapter;
1284
1285         /* For checking the valid bit it is OK to use either definition as the
1286          * valid bit is at the same position in both v0 and v1 Rx compl */
1287         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1288                 return NULL;
1289
1290         rmb();
1291         be_dws_le_to_cpu(compl, sizeof(*compl));
1292
1293         if (adapter->be3_native)
1294                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1295         else
1296                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1297
1298         if (rxcp->vlanf) {
1299                 /* vlanf could be wrongly set in some cards.
1300                  * Ignore it if vtm is not set */
1301                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1302                         rxcp->vlanf = 0;
1303
1304                 if (!lancer_chip(adapter))
1305                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1306
1307                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1308                     !adapter->vlan_tag[rxcp->vlan_tag])
1309                         rxcp->vlanf = 0;
1310         }
1311
1312         /* As the compl has been parsed, reset it; we won't touch it again */
1313         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1314
1315         queue_tail_inc(&rxo->cq);
1316         return rxcp;
1317 }
1318
1319 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1320 {
1321         u32 order = get_order(size);
1322
1323         if (order > 0)
1324                 gfp |= __GFP_COMP;
1325         return  alloc_pages(gfp, order);
1326 }
1327
1328 /*
1329  * Allocate a page, split it to fragments of size rx_frag_size and post as
1330  * receive buffers to BE
1331  */
1332 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1333 {
1334         struct be_adapter *adapter = rxo->adapter;
1335         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1336         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1337         struct be_queue_info *rxq = &rxo->q;
1338         struct page *pagep = NULL;
1339         struct be_eth_rx_d *rxd;
1340         u64 page_dmaaddr = 0, frag_dmaaddr;
1341         u32 posted, page_offset = 0;
1342
1343         page_info = &rxo->page_info_tbl[rxq->head];
1344         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1345                 if (!pagep) {
1346                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1347                         if (unlikely(!pagep)) {
1348                                 rx_stats(rxo)->rx_post_fail++;
1349                                 break;
1350                         }
1351                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1352                                                     0, adapter->big_page_size,
1353                                                     DMA_FROM_DEVICE);
1354                         page_info->page_offset = 0;
1355                 } else {
1356                         get_page(pagep);
1357                         page_info->page_offset = page_offset + rx_frag_size;
1358                 }
1359                 page_offset = page_info->page_offset;
1360                 page_info->page = pagep;
1361                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1362                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1363
1364                 rxd = queue_head_node(rxq);
1365                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1366                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1367
1368                 /* Any space left in the current big page for another frag? */
1369                 if ((page_offset + rx_frag_size + rx_frag_size) >
1370                                         adapter->big_page_size) {
1371                         pagep = NULL;
1372                         page_info->last_page_user = true;
1373                 }
1374
1375                 prev_page_info = page_info;
1376                 queue_head_inc(rxq);
1377                 page_info = &page_info_tbl[rxq->head];
1378         }
1379         if (pagep)
1380                 prev_page_info->last_page_user = true;
1381
1382         if (posted) {
1383                 atomic_add(posted, &rxq->used);
1384                 be_rxq_notify(adapter, rxq->id, posted);
1385         } else if (atomic_read(&rxq->used) == 0) {
1386                 /* Let be_worker replenish when memory is available */
1387                 rxo->rx_post_starved = true;
1388         }
1389 }
1390
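/* Pop the next valid TX completion off the CQ, or return NULL if none is
 * pending. As in be_rx_compl_get(), the rmb() orders reading the entry body
 * after the valid-bit check, and the valid bit is cleared once consumed.
 */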
1391 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1392 {
1393         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1394
1395         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1396                 return NULL;
1397
1398         rmb();
1399         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1400
1401         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1402
1403         queue_tail_inc(tx_cq);
1404         return txcp;
1405 }
1406
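/* Unmap the wrbs of the skb at txq->tail (whose last wrb is @last_index),
 * free the skb and return the number of wrbs consumed, including the
 * header wrb.
 */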
1407 static u16 be_tx_compl_process(struct be_adapter *adapter,
1408                 struct be_tx_obj *txo, u16 last_index)
1409 {
1410         struct be_queue_info *txq = &txo->q;
1411         struct be_eth_wrb *wrb;
1412         struct sk_buff **sent_skbs = txo->sent_skb_list;
1413         struct sk_buff *sent_skb;
1414         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1415         bool unmap_skb_hdr = true;
1416
1417         sent_skb = sent_skbs[txq->tail];
1418         BUG_ON(!sent_skb);
1419         sent_skbs[txq->tail] = NULL;
1420
1421         /* skip header wrb */
1422         queue_tail_inc(txq);
1423
1424         do {
1425                 cur_index = txq->tail;
1426                 wrb = queue_tail_node(txq);
1427                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1428                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1429                 unmap_skb_hdr = false;
1430
1431                 num_wrbs++;
1432                 queue_tail_inc(txq);
1433         } while (cur_index != last_index);
1434
1435         kfree_skb(sent_skb);
1436         return num_wrbs;
1437 }
1438
1439 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1440 {
1441         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1442
1443         if (!eqe->evt)
1444                 return NULL;
1445
1446         rmb();
1447         eqe->evt = le32_to_cpu(eqe->evt);
1448         queue_tail_inc(&eq_obj->q);
1449         return eqe;
1450 }
1451
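/* Drain the EQ and notify the hardware of how many entries were consumed.
 * When no events are found (a spurious interrupt) the EQ is re-armed
 * unconditionally; otherwise NAPI is scheduled to do the real work.
 */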
1452 static int event_handle(struct be_adapter *adapter,
1453                         struct be_eq_obj *eq_obj,
1454                         bool rearm)
1455 {
1456         struct be_eq_entry *eqe;
1457         u16 num = 0;
1458
1459         while ((eqe = event_get(eq_obj)) != NULL) {
1460                 eqe->evt = 0;
1461                 num++;
1462         }
1463
1464         /* Deal with any spurious interrupts that come
1465          * without events
1466          */
1467         if (!num)
1468                 rearm = true;
1469
1470         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1471         if (num)
1472                 napi_schedule(&eq_obj->napi);
1473
1474         return num;
1475 }
1476
1477 /* Just read and notify events without processing them.
1478  * Used when destroying event queues */
1479 static void be_eq_clean(struct be_adapter *adapter,
1480                         struct be_eq_obj *eq_obj)
1481 {
1482         struct be_eq_entry *eqe;
1483         u16 num = 0;
1484
1485         while ((eqe = event_get(eq_obj)) != NULL) {
1486                 eqe->evt = 0;
1487                 num++;
1488         }
1489
1490         if (num)
1491                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1492 }
1493
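/* Discard any pending RX completions, then release every buffer still
 * posted on the ring. Called only after the rxq has been destroyed, so no
 * new completions can arrive.
 */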
1494 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1495 {
1496         struct be_rx_page_info *page_info;
1497         struct be_queue_info *rxq = &rxo->q;
1498         struct be_queue_info *rx_cq = &rxo->cq;
1499         struct be_rx_compl_info *rxcp;
1500         u16 tail;
1501
1502         /* First clean up pending rx completions */
1503         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1504                 be_rx_compl_discard(adapter, rxo, rxcp);
1505                 be_cq_notify(adapter, rx_cq->id, false, 1);
1506         }
1507
1508         /* Then free posted rx buffers that were not used */
1509         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1510         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1511                 page_info = get_rx_page_info(adapter, rxo, tail);
1512                 put_page(page_info->page);
1513                 memset(page_info, 0, sizeof(*page_info));
1514         }
1515         BUG_ON(atomic_read(&rxq->used));
1516         rxq->tail = rxq->head = 0;
1517 }
1518
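/* Reap TX completions for up to 200ms. Anything still outstanding after the
 * timeout is reclaimed by walking the wrbs directly, so no skbs are leaked
 * when the device is being closed.
 */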
1519 static void be_tx_compl_clean(struct be_adapter *adapter,
1520                                 struct be_tx_obj *txo)
1521 {
1522         struct be_queue_info *tx_cq = &txo->cq;
1523         struct be_queue_info *txq = &txo->q;
1524         struct be_eth_tx_compl *txcp;
1525         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1526         struct sk_buff **sent_skbs = txo->sent_skb_list;
1527         struct sk_buff *sent_skb;
1528         bool dummy_wrb;
1529
1530         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1531         do {
1532                 while ((txcp = be_tx_compl_get(tx_cq))) {
1533                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1534                                         wrb_index, txcp);
1535                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1536                         cmpl++;
1537                 }
1538                 if (cmpl) {
1539                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1540                         atomic_sub(num_wrbs, &txq->used);
1541                         cmpl = 0;
1542                         num_wrbs = 0;
1543                 }
1544
1545                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1546                         break;
1547
1548                 mdelay(1);
1549         } while (true);
1550
1551         if (atomic_read(&txq->used))
1552                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1553                         atomic_read(&txq->used));
1554
1555         /* free posted tx for which compls will never arrive */
1556         while (atomic_read(&txq->used)) {
1557                 sent_skb = sent_skbs[txq->tail];
1558                 end_idx = txq->tail;
1559                 index_adv(&end_idx,
1560                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1561                         txq->len);
1562                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1563                 atomic_sub(num_wrbs, &txq->used);
1564         }
1565 }
1566
1567 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1568 {
1569         struct be_queue_info *q;
1570
1571         q = &adapter->mcc_obj.q;
1572         if (q->created)
1573                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1574         be_queue_free(adapter, q);
1575
1576         q = &adapter->mcc_obj.cq;
1577         if (q->created)
1578                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1579         be_queue_free(adapter, q);
1580 }
1581
1582 /* Must be called only after TX qs are created, as MCC shares the TX EQ */
1583 static int be_mcc_queues_create(struct be_adapter *adapter)
1584 {
1585         struct be_queue_info *q, *cq;
1586
1587         /* Alloc MCC compl queue */
1588         cq = &adapter->mcc_obj.cq;
1589         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1590                         sizeof(struct be_mcc_compl)))
1591                 goto err;
1592
1593         /* Ask BE to create MCC compl queue; share TX's eq */
1594         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1595                 goto mcc_cq_free;
1596
1597         /* Alloc MCC queue */
1598         q = &adapter->mcc_obj.q;
1599         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1600                 goto mcc_cq_destroy;
1601
1602         /* Ask BE to create MCC queue */
1603         if (be_cmd_mccq_create(adapter, q, cq))
1604                 goto mcc_q_free;
1605
1606         return 0;
1607
1608 mcc_q_free:
1609         be_queue_free(adapter, q);
1610 mcc_cq_destroy:
1611         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1612 mcc_cq_free:
1613         be_queue_free(adapter, cq);
1614 err:
1615         return -1;
1616 }
1617
1618 static void be_tx_queues_destroy(struct be_adapter *adapter)
1619 {
1620         struct be_queue_info *q;
1621         struct be_tx_obj *txo;
1622         u8 i;
1623
1624         for_all_tx_queues(adapter, txo, i) {
1625                 q = &txo->q;
1626                 if (q->created)
1627                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1628                 be_queue_free(adapter, q);
1629
1630                 q = &txo->cq;
1631                 if (q->created)
1632                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1633                 be_queue_free(adapter, q);
1634         }
1635
1636         /* Clear any residual events */
1637         be_eq_clean(adapter, &adapter->tx_eq);
1638
1639         q = &adapter->tx_eq.q;
1640         if (q->created)
1641                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1642         be_queue_free(adapter, q);
1643 }
1644
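/* Multiple TX queues are used only on a BE3 PF running single-channel; all
 * other configurations (SR-IOV enabled, multi-channel, Lancer, BE2, or a VF)
 * use a single TXQ.
 */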
1645 static int be_num_txqs_want(struct be_adapter *adapter)
1646 {
1647         if ((num_vfs && adapter->sriov_enabled) ||
1648                 be_is_mc(adapter) ||
1649                 lancer_chip(adapter) || !be_physfn(adapter) ||
1650                 adapter->generation == BE_GEN2)
1651                 return 1;
1652         else
1653                 return MAX_TX_QS;
1654 }
1655
1656 /* One TX event queue is shared by all TX compl qs */
1657 static int be_tx_queues_create(struct be_adapter *adapter)
1658 {
1659         struct be_queue_info *eq, *q, *cq;
1660         struct be_tx_obj *txo;
1661         u8 i;
1662
1663         adapter->num_tx_qs = be_num_txqs_want(adapter);
1664         if (adapter->num_tx_qs != MAX_TX_QS)
1665                 netif_set_real_num_tx_queues(adapter->netdev,
1666                         adapter->num_tx_qs);
1667
1668         adapter->tx_eq.max_eqd = 0;
1669         adapter->tx_eq.min_eqd = 0;
1670         adapter->tx_eq.cur_eqd = 96;
1671         adapter->tx_eq.enable_aic = false;
1672
1673         eq = &adapter->tx_eq.q;
1674         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1675                 sizeof(struct be_eq_entry)))
1676                 return -1;
1677
1678         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1679                 goto err;
1680         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1681
1682         for_all_tx_queues(adapter, txo, i) {
1683                 cq = &txo->cq;
1684                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1685                         sizeof(struct be_eth_tx_compl)))
1686                         goto err;
1687
1688                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1689                         goto err;
1690
1691                 q = &txo->q;
1692                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1693                         sizeof(struct be_eth_wrb)))
1694                         goto err;
1695
1696                 if (be_cmd_txq_create(adapter, q, cq))
1697                         goto err;
1698         }
1699         return 0;
1700
1701 err:
1702         be_tx_queues_destroy(adapter);
1703         return -1;
1704 }
1705
1706 static void be_rx_queues_destroy(struct be_adapter *adapter)
1707 {
1708         struct be_queue_info *q;
1709         struct be_rx_obj *rxo;
1710         int i;
1711
1712         for_all_rx_queues(adapter, rxo, i) {
1713                 be_queue_free(adapter, &rxo->q);
1714
1715                 q = &rxo->cq;
1716                 if (q->created)
1717                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1718                 be_queue_free(adapter, q);
1719
1720                 q = &rxo->rx_eq.q;
1721                 if (q->created)
1722                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1723                 be_queue_free(adapter, q);
1724         }
1725 }
1726
1727 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1728 {
1729         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1730                 !adapter->sriov_enabled && be_physfn(adapter) &&
1731                 !be_is_mc(adapter)) {
1732                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1733         } else {
1734                 dev_warn(&adapter->pdev->dev,
1735                         "No support for multiple RX queues\n");
1736                 return 1;
1737         }
1738 }
1739
1740 static int be_rx_queues_create(struct be_adapter *adapter)
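/* Allocate and create one EQ and one CQ per RX queue; the RX rings
 * themselves are only allocated here and are created in hardware later,
 * from be_open().
 */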
1741 {
1742         struct be_queue_info *eq, *q, *cq;
1743         struct be_rx_obj *rxo;
1744         int rc, i;
1745
1746         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1747                                 msix_enabled(adapter) ?
1748                                         adapter->num_msix_vec - 1 : 1);
1749         if (adapter->num_rx_qs != MAX_RX_QS)
1750                 dev_warn(&adapter->pdev->dev,
1751                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1752
1753         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1754         for_all_rx_queues(adapter, rxo, i) {
1755                 rxo->adapter = adapter;
1756                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1757                 rxo->rx_eq.enable_aic = true;
1758
1759                 /* EQ */
1760                 eq = &rxo->rx_eq.q;
1761                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762                                         sizeof(struct be_eq_entry));
1763                 if (rc)
1764                         goto err;
1765
1766                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1767                 if (rc)
1768                         goto err;
1769
1770                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1771
1772                 /* CQ */
1773                 cq = &rxo->cq;
1774                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1775                                 sizeof(struct be_eth_rx_compl));
1776                 if (rc)
1777                         goto err;
1778
1779                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1780                 if (rc)
1781                         goto err;
1782
1783                 /* Rx Q - will be created in be_open() */
1784                 q = &rxo->q;
1785                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1786                                 sizeof(struct be_eth_rx_d));
1787                 if (rc)
1788                         goto err;
1789
1790         }
1791
1792         return 0;
1793 err:
1794         be_rx_queues_destroy(adapter);
1795         return -1;
1796 }
1797
1798 static bool event_peek(struct be_eq_obj *eq_obj)
1799 {
1800         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1801         if (!eqe->evt)
1802                 return false;
1803         else
1804                 return true;
1805 }
1806
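/* Legacy INTx handler. Lancer has no per-EQ ISR register mapped, so pending
 * work is detected by peeking at the event queues; on BE, CEV_ISR0 reports
 * which EQs fired (e.g. with tx_eq at eq_idx 0 and two RX EQs at idx 1 and
 * 2, bits 0-2 select the handlers to run). Returns IRQ_NONE when the
 * interrupt was not ours.
 */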
1807 static irqreturn_t be_intx(int irq, void *dev)
1808 {
1809         struct be_adapter *adapter = dev;
1810         struct be_rx_obj *rxo;
1811         int isr, i, tx = 0 , rx = 0;
1812
1813         if (lancer_chip(adapter)) {
1814                 if (event_peek(&adapter->tx_eq))
1815                         tx = event_handle(adapter, &adapter->tx_eq, false);
1816                 for_all_rx_queues(adapter, rxo, i) {
1817                         if (event_peek(&rxo->rx_eq))
1818                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1819                 }
1820
1821                 if (!(tx || rx))
1822                         return IRQ_NONE;
1823
1824         } else {
1825                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1826                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1827                 if (!isr)
1828                         return IRQ_NONE;
1829
1830                 if ((1 << adapter->tx_eq.eq_idx & isr))
1831                         event_handle(adapter, &adapter->tx_eq, false);
1832
1833                 for_all_rx_queues(adapter, rxo, i) {
1834                         if ((1 << rxo->rx_eq.eq_idx & isr))
1835                                 event_handle(adapter, &rxo->rx_eq, true);
1836                 }
1837         }
1838
1839         return IRQ_HANDLED;
1840 }
1841
1842 static irqreturn_t be_msix_rx(int irq, void *dev)
1843 {
1844         struct be_rx_obj *rxo = dev;
1845         struct be_adapter *adapter = rxo->adapter;
1846
1847         event_handle(adapter, &rxo->rx_eq, true);
1848
1849         return IRQ_HANDLED;
1850 }
1851
1852 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1853 {
1854         struct be_adapter *adapter = dev;
1855
1856         event_handle(adapter, &adapter->tx_eq, false);
1857
1858         return IRQ_HANDLED;
1859 }
1860
1861 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1862 {
1863         return (rxcp->tcpf && !rxcp->err) ? true : false;
1864 }
1865
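/* NAPI poll for an RX queue: consume up to @budget completions, replenish
 * the RX ring when it falls below the refill watermark, and re-arm the CQ
 * once all pending work has been consumed.
 */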
1866 static int be_poll_rx(struct napi_struct *napi, int budget)
1867 {
1868         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1869         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1870         struct be_adapter *adapter = rxo->adapter;
1871         struct be_queue_info *rx_cq = &rxo->cq;
1872         struct be_rx_compl_info *rxcp;
1873         u32 work_done;
1874
1875         rx_stats(rxo)->rx_polls++;
1876         for (work_done = 0; work_done < budget; work_done++) {
1877                 rxcp = be_rx_compl_get(rxo);
1878                 if (!rxcp)
1879                         break;
1880
1881                 /* Is it a flush compl that has no data? */
1882                 if (unlikely(rxcp->num_rcvd == 0))
1883                         goto loop_continue;
1884
1885                 /* Discard compl with partial DMA Lancer B0 */
1886                 if (unlikely(!rxcp->pkt_size)) {
1887                         be_rx_compl_discard(adapter, rxo, rxcp);
1888                         goto loop_continue;
1889                 }
1890
1891                 /* On BE drop pkts that arrive due to imperfect filtering in
1892                  * promiscuous mode on some SKUs
1893                  */
1894                 if (unlikely(rxcp->port != adapter->port_num &&
1895                                 !lancer_chip(adapter))) {
1896                         be_rx_compl_discard(adapter, rxo, rxcp);
1897                         goto loop_continue;
1898                 }
1899
1900                 if (do_gro(rxcp))
1901                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1902                 else
1903                         be_rx_compl_process(adapter, rxo, rxcp);
1904 loop_continue:
1905                 be_rx_stats_update(rxo, rxcp);
1906         }
1907
1908         be_cq_notify(adapter, rx_cq->id, false, work_done);
1909
1910         /* Refill the queue */
1911         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1912                 be_post_rx_frags(rxo, GFP_ATOMIC);
1913
1914         /* All consumed */
1915         if (work_done < budget) {
1916                 napi_complete(napi);
1917                 /* Arm CQ */
1918                 be_cq_notify(adapter, rx_cq->id, true, 0);
1919         }
1920         return work_done;
1921 }
1922
1923 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1924  * For TX/MCC we don't honour the budget; consume everything.
1925  */
1926 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1927 {
1928         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1929         struct be_adapter *adapter =
1930                 container_of(tx_eq, struct be_adapter, tx_eq);
1931         struct be_tx_obj *txo;
1932         struct be_eth_tx_compl *txcp;
1933         int tx_compl, mcc_compl, status = 0;
1934         u8 i;
1935         u16 num_wrbs;
1936
1937         for_all_tx_queues(adapter, txo, i) {
1938                 tx_compl = 0;
1939                 num_wrbs = 0;
1940                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1941                         num_wrbs += be_tx_compl_process(adapter, txo,
1942                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1943                                         wrb_index, txcp));
1944                         tx_compl++;
1945                 }
1946                 if (tx_compl) {
1947                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1948
1949                         atomic_sub(num_wrbs, &txo->q.used);
1950
1951                         /* As Tx wrbs have been freed up, wake up netdev queue
1952                          * if it was stopped due to lack of tx wrbs.  */
1953                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1954                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1955                                 netif_wake_subqueue(adapter->netdev, i);
1956                         }
1957
1958                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1959                         tx_stats(txo)->tx_compl += tx_compl;
1960                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1961                 }
1962         }
1963
1964         mcc_compl = be_process_mcc(adapter, &status);
1965
1966         if (mcc_compl) {
1967                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1968                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1969         }
1970
1971         napi_complete(napi);
1972
1973         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1974         adapter->drv_stats.tx_events++;
1975         return 1;
1976 }
1977
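/* Read the Unrecoverable Error (UE) status registers from PCI config space,
 * apply the corresponding mask registers, and log a description of every
 * unmasked UE bit that is set, latching ue_detected/eeh_err.
 */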
1978 void be_detect_dump_ue(struct be_adapter *adapter)
1979 {
1980         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1981         u32 i;
1982
1983         pci_read_config_dword(adapter->pdev,
1984                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1985         pci_read_config_dword(adapter->pdev,
1986                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1987         pci_read_config_dword(adapter->pdev,
1988                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1989         pci_read_config_dword(adapter->pdev,
1990                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1991
1992         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1993         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1994
1995         if (ue_status_lo || ue_status_hi) {
1996                 adapter->ue_detected = true;
1997                 adapter->eeh_err = true;
1998                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1999         }
2000
2001         if (ue_status_lo) {
2002                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2003                         if (ue_status_lo & 1)
2004                                 dev_err(&adapter->pdev->dev,
2005                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2006                 }
2007         }
2008         if (ue_status_hi) {
2009                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2010                         if (ue_status_hi & 1)
2011                                 dev_err(&adapter->pdev->dev,
2012                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2013                 }
2014         }
2015
2016 }
2017
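/* Periodic housekeeping, rescheduled every second: UE detection, reaping MCC
 * completions while interrupts are off, issuing the async stats command,
 * adaptive EQ-delay updates and replenishing any starved RX rings.
 */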
2018 static void be_worker(struct work_struct *work)
2019 {
2020         struct be_adapter *adapter =
2021                 container_of(work, struct be_adapter, work.work);
2022         struct be_rx_obj *rxo;
2023         int i;
2024
2025         if (!adapter->ue_detected && !lancer_chip(adapter))
2026                 be_detect_dump_ue(adapter);
2027
2028         /* When interrupts are not yet enabled, just reap any pending
2029          * mcc completions */
2030         if (!netif_running(adapter->netdev)) {
2031                 int mcc_compl, status = 0;
2032
2033                 mcc_compl = be_process_mcc(adapter, &status);
2034
2035                 if (mcc_compl) {
2036                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2037                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2038                 }
2039
2040                 goto reschedule;
2041         }
2042
2043         if (!adapter->stats_cmd_sent) {
2044                 if (lancer_chip(adapter))
2045                         lancer_cmd_get_pport_stats(adapter,
2046                                                 &adapter->stats_cmd);
2047                 else
2048                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2049         }
2050
2051         for_all_rx_queues(adapter, rxo, i) {
2052                 be_rx_eqd_update(adapter, rxo);
2053
2054                 if (rxo->rx_post_starved) {
2055                         rxo->rx_post_starved = false;
2056                         be_post_rx_frags(rxo, GFP_KERNEL);
2057                 }
2058         }
2059
2060 reschedule:
2061         adapter->work_counter++;
2062         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2063 }
2064
2065 static void be_msix_disable(struct be_adapter *adapter)
2066 {
2067         if (msix_enabled(adapter)) {
2068                 pci_disable_msix(adapter->pdev);
2069                 adapter->num_msix_vec = 0;
2070         }
2071 }
2072
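/* Request one MSI-X vector per desired RX queue plus one for TX/MCC. If the
 * full set cannot be granted, retry with the number of vectors the kernel
 * reports as available; on failure num_msix_vec stays 0 and the driver
 * falls back to INTx.
 */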
2073 static void be_msix_enable(struct be_adapter *adapter)
2074 {
2075 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2076         int i, status, num_vec;
2077
2078         num_vec = be_num_rxqs_want(adapter) + 1;
2079
2080         for (i = 0; i < num_vec; i++)
2081                 adapter->msix_entries[i].entry = i;
2082
2083         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2084         if (status == 0) {
2085                 goto done;
2086         } else if (status >= BE_MIN_MSIX_VECTORS) {
2087                 num_vec = status;
2088                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2089                                 num_vec) == 0)
2090                         goto done;
2091         }
2092         return;
2093 done:
2094         adapter->num_msix_vec = num_vec;
2095         return;
2096 }
2097
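/* On a PF with the num_vfs module parameter set, enable SR-IOV, capping
 * num_vfs at the TotalVFs value advertised in the SR-IOV capability, and
 * allocate the per-VF config array.
 */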
2098 static int be_sriov_enable(struct be_adapter *adapter)
2099 {
2100         be_check_sriov_fn_type(adapter);
2101 #ifdef CONFIG_PCI_IOV
2102         if (be_physfn(adapter) && num_vfs) {
2103                 int status, pos;
2104                 u16 nvfs;
2105
2106                 pos = pci_find_ext_capability(adapter->pdev,
2107                                                 PCI_EXT_CAP_ID_SRIOV);
2108                 pci_read_config_word(adapter->pdev,
2109                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2110
2111                 if (num_vfs > nvfs) {
2112                         dev_info(&adapter->pdev->dev,
2113                                         "Device supports %d VFs and not %d\n",
2114                                         nvfs, num_vfs);
2115                         num_vfs = nvfs;
2116                 }
2117
2118                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2119                 adapter->sriov_enabled = status ? false : true;
2120
2121                 if (adapter->sriov_enabled) {
2122                         adapter->vf_cfg = kcalloc(num_vfs,
2123                                                 sizeof(struct be_vf_cfg),
2124                                                 GFP_KERNEL);
2125                         if (!adapter->vf_cfg)
2126                                 return -ENOMEM;
2127                 }
2128         }
2129 #endif
2130         return 0;
2131 }
2132
2133 static void be_sriov_disable(struct be_adapter *adapter)
2134 {
2135 #ifdef CONFIG_PCI_IOV
2136         if (adapter->sriov_enabled) {
2137                 pci_disable_sriov(adapter->pdev);
2138                 kfree(adapter->vf_cfg);
2139                 adapter->sriov_enabled = false;
2140         }
2141 #endif
2142 }
2143
2144 static inline int be_msix_vec_get(struct be_adapter *adapter,
2145                                         struct be_eq_obj *eq_obj)
2146 {
2147         return adapter->msix_entries[eq_obj->eq_idx].vector;
2148 }
2149
2150 static int be_request_irq(struct be_adapter *adapter,
2151                 struct be_eq_obj *eq_obj,
2152                 void *handler, char *desc, void *context)
2153 {
2154         struct net_device *netdev = adapter->netdev;
2155         int vec;
2156
2157         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2158         vec = be_msix_vec_get(adapter, eq_obj);
2159         return request_irq(vec, handler, 0, eq_obj->desc, context);
2160 }
2161
2162 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2163                         void *context)
2164 {
2165         int vec = be_msix_vec_get(adapter, eq_obj);
2166         free_irq(vec, context);
2167 }
2168
2169 static int be_msix_register(struct be_adapter *adapter)
2170 {
2171         struct be_rx_obj *rxo;
2172         int status, i;
2173         char qname[10];
2174
2175         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2176                                 adapter);
2177         if (status)
2178                 goto err;
2179
2180         for_all_rx_queues(adapter, rxo, i) {
2181                 sprintf(qname, "rxq%d", i);
2182                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2183                                 qname, rxo);
2184                 if (status)
2185                         goto err_msix;
2186         }
2187
2188         return 0;
2189
2190 err_msix:
2191         be_free_irq(adapter, &adapter->tx_eq, adapter);
2192
2193         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2194                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2195
2196 err:
2197         dev_warn(&adapter->pdev->dev,
2198                 "MSIX Request IRQ failed - err %d\n", status);
2199         be_msix_disable(adapter);
2200         return status;
2201 }
2202
2203 static int be_irq_register(struct be_adapter *adapter)
2204 {
2205         struct net_device *netdev = adapter->netdev;
2206         int status;
2207
2208         if (msix_enabled(adapter)) {
2209                 status = be_msix_register(adapter);
2210                 if (status == 0)
2211                         goto done;
2212                 /* INTx is not supported for VF */
2213                 if (!be_physfn(adapter))
2214                         return status;
2215         }
2216
2217         /* INTx */
2218         netdev->irq = adapter->pdev->irq;
2219         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2220                         adapter);
2221         if (status) {
2222                 dev_err(&adapter->pdev->dev,
2223                         "INTx request IRQ failed - err %d\n", status);
2224                 return status;
2225         }
2226 done:
2227         adapter->isr_registered = true;
2228         return 0;
2229 }
2230
2231 static void be_irq_unregister(struct be_adapter *adapter)
2232 {
2233         struct net_device *netdev = adapter->netdev;
2234         struct be_rx_obj *rxo;
2235         int i;
2236
2237         if (!adapter->isr_registered)
2238                 return;
2239
2240         /* INTx */
2241         if (!msix_enabled(adapter)) {
2242                 free_irq(netdev->irq, adapter);
2243                 goto done;
2244         }
2245
2246         /* MSIx */
2247         be_free_irq(adapter, &adapter->tx_eq, adapter);
2248
2249         for_all_rx_queues(adapter, rxo, i)
2250                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2251
2252 done:
2253         adapter->isr_registered = false;
2254 }
2255
2256 static void be_rx_queues_clear(struct be_adapter *adapter)
2257 {
2258         struct be_queue_info *q;
2259         struct be_rx_obj *rxo;
2260         int i;
2261
2262         for_all_rx_queues(adapter, rxo, i) {
2263                 q = &rxo->q;
2264                 if (q->created) {
2265                         be_cmd_rxq_destroy(adapter, q);
2266                         /* After the rxq is invalidated, wait for a grace time
2267                          * of 1ms for all DMA to end and the flush compl to
2268                          * arrive
2269                          */
2270                         mdelay(1);
2271                         be_rx_q_clean(adapter, rxo);
2272                 }
2273
2274                 /* Clear any residual events */
2275                 q = &rxo->rx_eq.q;
2276                 if (q->created)
2277                         be_eq_clean(adapter, &rxo->rx_eq);
2278         }
2279 }
2280
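/* Quiesce the device on ifdown: disable async MCC processing and interrupts,
 * stop NAPI, drain outstanding TX completions and reclaim all RX buffers.
 */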
2281 static int be_close(struct net_device *netdev)
2282 {
2283         struct be_adapter *adapter = netdev_priv(netdev);
2284         struct be_rx_obj *rxo;
2285         struct be_tx_obj *txo;
2286         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2287         int vec, i;
2288
2289         be_async_mcc_disable(adapter);
2290
2291         if (!lancer_chip(adapter))
2292                 be_intr_set(adapter, false);
2293
2294         for_all_rx_queues(adapter, rxo, i)
2295                 napi_disable(&rxo->rx_eq.napi);
2296
2297         napi_disable(&tx_eq->napi);
2298
2299         if (lancer_chip(adapter)) {
2300                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2301                 for_all_rx_queues(adapter, rxo, i)
2302                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2303                 for_all_tx_queues(adapter, txo, i)
2304                          be_cq_notify(adapter, txo->cq.id, false, 0);
2305         }
2306
2307         if (msix_enabled(adapter)) {
2308                 vec = be_msix_vec_get(adapter, tx_eq);
2309                 synchronize_irq(vec);
2310
2311                 for_all_rx_queues(adapter, rxo, i) {
2312                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2313                         synchronize_irq(vec);
2314                 }
2315         } else {
2316                 synchronize_irq(netdev->irq);
2317         }
2318         be_irq_unregister(adapter);
2319
2320         /* Wait for all pending tx completions to arrive so that
2321          * all tx skbs are freed.
2322          */
2323         for_all_tx_queues(adapter, txo, i)
2324                 be_tx_compl_clean(adapter, txo);
2325
2326         be_rx_queues_clear(adapter);
2327         return 0;
2328 }
2329
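/* Create the RX rings in hardware (ring 0 is the default non-RSS ring),
 * program the RSS queue table when multiple rings exist, then post the
 * initial buffers and enable NAPI on each ring.
 */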
2330 static int be_rx_queues_setup(struct be_adapter *adapter)
2331 {
2332         struct be_rx_obj *rxo;
2333         int rc, i;
2334         u8 rsstable[MAX_RSS_QS];
2335
2336         for_all_rx_queues(adapter, rxo, i) {
2337                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2338                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2339                         adapter->if_handle,
2340                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2341                 if (rc)
2342                         return rc;
2343         }
2344
2345         if (be_multi_rxq(adapter)) {
2346                 for_all_rss_queues(adapter, rxo, i)
2347                         rsstable[i] = rxo->rss_id;
2348
2349                 rc = be_cmd_rss_config(adapter, rsstable,
2350                         adapter->num_rx_qs - 1);
2351                 if (rc)
2352                         return rc;
2353         }
2354
2355         /* First time posting */
2356         for_all_rx_queues(adapter, rxo, i) {
2357                 be_post_rx_frags(rxo, GFP_KERNEL);
2358                 napi_enable(&rxo->rx_eq.napi);
2359         }
2360         return 0;
2361 }
2362
2363 static int be_open(struct net_device *netdev)
2364 {
2365         struct be_adapter *adapter = netdev_priv(netdev);
2366         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2367         struct be_rx_obj *rxo;
2368         int status, i;
2369
2370         status = be_rx_queues_setup(adapter);
2371         if (status)
2372                 goto err;
2373
2374         napi_enable(&tx_eq->napi);
2375
2376         be_irq_register(adapter);
2377
2378         if (!lancer_chip(adapter))
2379                 be_intr_set(adapter, true);
2380
2381         /* The evt queues are created in unarmed state; arm them */
2382         for_all_rx_queues(adapter, rxo, i) {
2383                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2384                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2385         }
2386         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2387
2388         /* Now that interrupts are on we can process async mcc */
2389         be_async_mcc_enable(adapter);
2390
2391         return 0;
2392 err:
2393         be_close(adapter->netdev);
2394         return -EIO;
2395 }
2396
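/* Program (or clear) the magic-packet WoL filter in the firmware and flag
 * the PCI device as wake-capable from D3hot/D3cold accordingly.
 */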
2397 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2398 {
2399         struct be_dma_mem cmd;
2400         int status = 0;
2401         u8 mac[ETH_ALEN];
2402
2403         memset(mac, 0, ETH_ALEN);
2404
2405         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2406         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2407                                     GFP_KERNEL);
2408         if (cmd.va == NULL)
2409                 return -1;
2410         memset(cmd.va, 0, cmd.size);
2411
2412         if (enable) {
2413                 status = pci_write_config_dword(adapter->pdev,
2414                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2415                 if (status) {
2416                         dev_err(&adapter->pdev->dev,
2417                                 "Could not enable Wake-on-lan\n");
2418                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2419                                           cmd.dma);
2420                         return status;
2421                 }
2422                 status = be_cmd_enable_magic_wol(adapter,
2423                                 adapter->netdev->dev_addr, &cmd);
2424                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2425                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2426         } else {
2427                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2428                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2429                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2430         }
2431
2432         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2433         return status;
2434 }
2435
2436 /*
2437  * Generate a seed MAC address from the PF MAC Address using jhash.
2438  * MAC addresses for VFs are assigned incrementally starting from the seed.
2439  * These addresses are programmed in the ASIC by the PF and the VF driver
2440  * queries for the MAC address during its probe.
2441  */
2442 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2443 {
2444         u32 vf;
2445         int status = 0;
2446         u8 mac[ETH_ALEN];
2447
2448         be_vf_eth_addr_generate(adapter, mac);
2449
2450         for (vf = 0; vf < num_vfs; vf++) {
2451                 status = be_cmd_pmac_add(adapter, mac,
2452                                         adapter->vf_cfg[vf].vf_if_handle,
2453                                         &adapter->vf_cfg[vf].vf_pmac_id,
2454                                         vf + 1);
2455                 if (status)
2456                         dev_err(&adapter->pdev->dev,
2457                                 "Mac address add failed for VF %d\n", vf);
2458                 else
2459                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2460
2461                 mac[5] += 1;
2462         }
2463         return status;
2464 }
2465
2466 static void be_vf_clear(struct be_adapter *adapter)
2467 {
2468         u32 vf;
2469
2470         for (vf = 0; vf < num_vfs; vf++) {
2471                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2472                         be_cmd_pmac_del(adapter,
2473                                         adapter->vf_cfg[vf].vf_if_handle,
2474                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2475         }
2476
2477         for (vf = 0; vf < num_vfs; vf++)
2478                 if (adapter->vf_cfg[vf].vf_if_handle)
2479                         be_cmd_if_destroy(adapter,
2480                                 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2481 }
2482
2483 static int be_clear(struct be_adapter *adapter)
2484 {
2485         if (be_physfn(adapter) && adapter->sriov_enabled)
2486                 be_vf_clear(adapter);
2487
2488         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2489
2490         be_mcc_queues_destroy(adapter);
2491         be_rx_queues_destroy(adapter);
2492         be_tx_queues_destroy(adapter);
2493         adapter->eq_next_idx = 0;
2494
2495         adapter->be3_native = false;
2496         adapter->promiscuous = false;
2497
2498         /* tell fw we're done with firing cmds */
2499         be_cmd_fw_clean(adapter);
2500         return 0;
2501 }
2502
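/* Create an interface for each VF, program the seed-derived MAC addresses
 * (skipped on Lancer) and cache each VF's link speed for tx-rate
 * bookkeeping.
 */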
2503 static int be_vf_setup(struct be_adapter *adapter)
2504 {
2505         u32 cap_flags, en_flags, vf;
2506         u16 lnk_speed;
2507         int status;
2508
2509         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2510         for (vf = 0; vf < num_vfs; vf++) {
2511                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2512                                         &adapter->vf_cfg[vf].vf_if_handle,
2513                                         NULL, vf+1);
2514                 if (status)
2515                         goto err;
2516                 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2517         }
2518
2519         if (!lancer_chip(adapter)) {
2520                 status = be_vf_eth_addr_config(adapter);
2521                 if (status)
2522                         goto err;
2523         }
2524
2525         for (vf = 0; vf < num_vfs; vf++) {
2526                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2527                                 vf + 1);
2528                 if (status)
2529                         goto err;
2530                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2531         }
2532         return 0;
2533 err:
2534         return status;
2535 }
2536
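/* One-time bring-up of the adapter: create the TX, RX and MCC queues, create
 * the interface with the queried permanent MAC, apply VLAN/RX-mode/flow
 * control settings and, on an SR-IOV-enabled PF, configure the VFs.
 */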
2537 static int be_setup(struct be_adapter *adapter)
2538 {
2539         struct net_device *netdev = adapter->netdev;
2540         u32 cap_flags, en_flags;
2541         u32 tx_fc, rx_fc;
2542         int status;
2543         u8 mac[ETH_ALEN];
2544
2545         /* Allow all priorities by default. A GRP5 evt may modify this */
2546         adapter->vlan_prio_bmap = 0xff;
2547         adapter->link_speed = -1;
2548
2549         be_cmd_req_native_mode(adapter);
2550
2551         status = be_tx_queues_create(adapter);
2552         if (status != 0)
2553                 goto err;
2554
2555         status = be_rx_queues_create(adapter);
2556         if (status != 0)
2557                 goto err;
2558
2559         status = be_mcc_queues_create(adapter);
2560         if (status != 0)
2561                 goto err;
2562
2563         memset(mac, 0, ETH_ALEN);
2564         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2565                         true /*permanent */, 0);
2566         if (status)
2567                 return status;
2568         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2569         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2570
2571         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2572                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2573         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2574                         BE_IF_FLAGS_PROMISCUOUS;
2575         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2576                 cap_flags |= BE_IF_FLAGS_RSS;
2577                 en_flags |= BE_IF_FLAGS_RSS;
2578         }
2579         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2580                         netdev->dev_addr, &adapter->if_handle,
2581                         &adapter->pmac_id, 0);
2582         if (status != 0)
2583                 goto err;
2584
2585         /* For BEx, the VF's permanent MAC queried from the card is incorrect.
2586          * Query the MAC configured by the PF using the if_handle
2587          */
2588         if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2589                 status = be_cmd_mac_addr_query(adapter, mac,
2590                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2591                 if (!status) {
2592                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2593                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2594                 }
2595         }
2596
2597         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2598
2599         status = be_vid_config(adapter, false, 0);
2600         if (status)
2601                 goto err;
2602
2603         be_set_rx_mode(adapter->netdev);
2604
2605         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2606         if (status)
2607                 goto err;
2608         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2609                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2610                                         adapter->rx_fc);
2611                 if (status)
2612                         goto err;
2613         }
2614
2615         pcie_set_readrq(adapter->pdev, 4096);
2616
2617         if (be_physfn(adapter) && adapter->sriov_enabled) {
2618                 status = be_vf_setup(adapter);
2619                 if (status)
2620                         goto err;
2621         }
2622
2623         return 0;
2624 err:
2625         be_clear(adapter);
2626         return status;
2627 }
2628
2629 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2630 static bool be_flash_redboot(struct be_adapter *adapter,
2631                         const u8 *p, u32 img_start, int image_size,
2632                         int hdr_size)
2633 {
2634         u32 crc_offset;
2635         u8 flashed_crc[4];
2636         int status;
2637
2638         crc_offset = hdr_size + img_start + image_size - 4;
2639
2640         p += crc_offset;
2641
2642         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2643                         (image_size - 4));
2644         if (status) {
2645                 dev_err(&adapter->pdev->dev,
2646                 "could not get crc from flash, not flashing redboot\n");
2647                 return false;
2648         }
2649
2650         /* update redboot only if the crc does not match */
2651         if (!memcmp(flashed_crc, p, 4))
2652                 return false;
2653         else
2654                 return true;
2655 }
2656
2657 static bool phy_flashing_required(struct be_adapter *adapter)
2658 {
2659         int status = 0;
2660         struct be_phy_info phy_info;
2661
2662         status = be_cmd_get_phy_info(adapter, &phy_info);
2663         if (status)
2664                 return false;
2665         if ((phy_info.phy_type == TN_8022) &&
2666                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2667                 return true;
2668         }
2669         return false;
2670 }
2671
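/* Walk the per-generation flash layout table and write every applicable
 * component of the UFI image in 32KB chunks: intermediate chunks use a SAVE
 * op and the final chunk a FLASH op, which commits the section.
 */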
2672 static int be_flash_data(struct be_adapter *adapter,
2673                         const struct firmware *fw,
2674                         struct be_dma_mem *flash_cmd, int num_of_images)
2675
2676 {
2677         int status = 0, i, filehdr_size = 0;
2678         u32 total_bytes = 0, flash_op;
2679         int num_bytes;
2680         const u8 *p = fw->data;
2681         struct be_cmd_write_flashrom *req = flash_cmd->va;
2682         const struct flash_comp *pflashcomp;
2683         int num_comp;
2684
2685         static const struct flash_comp gen3_flash_types[10] = {
2686                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2687                         FLASH_IMAGE_MAX_SIZE_g3},
2688                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2689                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2690                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2691                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2692                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2693                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2694                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2695                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2696                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2697                         FLASH_IMAGE_MAX_SIZE_g3},
2698                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2699                         FLASH_IMAGE_MAX_SIZE_g3},
2700                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2701                         FLASH_IMAGE_MAX_SIZE_g3},
2702                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2703                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2704                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2705                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2706         };
2707         static const struct flash_comp gen2_flash_types[8] = {
2708                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2709                         FLASH_IMAGE_MAX_SIZE_g2},
2710                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2711                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2712                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2713                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2714                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2715                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2716                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2717                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2718                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2719                         FLASH_IMAGE_MAX_SIZE_g2},
2720                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2721                         FLASH_IMAGE_MAX_SIZE_g2},
2722                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2723                          FLASH_IMAGE_MAX_SIZE_g2}
2724         };
2725
2726         if (adapter->generation == BE_GEN3) {
2727                 pflashcomp = gen3_flash_types;
2728                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2729                 num_comp = ARRAY_SIZE(gen3_flash_types);
2730         } else {
2731                 pflashcomp = gen2_flash_types;
2732                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2733                 num_comp = ARRAY_SIZE(gen2_flash_types);
2734         }
2735         for (i = 0; i < num_comp; i++) {
2736                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2737                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2738                         continue;
2739                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2740                         if (!phy_flashing_required(adapter))
2741                                 continue;
2742                 }
2743                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2744                         (!be_flash_redboot(adapter, fw->data,
2745                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2746                         (num_of_images * sizeof(struct image_hdr)))))
2747                         continue;
2748                 p = fw->data;
2749                 p += filehdr_size + pflashcomp[i].offset
2750                         + (num_of_images * sizeof(struct image_hdr));
2751                 if (p + pflashcomp[i].size > fw->data + fw->size)
2752                         return -1;
2753                 total_bytes = pflashcomp[i].size;
2754                 while (total_bytes) {
2755                         if (total_bytes > 32*1024)
2756                                 num_bytes = 32*1024;
2757                         else
2758                                 num_bytes = total_bytes;
2759                         total_bytes -= num_bytes;
2760                         if (!total_bytes) {
2761                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2762                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2763                                 else
2764                                         flash_op = FLASHROM_OPER_FLASH;
2765                         } else {
2766                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2767                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2768                                 else
2769                                         flash_op = FLASHROM_OPER_SAVE;
2770                         }
2771                         memcpy(req->params.data_buf, p, num_bytes);
2772                         p += num_bytes;
2773                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2774                                 pflashcomp[i].optype, flash_op, num_bytes);
2775                         if (status) {
2776                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2777                                         (pflashcomp[i].optype ==
2778                                                 IMG_TYPE_PHY_FW))
2779                                         break;
2780                                 dev_err(&adapter->pdev->dev,
2781                                         "cmd to write to flash rom failed.\n");
2782                                 return -1;
2783                         }
2784                 }
2785         }
2786         return 0;
2787 }
2788
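/* Infer the UFI image generation (BE_GEN2/BE_GEN3) from the first character
 * of the build string in the flash file header.
 */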
2789 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2790 {
2791         if (fhdr == NULL)
2792                 return 0;
2793         if (fhdr->build[0] == '3')
2794                 return BE_GEN3;
2795         else if (fhdr->build[0] == '2')
2796                 return BE_GEN2;
2797         else
2798                 return 0;
2799 }
2800
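/* Lancer firmware download: stream the image to the "/prg" object in 32KB
 * write-object commands, then commit it with a final zero-length write at
 * the end offset.
 */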
2801 static int lancer_fw_download(struct be_adapter *adapter,
2802                                 const struct firmware *fw)
2803 {
2804 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2805 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2806         struct be_dma_mem flash_cmd;
2807         const u8 *data_ptr = NULL;
2808         u8 *dest_image_ptr = NULL;
2809         size_t image_size = 0;
2810         u32 chunk_size = 0;
2811         u32 data_written = 0;
2812         u32 offset = 0;
2813         int status = 0;
2814         u8 add_status = 0;
2815
2816         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2817                 dev_err(&adapter->pdev->dev,
2818                         "FW Image not properly aligned. "
2819                         "Length must be 4-byte aligned.\n");
2820                 status = -EINVAL;
2821                 goto lancer_fw_exit;
2822         }
2823
2824         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2825                                 + LANCER_FW_DOWNLOAD_CHUNK;
2826         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2827                                                 &flash_cmd.dma, GFP_KERNEL);
2828         if (!flash_cmd.va) {
2829                 status = -ENOMEM;
2830                 dev_err(&adapter->pdev->dev,
2831                         "Memory allocation failure while flashing\n");
2832                 goto lancer_fw_exit;
2833         }
2834
2835         dest_image_ptr = flash_cmd.va +
2836                                 sizeof(struct lancer_cmd_req_write_object);
2837         image_size = fw->size;
2838         data_ptr = fw->data;
2839
2840         while (image_size) {
2841                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2842
2843                 /* Copy the image chunk content. */
2844                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2845
2846                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2847                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2848                                 &data_written, &add_status);
2849
2850                 if (status)
2851                         break;
2852
2853                 offset += data_written;
2854                 data_ptr += data_written;
2855                 image_size -= data_written;
2856         }
2857
2858         if (!status) {
2859                 /* Commit the FW written */
2860                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2861                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2862                                         &data_written, &add_status);
2863         }
2864
2865         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2866                                 flash_cmd.dma);
2867         if (status) {
2868                 dev_err(&adapter->pdev->dev,
2869                         "Firmware load error. "
2870                         "Status code: 0x%x Additional Status: 0x%x\n",
2871                         status, add_status);
2872                 goto lancer_fw_exit;
2873         }
2874
2875         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2876 lancer_fw_exit:
2877         return status;
2878 }
2879
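/* Download firmware to a BE2/BE3 chip. The UFI image generation must match
 * the adapter generation; gen3 images carry a table of per-image headers,
 * and only imageid 1 is flashed here.
 */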
static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI image and adapter generation are not compatible "
			"for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

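/* Entry point for firmware flashing. Fetches the image named by fw_file
 * with request_firmware() and dispatches to the Lancer or BE2/BE3 download
 * path. Typically reached from userspace via ethtool's flash ioctl, e.g.
 * (interface name and image file below are hypothetical):
 *
 *     ethtool -f eth0 be_fw.ufi
 */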
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

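/* netdev entry points; the ndo_set_vf_* hooks let the PF configure the
 * MAC, VLAN and TX rate of its SR-IOV virtual functions.
 */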
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

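/* Set up netdev features, ops and per-queue NAPI contexts. RX hashing is
 * advertised only when multiple RX queues are in use.
 */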
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

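/* Map the PCI BARs used by the driver. Lancer exposes only a doorbell
 * region in BAR 0. On BE2/BE3 the PF maps the CSR space from BAR 2, and
 * the doorbells live in BAR 4, except for BE3 VFs which use BAR 0.
 */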
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2 || be_physfn(adapter))
		db_reg = 4;
	else
		db_reg = 0;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

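/* Map PCI BARs and allocate the DMA memory needed to talk to the
 * controller: the bootstrap mailbox (over-allocated by 16 bytes so it can
 * be aligned to the 16-byte boundary the hardware expects) and the RX
 * filter command buffer.
 */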
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev,
					   rx_filter->size, &rx_filter->dma,
					   GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

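/* Allocate the DMA buffer for firmware statistics queries. The buffer size
 * depends on the command version the chip speaks: v0 on BE2, PPORT stats
 * on Lancer, v1 otherwise.
 */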
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2)
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

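/* PCI remove: tear everything down in roughly the reverse order of
 * be_probe().
 */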
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

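/* Query static configuration from firmware: port number, function mode
 * and capabilities, plus controller attributes. In FLEX10 mode only a
 * quarter of the VLAN table is available to this function, presumably
 * because the table is shared among channels.
 */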
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

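/* Determine the adapter generation from the PCI device ID. Lancer parts
 * (OC_DEVICE_ID3/4) are additionally validated through the SLI_INTF
 * register, which must be valid and report interface type 2.
 */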
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

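/* Poll the SLIPORT status register for the ready bit, checking every 20ms
 * for up to 500 iterations (roughly 10 seconds) before giving up.
 */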
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

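/* Wait for the Lancer SLI port to become ready. If the port reports an
 * error with the reset-needed bit set, trigger a port reset via
 * SLIPORT_CONTROL and re-check; any remaining error is treated as fatal.
 */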
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check whether the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

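/* PCI probe: enables the device, maps BARs, syncs with firmware POST,
 * resets the function, reads its configuration, and brings up the queues
 * and the netdev. Each failure label unwinds exactly the steps completed.
 */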
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev,
				"Adapter is in a non-recoverable error state\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

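/* Legacy PM suspend hook: quiesce the interface, tear down rings and
 * MSI-X, optionally arm wake-on-LAN, and enter the requested power state.
 */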
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

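/* Legacy PM resume hook: re-enable the device, restore PCI state,
 * re-initialize firmware and rings, and reattach the netdev; the 100ms
 * delayed worker is rescheduled at the end, as in probe.
 */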
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * A function-level reset (FLR) stops the controller from DMA-ing any
 * further data, which is what we want at shutdown.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

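/*
 * PCI error recovery (EEH) callbacks: detach and tear down on error
 * detection, re-enable the device and re-POST on slot reset, and rebuild
 * rings and reattach the netdev on resume.
 */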
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

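/* Module init: validate the rx_frag_size parameter before registering the
 * PCI driver. For example (hypothetical value), loading with
 *
 *     modprobe be2net rx_frag_size=8192
 *
 * is accepted, while any size other than 2048/4096/8192 is coerced back
 * to the 2048 default with a warning.
 */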
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			": module param rx_frag_size must be 2048, 4096 or "
			"8192; using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);