/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

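/*
 * Illustrative usage sketch (the queue length and entry type below are
 * placeholders, not values taken from this driver): every successful
 * be_queue_alloc() must be paired with be_queue_free() on teardown.
 *
 *        struct be_queue_info q;
 *
 *        if (be_queue_alloc(adapter, &q, 256, sizeof(struct be_eth_wrb)))
 *                return -ENOMEM;
 *        ...
 *        be_queue_free(adapter, &q);
 */
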
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

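/*
 * Doorbell layout, worked example (illustrative): each notify routine above
 * packs the ring id into the low bits of a 32-bit doorbell word and ORs in
 * the event-specific fields before a single iowrite32().  For instance,
 * re-arming EQ 5 after popping 3 events builds:
 *
 *        val = (5 & DB_EQ_RING_ID_MASK)
 *            | (1 << DB_EQ_REARM_SHIFT)              arm the EQ again
 *            | (1 << DB_EQ_EVNT_SHIFT)               this is an event queue
 *            | (3 << DB_EQ_NUM_POPPED_SHIFT);        credit 3 consumed entries
 *
 * The wmb() in the RQ/TXQ paths orders the descriptor writes in memory
 * before the doorbell write reaches the device.
 */
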
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

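/*
 * Worked example (illustrative): the hardware counter is only 16 bits wide,
 * so the driver keeps the accumulated wrap count in the high half of *acc
 * and the last raw reading in the low half.  If *acc = 0x0001FFF0 (previous
 * reading 0xFFF0) and the next reading is 0x0005, val < lo(*acc) detects
 * the wrap and 65536 is added back:
 * newacc = 0x00010000 + 0x0005 + 0x10000 = 0x00020005.
 */
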
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

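/*
 * Note on the fetch loops above (explanatory): on 32-bit hosts a 64-bit
 * counter cannot be read atomically, so each loop samples the u64_stats
 * seqcount, copies the counters, and retries if a writer (the datapath,
 * under u64_stats_update_begin/end) raced with the read.  On 64-bit hosts
 * the begin/retry helpers compile away to nothing.
 */
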
void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

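/*
 * Worked example (illustrative): an skb with a linear head and two frags
 * needs 1 (head) + 2 (frags) + 1 (header wrb) = 4 WRBs; 4 is even, so no
 * dummy is added.  With only one frag the count would be 3, and on
 * non-Lancer chips a dummy WRB is appended to round it up to 4, since
 * those chips require an even number of WRBs per transmit.
 */
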
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

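/*
 * Flow-control note (explanatory): BE_MAX_TX_FRAG_COUNT is the most WRBs a
 * single skb can consume (wrb_cnt_for_skb() BUG()s beyond it), so stopping
 * the subqueue once used + BE_MAX_TX_FRAG_COUNT >= len guarantees the next
 * be_xmit() can never overrun the ring.  Stopping before ringing the
 * doorbell ensures the completion that eventually frees space observes the
 * stopped state and can wake the queue.
 */
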
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

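/*
 * Worked example (illustrative): at 440,000 pkts/s the code above computes
 * eqd = (440000 / 110000) << 3 = 32 and then clamps it to
 * [min_eqd, max_eqd].  Because the division is integer, any rate below
 * 220,000 pkts/s yields a shifted value under 10, which is forced to 0:
 * coalescing is simply switched off at low packet rates to keep latency
 * down.
 */
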
static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

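/*
 * Explanatory note: csum_passed() trusts the hardware checksum only when
 * (a) the frame is TCP or UDP, (b) the L4 checksum passed, and (c) either
 * the IP header checksum passed or the packet is IPv6, which has no header
 * checksum to verify.  Anything else falls through to software checksum
 * validation.
 */
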
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

1320 /*
1321  * Allocate a page, split it to fragments of size rx_frag_size and post as
1322  * receive buffers to BE
1323  */
1324 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1325 {
1326         struct be_adapter *adapter = rxo->adapter;
1327         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1328         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1329         struct be_queue_info *rxq = &rxo->q;
1330         struct page *pagep = NULL;
1331         struct be_eth_rx_d *rxd;
1332         u64 page_dmaaddr = 0, frag_dmaaddr;
1333         u32 posted, page_offset = 0;
1334
1335         page_info = &rxo->page_info_tbl[rxq->head];
1336         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1337                 if (!pagep) {
1338                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1339                         if (unlikely(!pagep)) {
1340                                 rx_stats(rxo)->rx_post_fail++;
1341                                 break;
1342                         }
1343                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1344                                                     0, adapter->big_page_size,
1345                                                     DMA_FROM_DEVICE);
1346                         page_info->page_offset = 0;
1347                 } else {
1348                         get_page(pagep);
1349                         page_info->page_offset = page_offset + rx_frag_size;
1350                 }
1351                 page_offset = page_info->page_offset;
1352                 page_info->page = pagep;
1353                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1354                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1355
1356                 rxd = queue_head_node(rxq);
1357                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1358                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1359
1360                 /* Any space left in the current big page for another frag? */
1361                 if ((page_offset + rx_frag_size + rx_frag_size) >
1362                                         adapter->big_page_size) {
1363                         pagep = NULL;
1364                         page_info->last_page_user = true;
1365                 }
1366
1367                 prev_page_info = page_info;
1368                 queue_head_inc(rxq);
1369                 page_info = &page_info_tbl[rxq->head];
1370         }
1371         if (pagep)
1372                 prev_page_info->last_page_user = true;
1373
1374         if (posted) {
1375                 atomic_add(posted, &rxq->used);
1376                 be_rxq_notify(adapter, rxq->id, posted);
1377         } else if (atomic_read(&rxq->used) == 0) {
1378                 /* Let be_worker replenish when memory is available */
1379                 rxo->rx_post_starved = true;
1380         }
1381 }
1382
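/* Return the next valid TX completion in the CQ, or NULL if none is
 * pending.  The rmb() keeps the entry from being read before its valid
 * dword; the valid dword is then cleared so the slot reads as empty when
 * the CQ wraps around.
 */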
1383 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1384 {
1385         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1386
1387         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1388                 return NULL;
1389
1390         rmb();
1391         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1392
1393         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1394
1395         queue_tail_inc(tx_cq);
1396         return txcp;
1397 }
1398
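/* Reclaim one transmitted skb: starting at the header WRB at txq->tail,
 * unmap every WRB up to last_index and free the skb.  Returns the number
 * of WRBs (header included) consumed, so the caller can credit txq->used.
 */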
1399 static u16 be_tx_compl_process(struct be_adapter *adapter,
1400                 struct be_tx_obj *txo, u16 last_index)
1401 {
1402         struct be_queue_info *txq = &txo->q;
1403         struct be_eth_wrb *wrb;
1404         struct sk_buff **sent_skbs = txo->sent_skb_list;
1405         struct sk_buff *sent_skb;
1406         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1407         bool unmap_skb_hdr = true;
1408
1409         sent_skb = sent_skbs[txq->tail];
1410         BUG_ON(!sent_skb);
1411         sent_skbs[txq->tail] = NULL;
1412
1413         /* skip header wrb */
1414         queue_tail_inc(txq);
1415
1416         do {
1417                 cur_index = txq->tail;
1418                 wrb = queue_tail_node(txq);
1419                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1420                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1421                 unmap_skb_hdr = false;
1422
1423                 num_wrbs++;
1424                 queue_tail_inc(txq);
1425         } while (cur_index != last_index);
1426
1427         kfree_skb(sent_skb);
1428         return num_wrbs;
1429 }
1430
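/* Return the next posted event queue entry, or NULL if the queue is empty.
 * The rmb() orders the valid-check of eqe->evt against any later reads;
 * the caller clears eqe->evt once the event has been processed.
 */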
1431 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1432 {
1433         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1434
1435         if (!eqe->evt)
1436                 return NULL;
1437
1438         rmb();
1439         eqe->evt = le32_to_cpu(eqe->evt);
1440         queue_tail_inc(&eq_obj->q);
1441         return eqe;
1442 }
1443
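/* Drain the event queue, notify the consumed count back to the EQ doorbell
 * and schedule NAPI if anything was found.  A spurious interrupt (no
 * events) re-arms the EQ so further interrupts are not lost.
 */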
1444 static int event_handle(struct be_adapter *adapter,
1445                         struct be_eq_obj *eq_obj,
1446                         bool rearm)
1447 {
1448         struct be_eq_entry *eqe;
1449         u16 num = 0;
1450
1451         while ((eqe = event_get(eq_obj)) != NULL) {
1452                 eqe->evt = 0;
1453                 num++;
1454         }
1455
1456         /* Deal with any spurious interrupts that come
1457          * without events
1458          */
1459         if (!num)
1460                 rearm = true;
1461
1462         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1463         if (num)
1464                 napi_schedule(&eq_obj->napi);
1465
1466         return num;
1467 }
1468
1469 /* Just read and notify events without processing them.
1470  * Used at the time of destroying event queues */
1471 static void be_eq_clean(struct be_adapter *adapter,
1472                         struct be_eq_obj *eq_obj)
1473 {
1474         struct be_eq_entry *eqe;
1475         u16 num = 0;
1476
1477         while ((eqe = event_get(eq_obj)) != NULL) {
1478                 eqe->evt = 0;
1479                 num++;
1480         }
1481
1482         if (num)
1483                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1484 }
1485
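/* Flush an RX queue: discard any pending completions, then release the
 * posted-but-unconsumed buffers.  The oldest posted buffer sits 'used'
 * entries behind the head, hence tail = (head + len - used) % len.
 */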
1486 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1487 {
1488         struct be_rx_page_info *page_info;
1489         struct be_queue_info *rxq = &rxo->q;
1490         struct be_queue_info *rx_cq = &rxo->cq;
1491         struct be_rx_compl_info *rxcp;
1492         u16 tail;
1493
1494         /* First cleanup pending rx completions */
1495         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1496                 be_rx_compl_discard(adapter, rxo, rxcp);
1497                 be_cq_notify(adapter, rx_cq->id, false, 1);
1498         }
1499
1500         /* Then free the posted rx buffers that were not used */
1501         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1502         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1503                 page_info = get_rx_page_info(adapter, rxo, tail);
1504                 put_page(page_info->page);
1505                 memset(page_info, 0, sizeof(*page_info));
1506         }
1507         BUG_ON(atomic_read(&rxq->used));
1508         rxq->tail = rxq->head = 0;
1509 }
1510
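/* Drain TX completions while closing.  If completions stop arriving
 * (after ~200ms) the remaining posted wrbs are unmapped and their skbs
 * freed by hand so nothing is leaked.
 */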
1511 static void be_tx_compl_clean(struct be_adapter *adapter,
1512                                 struct be_tx_obj *txo)
1513 {
1514         struct be_queue_info *tx_cq = &txo->cq;
1515         struct be_queue_info *txq = &txo->q;
1516         struct be_eth_tx_compl *txcp;
1517         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1518         struct sk_buff **sent_skbs = txo->sent_skb_list;
1519         struct sk_buff *sent_skb;
1520         bool dummy_wrb;
1521
1522         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1523         do {
1524                 while ((txcp = be_tx_compl_get(tx_cq))) {
1525                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1526                                         wrb_index, txcp);
1527                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1528                         cmpl++;
1529                 }
1530                 if (cmpl) {
1531                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1532                         atomic_sub(num_wrbs, &txq->used);
1533                         cmpl = 0;
1534                         num_wrbs = 0;
1535                 }
1536
1537                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1538                         break;
1539
1540                 mdelay(1);
1541         } while (true);
1542
1543         if (atomic_read(&txq->used))
1544                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1545                         atomic_read(&txq->used));
1546
1547         /* free posted tx for which compls will never arrive */
1548         while (atomic_read(&txq->used)) {
1549                 sent_skb = sent_skbs[txq->tail];
1550                 end_idx = txq->tail;
1551                 index_adv(&end_idx,
1552                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1553                         txq->len);
1554                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1555                 atomic_sub(num_wrbs, &txq->used);
1556         }
1557 }
1558
1559 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1560 {
1561         struct be_queue_info *q;
1562
1563         q = &adapter->mcc_obj.q;
1564         if (q->created)
1565                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1566         be_queue_free(adapter, q);
1567
1568         q = &adapter->mcc_obj.cq;
1569         if (q->created)
1570                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1571         be_queue_free(adapter, q);
1572 }
1573
1574 /* Must be called only after TX qs are created, as MCC shares TX EQ */
1575 static int be_mcc_queues_create(struct be_adapter *adapter)
1576 {
1577         struct be_queue_info *q, *cq;
1578
1579         /* Alloc MCC compl queue */
1580         cq = &adapter->mcc_obj.cq;
1581         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1582                         sizeof(struct be_mcc_compl)))
1583                 goto err;
1584
1585         /* Ask BE to create MCC compl queue; share TX's eq */
1586         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1587                 goto mcc_cq_free;
1588
1589         /* Alloc MCC queue */
1590         q = &adapter->mcc_obj.q;
1591         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1592                 goto mcc_cq_destroy;
1593
1594         /* Ask BE to create MCC queue */
1595         if (be_cmd_mccq_create(adapter, q, cq))
1596                 goto mcc_q_free;
1597
1598         return 0;
1599
1600 mcc_q_free:
1601         be_queue_free(adapter, q);
1602 mcc_cq_destroy:
1603         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1604 mcc_cq_free:
1605         be_queue_free(adapter, cq);
1606 err:
1607         return -1;
1608 }
1609
1610 static void be_tx_queues_destroy(struct be_adapter *adapter)
1611 {
1612         struct be_queue_info *q;
1613         struct be_tx_obj *txo;
1614         u8 i;
1615
1616         for_all_tx_queues(adapter, txo, i) {
1617                 q = &txo->q;
1618                 if (q->created)
1619                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1620                 be_queue_free(adapter, q);
1621
1622                 q = &txo->cq;
1623                 if (q->created)
1624                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1625                 be_queue_free(adapter, q);
1626         }
1627
1628         /* Clear any residual events */
1629         be_eq_clean(adapter, &adapter->tx_eq);
1630
1631         q = &adapter->tx_eq.q;
1632         if (q->created)
1633                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1634         be_queue_free(adapter, q);
1635 }
1636
1637 /* One TX event queue is shared by all TX compl qs */
1638 static int be_tx_queues_create(struct be_adapter *adapter)
1639 {
1640         struct be_queue_info *eq, *q, *cq;
1641         struct be_tx_obj *txo;
1642         u8 i;
1643
1644         adapter->tx_eq.max_eqd = 0;
1645         adapter->tx_eq.min_eqd = 0;
1646         adapter->tx_eq.cur_eqd = 96;
1647         adapter->tx_eq.enable_aic = false;
1648
1649         eq = &adapter->tx_eq.q;
1650         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1651                 sizeof(struct be_eq_entry)))
1652                 return -1;
1653
1654         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1655                 goto err;
1656         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1657
1658         for_all_tx_queues(adapter, txo, i) {
1659                 cq = &txo->cq;
1660                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1661                         sizeof(struct be_eth_tx_compl)))
1662                         goto err;
1663
1664                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1665                         goto err;
1666
1667                 q = &txo->q;
1668                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1669                         sizeof(struct be_eth_wrb)))
1670                         goto err;
1671
1672                 if (be_cmd_txq_create(adapter, q, cq))
1673                         goto err;
1674         }
1675         return 0;
1676
1677 err:
1678         be_tx_queues_destroy(adapter);
1679         return -1;
1680 }
1681
1682 static void be_rx_queues_destroy(struct be_adapter *adapter)
1683 {
1684         struct be_queue_info *q;
1685         struct be_rx_obj *rxo;
1686         int i;
1687
1688         for_all_rx_queues(adapter, rxo, i) {
1689                 be_queue_free(adapter, &rxo->q);
1690
1691                 q = &rxo->cq;
1692                 if (q->created)
1693                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1694                 be_queue_free(adapter, q);
1695
1696                 q = &rxo->rx_eq.q;
1697                 if (q->created)
1698                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1699                 be_queue_free(adapter, q);
1700         }
1701 }
1702
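/* Multiple RX queues need RSS capability and no SR-IOV; the 0x400
 * function-mode bit appears to indicate multi-channel (FLEX10) operation,
 * where RSS is likewise unavailable.
 */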
1703 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1704 {
1705         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1706                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1707                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1708         } else {
1709                 dev_warn(&adapter->pdev->dev,
1710                         "No support for multiple RX queues\n");
1711                 return 1;
1712         }
1713 }
1714
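/* Allocate and create an EQ/CQ pair per RX queue; the RX rings themselves
 * are only allocated here and created later, from be_open().
 */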
1715 static int be_rx_queues_create(struct be_adapter *adapter)
1716 {
1717         struct be_queue_info *eq, *q, *cq;
1718         struct be_rx_obj *rxo;
1719         int rc, i;
1720
1721         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1722                                 msix_enabled(adapter) ?
1723                                         adapter->num_msix_vec - 1 : 1);
1724         if (adapter->num_rx_qs != MAX_RX_QS)
1725                 dev_warn(&adapter->pdev->dev,
1726                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1727
1728         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1729         for_all_rx_queues(adapter, rxo, i) {
1730                 rxo->adapter = adapter;
1731                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1732                 rxo->rx_eq.enable_aic = true;
1733
1734                 /* EQ */
1735                 eq = &rxo->rx_eq.q;
1736                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1737                                         sizeof(struct be_eq_entry));
1738                 if (rc)
1739                         goto err;
1740
1741                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1742                 if (rc)
1743                         goto err;
1744
1745                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1746
1747                 /* CQ */
1748                 cq = &rxo->cq;
1749                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1750                                 sizeof(struct be_eth_rx_compl));
1751                 if (rc)
1752                         goto err;
1753
1754                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1755                 if (rc)
1756                         goto err;
1757
1758                 /* Rx Q - will be created in be_open() */
1759                 q = &rxo->q;
1760                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1761                                 sizeof(struct be_eth_rx_d));
1762                 if (rc)
1763                         goto err;
1764
1765         }
1766
1767         return 0;
1768 err:
1769         be_rx_queues_destroy(adapter);
1770         return -1;
1771 }
1772
1773 static bool event_peek(struct be_eq_obj *eq_obj)
1774 {
1775         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1776         return eqe->evt != 0;
1780 }
1781
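/* Legacy INTx handler.  On Lancer each EQ is peeked directly; on BE the
 * CEV ISR register indicates which EQs fired.  Returns IRQ_NONE when the
 * (possibly shared) interrupt was not ours.
 */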
1782 static irqreturn_t be_intx(int irq, void *dev)
1783 {
1784         struct be_adapter *adapter = dev;
1785         struct be_rx_obj *rxo;
1786         int isr, i, tx = 0, rx = 0;
1787
1788         if (lancer_chip(adapter)) {
1789                 if (event_peek(&adapter->tx_eq))
1790                         tx = event_handle(adapter, &adapter->tx_eq, false);
1791                 for_all_rx_queues(adapter, rxo, i) {
1792                         if (event_peek(&rxo->rx_eq))
1793                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1794                 }
1795
1796                 if (!(tx || rx))
1797                         return IRQ_NONE;
1798
1799         } else {
1800                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1801                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1802                 if (!isr)
1803                         return IRQ_NONE;
1804
1805                 if ((1 << adapter->tx_eq.eq_idx & isr))
1806                         event_handle(adapter, &adapter->tx_eq, false);
1807
1808                 for_all_rx_queues(adapter, rxo, i) {
1809                         if ((1 << rxo->rx_eq.eq_idx & isr))
1810                                 event_handle(adapter, &rxo->rx_eq, true);
1811                 }
1812         }
1813
1814         return IRQ_HANDLED;
1815 }
1816
1817 static irqreturn_t be_msix_rx(int irq, void *dev)
1818 {
1819         struct be_rx_obj *rxo = dev;
1820         struct be_adapter *adapter = rxo->adapter;
1821
1822         event_handle(adapter, &rxo->rx_eq, true);
1823
1824         return IRQ_HANDLED;
1825 }
1826
1827 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1828 {
1829         struct be_adapter *adapter = dev;
1830
1831         event_handle(adapter, &adapter->tx_eq, false);
1832
1833         return IRQ_HANDLED;
1834 }
1835
1836 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1837 {
1838         return rxcp->tcpf && !rxcp->err;
1839 }
1840
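/* NAPI poll for an RX queue: consume up to 'budget' completions, top up
 * the RX ring when it runs low and re-arm the CQ only after all pending
 * work has been processed.
 */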
1841 static int be_poll_rx(struct napi_struct *napi, int budget)
1842 {
1843         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1844         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1845         struct be_adapter *adapter = rxo->adapter;
1846         struct be_queue_info *rx_cq = &rxo->cq;
1847         struct be_rx_compl_info *rxcp;
1848         u32 work_done;
1849
1850         rx_stats(rxo)->rx_polls++;
1851         for (work_done = 0; work_done < budget; work_done++) {
1852                 rxcp = be_rx_compl_get(rxo);
1853                 if (!rxcp)
1854                         break;
1855
1856                 /* Is it a flush compl that has no data? */
1857                 if (unlikely(rxcp->num_rcvd == 0))
1858                         goto loop_continue;
1859
1860                 /* Discard compl with partial DMA Lancer B0 */
1861                 if (unlikely(!rxcp->pkt_size)) {
1862                         be_rx_compl_discard(adapter, rxo, rxcp);
1863                         goto loop_continue;
1864                 }
1865
1866                 /* On BE, drop pkts that arrive due to imperfect filtering in
1867                  * promiscuous mode on some SKUs
1868                  */
1869                 if (unlikely(rxcp->port != adapter->port_num &&
1870                                 !lancer_chip(adapter))) {
1871                         be_rx_compl_discard(adapter, rxo, rxcp);
1872                         goto loop_continue;
1873                 }
1874
1875                 if (do_gro(rxcp))
1876                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1877                 else
1878                         be_rx_compl_process(adapter, rxo, rxcp);
1879 loop_continue:
1880                 be_rx_stats_update(rxo, rxcp);
1881         }
1882
1883         /* Refill the queue */
1884         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1885                 be_post_rx_frags(rxo, GFP_ATOMIC);
1886
1887         /* All consumed */
1888         if (work_done < budget) {
1889                 napi_complete(napi);
1890                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1891         } else {
1892                 /* More to be consumed; continue with interrupts disabled */
1893                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1894         }
1895         return work_done;
1896 }
1897
1898 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1899  * For TX/MCC we don't honour budget; consume everything.
1900  */
1901 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1902 {
1903         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1904         struct be_adapter *adapter =
1905                 container_of(tx_eq, struct be_adapter, tx_eq);
1906         struct be_tx_obj *txo;
1907         struct be_eth_tx_compl *txcp;
1908         int tx_compl, mcc_compl, status = 0;
1909         u8 i;
1910         u16 num_wrbs;
1911
1912         for_all_tx_queues(adapter, txo, i) {
1913                 tx_compl = 0;
1914                 num_wrbs = 0;
1915                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1916                         num_wrbs += be_tx_compl_process(adapter, txo,
1917                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1918                                         wrb_index, txcp));
1919                         tx_compl++;
1920                 }
1921                 if (tx_compl) {
1922                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1923
1924                         atomic_sub(num_wrbs, &txo->q.used);
1925
1926                         /* As Tx wrbs have been freed up, wake up netdev queue
1927                          * if it was stopped due to lack of tx wrbs.  */
1928                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1929                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1930                                 netif_wake_subqueue(adapter->netdev, i);
1931                         }
1932
1933                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1934                         tx_stats(txo)->tx_compl += tx_compl;
1935                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1936                 }
1937         }
1938
1939         mcc_compl = be_process_mcc(adapter, &status);
1940
1941         if (mcc_compl) {
1942                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1943                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1944         }
1945
1946         napi_complete(napi);
1947
1948         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1949         adapter->drv_stats.tx_events++;
1950         return 1;
1951 }
1952
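/* Read the UE (unrecoverable error) status words from PCI config space,
 * apply their masks and log every error bit that is set.
 */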
1953 void be_detect_dump_ue(struct be_adapter *adapter)
1954 {
1955         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1956         u32 i;
1957
1958         pci_read_config_dword(adapter->pdev,
1959                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1960         pci_read_config_dword(adapter->pdev,
1961                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1962         pci_read_config_dword(adapter->pdev,
1963                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1964         pci_read_config_dword(adapter->pdev,
1965                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1966
1967         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1968         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1969
1970         if (ue_status_lo || ue_status_hi) {
1971                 adapter->ue_detected = true;
1972                 adapter->eeh_err = true;
1973                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1974         }
1975
1976         if (ue_status_lo) {
1977                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1978                         if (ue_status_lo & 1)
1979                                 dev_err(&adapter->pdev->dev,
1980                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1981                 }
1982         }
1983         if (ue_status_hi) {
1984                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1985                         if (ue_status_hi & 1)
1986                                 dev_err(&adapter->pdev->dev,
1987                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1988                 }
1989         }
1990
1991 }
1992
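/* Periodic (1s) housekeeping: UE detection, firmware stats refresh,
 * adaptive EQ-delay update and replenishing of starved RX queues.
 */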
1993 static void be_worker(struct work_struct *work)
1994 {
1995         struct be_adapter *adapter =
1996                 container_of(work, struct be_adapter, work.work);
1997         struct be_rx_obj *rxo;
1998         int i;
1999
2000         if (!adapter->ue_detected && !lancer_chip(adapter))
2001                 be_detect_dump_ue(adapter);
2002
2003         /* when interrupts are not yet enabled, just reap any pending
2004          * mcc completions */
2005         if (!netif_running(adapter->netdev)) {
2006                 int mcc_compl, status = 0;
2007
2008                 mcc_compl = be_process_mcc(adapter, &status);
2009
2010                 if (mcc_compl) {
2011                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2012                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2013                 }
2014
2015                 goto reschedule;
2016         }
2017
2018         if (!adapter->stats_cmd_sent) {
2019                 if (lancer_chip(adapter))
2020                         lancer_cmd_get_pport_stats(adapter,
2021                                                 &adapter->stats_cmd);
2022                 else
2023                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2024         }
2025
2026         for_all_rx_queues(adapter, rxo, i) {
2027                 be_rx_eqd_update(adapter, rxo);
2028
2029                 if (rxo->rx_post_starved) {
2030                         rxo->rx_post_starved = false;
2031                         be_post_rx_frags(rxo, GFP_KERNEL);
2032                 }
2033         }
2034
2035 reschedule:
2036         adapter->work_counter++;
2037         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2038 }
2039
2040 static void be_msix_disable(struct be_adapter *adapter)
2041 {
2042         if (msix_enabled(adapter)) {
2043                 pci_disable_msix(adapter->pdev);
2044                 adapter->num_msix_vec = 0;
2045         }
2046 }
2047
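/* Ask for one MSI-X vector per desired RX queue plus one for TX/MCC.
 * A positive return from pci_enable_msix() is the number of vectors
 * actually available, so the request is retried with that count.
 */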
2048 static void be_msix_enable(struct be_adapter *adapter)
2049 {
2050 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2051         int i, status, num_vec;
2052
2053         num_vec = be_num_rxqs_want(adapter) + 1;
2054
2055         for (i = 0; i < num_vec; i++)
2056                 adapter->msix_entries[i].entry = i;
2057
2058         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2059         if (status == 0) {
2060                 goto done;
2061         } else if (status >= BE_MIN_MSIX_VECTORS) {
2062                 num_vec = status;
2063                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2064                                 num_vec) == 0)
2065                         goto done;
2066         }
2067         return;
2068 done:
2069         adapter->num_msix_vec = num_vec;
2070         return;
2071 }
2072
2073 static void be_sriov_enable(struct be_adapter *adapter)
2074 {
2075         be_check_sriov_fn_type(adapter);
2076 #ifdef CONFIG_PCI_IOV
2077         if (be_physfn(adapter) && num_vfs) {
2078                 int status, pos;
2079                 u16 nvfs;
2080
2081                 pos = pci_find_ext_capability(adapter->pdev,
2082                                                 PCI_EXT_CAP_ID_SRIOV);
2083                 pci_read_config_word(adapter->pdev,
2084                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2085
2086                 if (num_vfs > nvfs) {
2087                         dev_info(&adapter->pdev->dev,
2088                                         "Device supports only %d VFs, not %d\n",
2089                                         nvfs, num_vfs);
2090                         num_vfs = nvfs;
2091                 }
2092
2093                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2094                 adapter->sriov_enabled = !status;
2095         }
2096 #endif
2097 }
2098
2099 static void be_sriov_disable(struct be_adapter *adapter)
2100 {
2101 #ifdef CONFIG_PCI_IOV
2102         if (adapter->sriov_enabled) {
2103                 pci_disable_sriov(adapter->pdev);
2104                 adapter->sriov_enabled = false;
2105         }
2106 #endif
2107 }
2108
2109 static inline int be_msix_vec_get(struct be_adapter *adapter,
2110                                         struct be_eq_obj *eq_obj)
2111 {
2112         return adapter->msix_entries[eq_obj->eq_idx].vector;
2113 }
2114
2115 static int be_request_irq(struct be_adapter *adapter,
2116                 struct be_eq_obj *eq_obj,
2117                 void *handler, char *desc, void *context)
2118 {
2119         struct net_device *netdev = adapter->netdev;
2120         int vec;
2121
2122         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2123         vec = be_msix_vec_get(adapter, eq_obj);
2124         return request_irq(vec, handler, 0, eq_obj->desc, context);
2125 }
2126
2127 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2128                         void *context)
2129 {
2130         int vec = be_msix_vec_get(adapter, eq_obj);
2131         free_irq(vec, context);
2132 }
2133
2134 static int be_msix_register(struct be_adapter *adapter)
2135 {
2136         struct be_rx_obj *rxo;
2137         int status, i;
2138         char qname[10];
2139
2140         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2141                                 adapter);
2142         if (status)
2143                 goto err;
2144
2145         for_all_rx_queues(adapter, rxo, i) {
2146                 sprintf(qname, "rxq%d", i);
2147                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2148                                 qname, rxo);
2149                 if (status)
2150                         goto err_msix;
2151         }
2152
2153         return 0;
2154
2155 err_msix:
2156         be_free_irq(adapter, &adapter->tx_eq, adapter);
2157
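        /* 'i' is the rx queue whose request failed; unwind the IRQs
         * registered for queues 0..i-1 */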
2158         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2159                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2160
2161 err:
2162         dev_warn(&adapter->pdev->dev,
2163                 "MSIX Request IRQ failed - err %d\n", status);
2164         be_msix_disable(adapter);
2165         return status;
2166 }
2167
2168 static int be_irq_register(struct be_adapter *adapter)
2169 {
2170         struct net_device *netdev = adapter->netdev;
2171         int status;
2172
2173         if (msix_enabled(adapter)) {
2174                 status = be_msix_register(adapter);
2175                 if (status == 0)
2176                         goto done;
2177                 /* INTx is not supported for VF */
2178                 if (!be_physfn(adapter))
2179                         return status;
2180         }
2181
2182         /* INTx */
2183         netdev->irq = adapter->pdev->irq;
2184         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2185                         adapter);
2186         if (status) {
2187                 dev_err(&adapter->pdev->dev,
2188                         "INTx request IRQ failed - err %d\n", status);
2189                 return status;
2190         }
2191 done:
2192         adapter->isr_registered = true;
2193         return 0;
2194 }
2195
2196 static void be_irq_unregister(struct be_adapter *adapter)
2197 {
2198         struct net_device *netdev = adapter->netdev;
2199         struct be_rx_obj *rxo;
2200         int i;
2201
2202         if (!adapter->isr_registered)
2203                 return;
2204
2205         /* INTx */
2206         if (!msix_enabled(adapter)) {
2207                 free_irq(netdev->irq, adapter);
2208                 goto done;
2209         }
2210
2211         /* MSIx */
2212         be_free_irq(adapter, &adapter->tx_eq, adapter);
2213
2214         for_all_rx_queues(adapter, rxo, i)
2215                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2216
2217 done:
2218         adapter->isr_registered = false;
2219 }
2220
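/* Destroy the RX rings and flush whatever they still hold: pending
 * completions, posted buffers and residual events.
 */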
2221 static void be_rx_queues_clear(struct be_adapter *adapter)
2222 {
2223         struct be_queue_info *q;
2224         struct be_rx_obj *rxo;
2225         int i;
2226
2227         for_all_rx_queues(adapter, rxo, i) {
2228                 q = &rxo->q;
2229                 if (q->created) {
2230                         be_cmd_rxq_destroy(adapter, q);
2231                         /* After the rxq is invalidated, wait for a grace time
2232                          * of 1ms for all dma to end and the flush compl to
2233                          * arrive
2234                          */
2235                         mdelay(1);
2236                         be_rx_q_clean(adapter, rxo);
2237                 }
2238
2239                 /* Clear any residual events */
2240                 q = &rxo->rx_eq.q;
2241                 if (q->created)
2242                         be_eq_clean(adapter, &rxo->rx_eq);
2243         }
2244 }
2245
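/* Quiesce the interface: stop async MCC processing, mask interrupts,
 * disable NAPI, free the IRQs, then drain TX completions and clear the
 * RX queues so every outstanding buffer is returned.
 */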
2246 static int be_close(struct net_device *netdev)
2247 {
2248         struct be_adapter *adapter = netdev_priv(netdev);
2249         struct be_rx_obj *rxo;
2250         struct be_tx_obj *txo;
2251         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2252         int vec, i;
2253
2254         be_async_mcc_disable(adapter);
2255
2256         if (!lancer_chip(adapter))
2257                 be_intr_set(adapter, false);
2258
2259         for_all_rx_queues(adapter, rxo, i)
2260                 napi_disable(&rxo->rx_eq.napi);
2261
2262         napi_disable(&tx_eq->napi);
2263
2264         if (lancer_chip(adapter)) {
2265                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2266                 for_all_rx_queues(adapter, rxo, i)
2267                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2268                 for_all_tx_queues(adapter, txo, i)
2269                         be_cq_notify(adapter, txo->cq.id, false, 0);
2270         }
2271
2272         if (msix_enabled(adapter)) {
2273                 vec = be_msix_vec_get(adapter, tx_eq);
2274                 synchronize_irq(vec);
2275
2276                 for_all_rx_queues(adapter, rxo, i) {
2277                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2278                         synchronize_irq(vec);
2279                 }
2280         } else {
2281                 synchronize_irq(netdev->irq);
2282         }
2283         be_irq_unregister(adapter);
2284
2285         /* Wait for all pending tx completions to arrive so that
2286          * all tx skbs are freed.
2287          */
2288         for_all_tx_queues(adapter, txo, i)
2289                 be_tx_compl_clean(adapter, txo);
2290
2291         be_rx_queues_clear(adapter);
2292         return 0;
2293 }
2294
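/* Create the RX rings (queue 0 is the default, non-RSS queue), program
 * the RSS indirection table when several queues exist, and post the
 * initial set of receive buffers.
 */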
2295 static int be_rx_queues_setup(struct be_adapter *adapter)
2296 {
2297         struct be_rx_obj *rxo;
2298         int rc, i;
2299         u8 rsstable[MAX_RSS_QS];
2300
2301         for_all_rx_queues(adapter, rxo, i) {
2302                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2303                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2304                         adapter->if_handle,
2305                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2306                 if (rc)
2307                         return rc;
2308         }
2309
2310         if (be_multi_rxq(adapter)) {
2311                 for_all_rss_queues(adapter, rxo, i)
2312                         rsstable[i] = rxo->rss_id;
2313
2314                 rc = be_cmd_rss_config(adapter, rsstable,
2315                         adapter->num_rx_qs - 1);
2316                 if (rc)
2317                         return rc;
2318         }
2319
2320         /* First time posting */
2321         for_all_rx_queues(adapter, rxo, i) {
2322                 be_post_rx_frags(rxo, GFP_KERNEL);
2323                 napi_enable(&rxo->rx_eq.napi);
2324         }
2325         return 0;
2326 }
2327
2328 static int be_open(struct net_device *netdev)
2329 {
2330         struct be_adapter *adapter = netdev_priv(netdev);
2331         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2332         struct be_rx_obj *rxo;
2333         int status, i;
2334
2335         status = be_rx_queues_setup(adapter);
2336         if (status)
2337                 goto err;
2338
2339         napi_enable(&tx_eq->napi);
2340
2341         be_irq_register(adapter);
2342
2343         if (!lancer_chip(adapter))
2344                 be_intr_set(adapter, true);
2345
2346         /* The evt queues are created in unarmed state; arm them */
2347         for_all_rx_queues(adapter, rxo, i) {
2348                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2349                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2350         }
2351         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2352
2353         /* Now that interrupts are on we can process async mcc */
2354         be_async_mcc_enable(adapter);
2355
2356         if (be_physfn(adapter)) {
2357                 status = be_vid_config(adapter, false, 0);
2358                 if (status)
2359                         goto err;
2360
2361                 status = be_cmd_set_flow_control(adapter,
2362                                 adapter->tx_fc, adapter->rx_fc);
2363                 if (status)
2364                         goto err;
2365         }
2366
2367         return 0;
2368 err:
2369         be_close(adapter->netdev);
2370         return -EIO;
2371 }
2372
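/* Program (or clear) the magic-packet WoL filter in firmware and set the
 * matching PCI wake state; passing an all-zero MAC disables the filter.
 */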
2373 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2374 {
2375         struct be_dma_mem cmd;
2376         int status = 0;
2377         u8 mac[ETH_ALEN];
2378
2379         memset(mac, 0, ETH_ALEN);
2380
2381         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2382         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2383                                     GFP_KERNEL);
2384         if (cmd.va == NULL)
2385                 return -ENOMEM;
2386         memset(cmd.va, 0, cmd.size);
2387
2388         if (enable) {
2389                 status = pci_write_config_dword(adapter->pdev,
2390                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2391                 if (status) {
2392                         dev_err(&adapter->pdev->dev,
2393                                 "Could not enable Wake-on-LAN\n");
2394                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2395                                           cmd.dma);
2396                         return status;
2397                 }
2398                 status = be_cmd_enable_magic_wol(adapter,
2399                                 adapter->netdev->dev_addr, &cmd);
2400                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2401                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2402         } else {
2403                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2404                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2405                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2406         }
2407
2408         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2409         return status;
2410 }
2411
2412 /*
2413  * Generate a seed MAC address from the PF MAC Address using jhash.
2414  * MAC addresses for VFs are assigned incrementally starting from the seed.
2415  * These addresses are programmed in the ASIC by the PF and the VF driver
2416  * queries for the MAC address during its probe.
2417  */
2418 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2419 {
2420         u32 vf = 0;
2421         int status = 0;
2422         u8 mac[ETH_ALEN];
2423
2424         be_vf_eth_addr_generate(adapter, mac);
2425
2426         for (vf = 0; vf < num_vfs; vf++) {
2427                 status = be_cmd_pmac_add(adapter, mac,
2428                                         adapter->vf_cfg[vf].vf_if_handle,
2429                                         &adapter->vf_cfg[vf].vf_pmac_id,
2430                                         vf + 1);
2431                 if (status)
2432                         dev_err(&adapter->pdev->dev,
2433                                 "MAC address add failed for VF %d\n", vf);
2434                 else
2435                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2436
2437                 mac[5] += 1;
2438         }
2439         return status;
2440 }
2441
2442 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2443 {
2444         u32 vf;
2445
2446         for (vf = 0; vf < num_vfs; vf++) {
2447                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2448                         be_cmd_pmac_del(adapter,
2449                                         adapter->vf_cfg[vf].vf_if_handle,
2450                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2451         }
2452 }
2453
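/* Bring up the function: create the interface (plus one per VF when
 * SR-IOV is enabled on the PF) and then the TX, RX and MCC queue sets.
 */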
2454 static int be_setup(struct be_adapter *adapter)
2455 {
2456         struct net_device *netdev = adapter->netdev;
2457         u32 cap_flags, en_flags, vf = 0;
2458         int status;
2459         u8 mac[ETH_ALEN];
2460
2461         be_cmd_req_native_mode(adapter);
2462
2463         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2464                                 BE_IF_FLAGS_BROADCAST |
2465                                 BE_IF_FLAGS_MULTICAST;
2466
2467         if (be_physfn(adapter)) {
2468                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2469                                 BE_IF_FLAGS_PROMISCUOUS |
2470                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2471                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2472
2473                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2474                         cap_flags |= BE_IF_FLAGS_RSS;
2475                         en_flags |= BE_IF_FLAGS_RSS;
2476                 }
2477         }
2478
2479         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2480                         netdev->dev_addr, false/* pmac_invalid */,
2481                         &adapter->if_handle, &adapter->pmac_id, 0);
2482         if (status != 0)
2483                 goto do_none;
2484
2485         if (be_physfn(adapter)) {
2486                 if (adapter->sriov_enabled) {
2487                         while (vf < num_vfs) {
2488                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2489                                                         BE_IF_FLAGS_BROADCAST;
2490                                 status = be_cmd_if_create(adapter, cap_flags,
2491                                         en_flags, mac, true,
2492                                         &adapter->vf_cfg[vf].vf_if_handle,
2493                                         NULL, vf+1);
2494                                 if (status) {
2495                                         dev_err(&adapter->pdev->dev,
2496                                         "Interface Create failed for VF %d\n",
2497                                         vf);
2498                                         goto if_destroy;
2499                                 }
2500                                 adapter->vf_cfg[vf].vf_pmac_id =
2501                                                         BE_INVALID_PMAC_ID;
2502                                 vf++;
2503                         }
2504                 }
2505         } else {
2506                 status = be_cmd_mac_addr_query(adapter, mac,
2507                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2508                 if (!status) {
2509                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2510                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2511                 }
2512         }
2513
2514         status = be_tx_queues_create(adapter);
2515         if (status != 0)
2516                 goto if_destroy;
2517
2518         status = be_rx_queues_create(adapter);
2519         if (status != 0)
2520                 goto tx_qs_destroy;
2521
2522         /* Allow all priorities by default. A GRP5 evt may modify this */
2523         adapter->vlan_prio_bmap = 0xff;
2524
2525         status = be_mcc_queues_create(adapter);
2526         if (status != 0)
2527                 goto rx_qs_destroy;
2528
2529         adapter->link_speed = -1;
2530
2531         return 0;
2532
2533 rx_qs_destroy:
2534         be_rx_queues_destroy(adapter);
2535 tx_qs_destroy:
2536         be_tx_queues_destroy(adapter);
2537 if_destroy:
2538         if (be_physfn(adapter) && adapter->sriov_enabled)
2539                 for (vf = 0; vf < num_vfs; vf++)
2540                         if (adapter->vf_cfg[vf].vf_if_handle)
2541                                 be_cmd_if_destroy(adapter,
2542                                         adapter->vf_cfg[vf].vf_if_handle,
2543                                         vf + 1);
2544         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2545 do_none:
2546         return status;
2547 }
2548
2549 static int be_clear(struct be_adapter *adapter)
2550 {
2551         int vf;
2552
2553         if (be_physfn(adapter) && adapter->sriov_enabled)
2554                 be_vf_eth_addr_rem(adapter);
2555
2556         be_mcc_queues_destroy(adapter);
2557         be_rx_queues_destroy(adapter);
2558         be_tx_queues_destroy(adapter);
2559         adapter->eq_next_idx = 0;
2560
2561         if (be_physfn(adapter) && adapter->sriov_enabled)
2562                 for (vf = 0; vf < num_vfs; vf++)
2563                         if (adapter->vf_cfg[vf].vf_if_handle)
2564                                 be_cmd_if_destroy(adapter,
2565                                         adapter->vf_cfg[vf].vf_if_handle,
2566                                         vf + 1);
2567
2568         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2569
2570         adapter->be3_native = 0;
2571
2572         /* tell fw we're done with firing cmds */
2573         be_cmd_fw_clean(adapter);
2574         return 0;
2575 }
2576
2578 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
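/* Flash the redboot component only when its CRC differs from what is
 * already in flash; the CRC occupies the last 4 bytes of the component.
 */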
2579 static bool be_flash_redboot(struct be_adapter *adapter,
2580                         const u8 *p, u32 img_start, int image_size,
2581                         int hdr_size)
2582 {
2583         u32 crc_offset;
2584         u8 flashed_crc[4];
2585         int status;
2586
2587         crc_offset = hdr_size + img_start + image_size - 4;
2588
2589         p += crc_offset;
2590
2591         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2592                         (image_size - 4));
2593         if (status) {
2594                 dev_err(&adapter->pdev->dev,
2595                 "could not get crc from flash, not flashing redboot\n");
2596                 return false;
2597         }
2598
2599         /* update redboot only if the crc does not match */
2600         return memcmp(flashed_crc, p, 4) != 0;
2604 }
2605
2606 static bool phy_flashing_required(struct be_adapter *adapter)
2607 {
2608         int status = 0;
2609         struct be_phy_info phy_info;
2610
2611         status = be_cmd_get_phy_info(adapter, &phy_info);
2612         if (status)
2613                 return false;
2614         return (phy_info.phy_type == TN_8022) &&
2615                 (phy_info.interface_type == PHY_TYPE_BASET_10GB);
2619 }
2620
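/* Walk the generation-specific component table and flash each section of
 * the UFI image in 32KB pieces, skipping what does not apply: NCSI on
 * pre-3.102.148.0 firmware, PHY firmware when not required, and redboot
 * when its CRC is unchanged.
 */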
2621 static int be_flash_data(struct be_adapter *adapter,
2622                         const struct firmware *fw,
2623                         struct be_dma_mem *flash_cmd, int num_of_images)
2625 {
2626         int status = 0, i, filehdr_size = 0;
2627         u32 total_bytes = 0, flash_op;
2628         int num_bytes;
2629         const u8 *p = fw->data;
2630         struct be_cmd_write_flashrom *req = flash_cmd->va;
2631         const struct flash_comp *pflashcomp;
2632         int num_comp;
2633
2634         static const struct flash_comp gen3_flash_types[10] = {
2635                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2636                         FLASH_IMAGE_MAX_SIZE_g3},
2637                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2638                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2639                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2640                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2641                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2642                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2644                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2645                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2646                         FLASH_IMAGE_MAX_SIZE_g3},
2647                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2648                         FLASH_IMAGE_MAX_SIZE_g3},
2649                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2650                         FLASH_IMAGE_MAX_SIZE_g3},
2651                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2652                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2653                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2654                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2655         };
2656         static const struct flash_comp gen2_flash_types[8] = {
2657                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2658                         FLASH_IMAGE_MAX_SIZE_g2},
2659                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2660                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2661                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2662                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2663                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2664                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2665                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2666                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2667                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2668                         FLASH_IMAGE_MAX_SIZE_g2},
2669                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2670                         FLASH_IMAGE_MAX_SIZE_g2},
2671                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2672                          FLASH_IMAGE_MAX_SIZE_g2}
2673         };
2674
2675         if (adapter->generation == BE_GEN3) {
2676                 pflashcomp = gen3_flash_types;
2677                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2678                 num_comp = ARRAY_SIZE(gen3_flash_types);
2679         } else {
2680                 pflashcomp = gen2_flash_types;
2681                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2682                 num_comp = ARRAY_SIZE(gen2_flash_types);
2683         }
2684         for (i = 0; i < num_comp; i++) {
2685                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2686                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2687                         continue;
2688                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2689                         if (!phy_flashing_required(adapter))
2690                                 continue;
2691                 }
2692                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2693                         (!be_flash_redboot(adapter, fw->data,
2694                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2695                         (num_of_images * sizeof(struct image_hdr)))))
2696                         continue;
2697                 p = fw->data;
2698                 p += filehdr_size + pflashcomp[i].offset
2699                         + (num_of_images * sizeof(struct image_hdr));
2700                 if (p + pflashcomp[i].size > fw->data + fw->size)
2701                         return -1;
2702                 total_bytes = pflashcomp[i].size;
2703                 while (total_bytes) {
2704                         if (total_bytes > 32*1024)
2705                                 num_bytes = 32*1024;
2706                         else
2707                                 num_bytes = total_bytes;
2708                         total_bytes -= num_bytes;
2709                         if (!total_bytes) {
2710                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2711                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2712                                 else
2713                                         flash_op = FLASHROM_OPER_FLASH;
2714                         } else {
2715                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2716                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2717                                 else
2718                                         flash_op = FLASHROM_OPER_SAVE;
2719                         }
2720                         memcpy(req->params.data_buf, p, num_bytes);
2721                         p += num_bytes;
2722                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2723                                 pflashcomp[i].optype, flash_op, num_bytes);
2724                         if (status) {
2725                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2726                                         (pflashcomp[i].optype ==
2727                                                 IMG_TYPE_PHY_FW))
2728                                         break;
2729                                 dev_err(&adapter->pdev->dev,
2730                                         "cmd to write to flash rom failed.\n");
2731                                 return -1;
2732                         }
2733                 }
2734         }
2735         return 0;
2736 }
2737
2738 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2739 {
2740         if (fhdr == NULL)
2741                 return 0;
2742         if (fhdr->build[0] == '3')
2743                 return BE_GEN3;
2744         else if (fhdr->build[0] == '2')
2745                 return BE_GEN2;
2746         else
2747                 return 0;
2748 }
2749
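/* Lancer flashing: stream the image in 32KB chunks to the "/prg" object
 * via WRITE_OBJECT, then issue a zero-length write to commit it.
 */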
2750 static int lancer_fw_download(struct be_adapter *adapter,
2751                                 const struct firmware *fw)
2752 {
2753 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2754 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2755         struct be_dma_mem flash_cmd;
2756         const u8 *data_ptr = NULL;
2757         u8 *dest_image_ptr = NULL;
2758         size_t image_size = 0;
2759         u32 chunk_size = 0;
2760         u32 data_written = 0;
2761         u32 offset = 0;
2762         int status = 0;
2763         u8 add_status = 0;
2764
2765         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2766                 dev_err(&adapter->pdev->dev,
2767                         "FW Image not properly aligned. "
2768                         "Length must be 4-byte aligned.\n");
2769                 status = -EINVAL;
2770                 goto lancer_fw_exit;
2771         }
2772
2773         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2774                                 + LANCER_FW_DOWNLOAD_CHUNK;
2775         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2776                                                 &flash_cmd.dma, GFP_KERNEL);
2777         if (!flash_cmd.va) {
2778                 status = -ENOMEM;
2779                 dev_err(&adapter->pdev->dev,
2780                         "Memory allocation failure while flashing\n");
2781                 goto lancer_fw_exit;
2782         }
2783
2784         dest_image_ptr = flash_cmd.va +
2785                                 sizeof(struct lancer_cmd_req_write_object);
2786         image_size = fw->size;
2787         data_ptr = fw->data;
2788
2789         while (image_size) {
2790                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2791
2792                 /* Copy the image chunk content. */
2793                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2794
2795                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2796                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2797                                 &data_written, &add_status);
2798
2799                 if (status)
2800                         break;
2801
2802                 offset += data_written;
2803                 data_ptr += data_written;
2804                 image_size -= data_written;
2805         }
2806
2807         if (!status) {
2808                 /* Commit the FW written */
2809                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2810                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2811                                         &data_written, &add_status);
2812         }
2813
2814         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2815                                 flash_cmd.dma);
2816         if (status) {
2817                 dev_err(&adapter->pdev->dev,
2818                         "Firmware load error. "
2819                         "Status code: 0x%x Additional Status: 0x%x\n",
2820                         status, add_status);
2821                 goto lancer_fw_exit;
2822         }
2823
2824         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2825 lancer_fw_exit:
2826         return status;
2827 }
2828
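/* BE2/BE3 flashing: the UFI generation must match the adapter generation;
 * a gen3 UFI may carry several images, each flashed component by component.
 */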
2829 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2830 {
2831         struct flash_file_hdr_g2 *fhdr;
2832         struct flash_file_hdr_g3 *fhdr3;
2833         struct image_hdr *img_hdr_ptr = NULL;
2834         struct be_dma_mem flash_cmd;
2835         const u8 *p;
2836         int status = 0, i = 0, num_imgs = 0;
2837
2838         p = fw->data;
2839         fhdr = (struct flash_file_hdr_g2 *) p;
2840
2841         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
2842         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2843                                           &flash_cmd.dma, GFP_KERNEL);
2844         if (!flash_cmd.va) {
2845                 status = -ENOMEM;
2846                 dev_err(&adapter->pdev->dev,
2847                         "Memory allocation failure while flashing\n");
2848                 goto be_fw_exit;
2849         }
2850
2851         if ((adapter->generation == BE_GEN3) &&
2852                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2853                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2854                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2855                 for (i = 0; i < num_imgs; i++) {
2856                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2857                                         (sizeof(struct flash_file_hdr_g3) +
2858                                          i * sizeof(struct image_hdr)));
2859                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2860                                 status = be_flash_data(adapter, fw, &flash_cmd,
2861                                                         num_imgs);
2862                 }
2863         } else if ((adapter->generation == BE_GEN2) &&
2864                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2865                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2866         } else {
2867                 dev_err(&adapter->pdev->dev,
2868                         "UFI and Interface are not compatible for flashing\n");
2869                 status = -EINVAL;
2870         }
2871
2872         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2873                           flash_cmd.dma);
2874         if (status) {
2875                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2876                 goto be_fw_exit;
2877         }
2878
2879         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2880
2881 be_fw_exit:
2882         return status;
2883 }
2884
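/* Entry point for firmware flashing; typically invoked from the ethtool
 * flash-device operation (e.g. "ethtool -f <iface> <fw_file>").  The
 * interface must be up so that MCC command processing is running.
 */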
2885 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2886 {
2887         const struct firmware *fw;
2888         int status;
2889
2890         if (!netif_running(adapter->netdev)) {
2891                 dev_err(&adapter->pdev->dev,
2892                         "Firmware load not allowed (interface is down)\n");
2893                 return -ENETDOWN;
2894         }
2895
2896         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2897         if (status)
2898                 goto fw_exit;
2899
2900         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2901
2902         if (lancer_chip(adapter))
2903                 status = lancer_fw_download(adapter, fw);
2904         else
2905                 status = be_fw_download(adapter, fw);
2906
2907 fw_exit:
2908         release_firmware(fw);
2909         return status;
2910 }
2911
2912 static const struct net_device_ops be_netdev_ops = {
2913         .ndo_open               = be_open,
2914         .ndo_stop               = be_close,
2915         .ndo_start_xmit         = be_xmit,
2916         .ndo_set_rx_mode        = be_set_multicast_list,
2917         .ndo_set_mac_address    = be_mac_addr_set,
2918         .ndo_change_mtu         = be_change_mtu,
2919         .ndo_get_stats64        = be_get_stats64,
2920         .ndo_validate_addr      = eth_validate_addr,
2921         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2922         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2923         .ndo_set_vf_mac         = be_set_vf_mac,
2924         .ndo_set_vf_vlan        = be_set_vf_vlan,
2925         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2926         .ndo_get_vf_config      = be_get_vf_config
2927 };
2928
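/* Set up netdev features, default flow-control settings, the netdev ops
 * and the NAPI contexts.  Offloads in hw_features are user-togglable;
 * VLAN rx acceleration and filtering are always enabled.
 */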
2929 static void be_netdev_init(struct net_device *netdev)
2930 {
2931         struct be_adapter *adapter = netdev_priv(netdev);
2932         struct be_rx_obj *rxo;
2933         int i;
2934
2935         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2936                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2937                 NETIF_F_HW_VLAN_TX;
2938         if (be_multi_rxq(adapter))
2939                 netdev->hw_features |= NETIF_F_RXHASH;
2940
2941         netdev->features |= netdev->hw_features |
2942                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2943
2944         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2945                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2946
2947         netdev->flags |= IFF_MULTICAST;
2948
2949         /* Default settings for Rx and Tx flow control */
2950         adapter->rx_fc = true;
2951         adapter->tx_fc = true;
2952
2953         netif_set_gso_max_size(netdev, 65535);
2954
2955         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2956
2957         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2958
2959         for_all_rx_queues(adapter, rxo, i)
2960                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2961                                 BE_NAPI_WEIGHT);
2962
2963         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2964                 BE_NAPI_WEIGHT);
2965 }
2966
2967 static void be_unmap_pci_bars(struct be_adapter *adapter)
2968 {
2969         if (adapter->csr)
2970                 iounmap(adapter->csr);
2971         if (adapter->db)
2972                 iounmap(adapter->db);
2973 }
2974
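/* Map the PCI BARs this function needs.  Lancer exposes everything
 * through BAR 0.  On BE2/BE3 the CSR window (BAR 2) is mapped only for
 * the PF; the doorbell BAR is chosen below by generation and function.
 */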
2975 static int be_map_pci_bars(struct be_adapter *adapter)
2976 {
2977         u8 __iomem *addr;
2978         int db_reg;
2979
2980         if (lancer_chip(adapter)) {
2981                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2982                         pci_resource_len(adapter->pdev, 0));
2983                 if (addr == NULL)
2984                         return -ENOMEM;
2985                 adapter->db = addr;
2986                 return 0;
2987         }
2988
2989         if (be_physfn(adapter)) {
2990                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2991                                 pci_resource_len(adapter->pdev, 2));
2992                 if (addr == NULL)
2993                         return -ENOMEM;
2994                 adapter->csr = addr;
2995         }
2996
2997         /* BE2, and the PF on BE3, expose doorbells on BAR 4;
2998          * BE3 VFs use BAR 0.
2999          */
3000         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3001                 db_reg = 4;
3002         else
3003                 db_reg = 0;
3004 
3005         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3006                                 pci_resource_len(adapter->pdev, db_reg));
3007         if (addr == NULL)
3008                 goto pci_map_err;
3009         adapter->db = addr;
3010
3011         return 0;
3012 pci_map_err:
3013         be_unmap_pci_bars(adapter);
3014         return -ENOMEM;
3015 }
3016
3017
3018 static void be_ctrl_cleanup(struct be_adapter *adapter)
3019 {
3020         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3021
3022         be_unmap_pci_bars(adapter);
3023
3024         if (mem->va)
3025                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3026                                   mem->dma);
3027
3028         mem = &adapter->rx_filter;
3029         if (mem->va)
3030                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3031                                   mem->dma);
3032 }
3033
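/* Allocate the DMA memory used for mailbox and rx-filter commands and
 * initialize the command locks.  The mailbox must be 16-byte aligned, so
 * 16 extra bytes are allocated and PTR_ALIGN() rounds va/dma up to the
 * next 16-byte boundary (e.g. an address ending in 0x8 becomes 0x10).
 */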
3034 static int be_ctrl_init(struct be_adapter *adapter)
3035 {
3036         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3037         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3038         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3039         int status;
3040
3041         status = be_map_pci_bars(adapter);
3042         if (status)
3043                 goto done;
3044
3045         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3046         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3047                                                 mbox_mem_alloc->size,
3048                                                 &mbox_mem_alloc->dma,
3049                                                 GFP_KERNEL);
3050         if (!mbox_mem_alloc->va) {
3051                 status = -ENOMEM;
3052                 goto unmap_pci_bars;
3053         }
3054         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3055         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3056         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3057         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3058
3059         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3060         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3061                                         &rx_filter->dma, GFP_KERNEL);
3062         if (rx_filter->va == NULL) {
3063                 status = -ENOMEM;
3064                 goto free_mbox;
3065         }
3066         memset(rx_filter->va, 0, rx_filter->size);
3067
3068         mutex_init(&adapter->mbox_lock);
3069         spin_lock_init(&adapter->mcc_lock);
3070         spin_lock_init(&adapter->mcc_cq_lock);
3071
3072         init_completion(&adapter->flash_compl);
3073         pci_save_state(adapter->pdev);
3074         return 0;
3075
3076 free_mbox:
3077         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3078                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3079
3080 unmap_pci_bars:
3081         be_unmap_pci_bars(adapter);
3082
3083 done:
3084         return status;
3085 }
3086
3087 static void be_stats_cleanup(struct be_adapter *adapter)
3088 {
3089         struct be_dma_mem *cmd = &adapter->stats_cmd;
3090
3091         if (cmd->va)
3092                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3093                                   cmd->va, cmd->dma);
3094 }
3095
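/* Size the stats DMA buffer by the command variant the chip speaks:
 * v0 for BE2, the pport-stats command for Lancer, v1 otherwise.
 */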
3096 static int be_stats_init(struct be_adapter *adapter)
3097 {
3098         struct be_dma_mem *cmd = &adapter->stats_cmd;
3099
3100         if (adapter->generation == BE_GEN2) {
3101                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3102         } else {
3103                 if (lancer_chip(adapter))
3104                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3105                 else
3106                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3107         }
3108         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3109                                      GFP_KERNEL);
3110         if (cmd->va == NULL)
3111                 return -ENOMEM;
3112         memset(cmd->va, 0, cmd->size);
3113         return 0;
3114 }
3115
3116 static void __devexit be_remove(struct pci_dev *pdev)
3117 {
3118         struct be_adapter *adapter = pci_get_drvdata(pdev);
3119
3120         if (!adapter)
3121                 return;
3122
3123         cancel_delayed_work_sync(&adapter->work);
3124
3125         unregister_netdev(adapter->netdev);
3126
3127         be_clear(adapter);
3128
3129         be_stats_cleanup(adapter);
3130
3131         be_ctrl_cleanup(adapter);
3132
3133         kfree(adapter->vf_cfg);
3134         be_sriov_disable(adapter);
3135
3136         be_msix_disable(adapter);
3137
3138         pci_set_drvdata(pdev, NULL);
3139         pci_release_regions(pdev);
3140         pci_disable_device(pdev);
3141
3142         free_netdev(adapter->netdev);
3143 }
3144
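/* Read per-function configuration from firmware: FW version, port number,
 * function mode/capabilities and the permanent MAC address.  The VLAN
 * table size and the number of TX queues are derived from these.
 */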
3145 static int be_get_config(struct be_adapter *adapter)
3146 {
3147         int status;
3148         u8 mac[ETH_ALEN];
3149
3150         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3151         if (status)
3152                 return status;
3153
3154         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3155                         &adapter->function_mode, &adapter->function_caps);
3156         if (status)
3157                 return status;
3158
3159         memset(mac, 0, ETH_ALEN);
3160
3161         /* A default permanent address is given to each VF for Lancer */
3162         if (be_physfn(adapter) || lancer_chip(adapter)) {
3163                 status = be_cmd_mac_addr_query(adapter, mac,
3164                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3165
3166                 if (status)
3167                         return status;
3168
3169                 if (!is_valid_ether_addr(mac))
3170                         return -EADDRNOTAVAIL;
3171
3172                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3173                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3174         }
3175
3176         if (adapter->function_mode & 0x400)     /* FLEX10/multi-channel */
3177                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
3178         else
3179                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3180
3181         status = be_cmd_get_cntl_attributes(adapter);
3182         if (status)
3183                 return status;
3184
3185         if ((num_vfs && adapter->sriov_enabled) ||
3186                 (adapter->function_mode & 0x400) ||     /* FLEX10 */
3187                 lancer_chip(adapter) || !be_physfn(adapter)) {
3188                 adapter->num_tx_qs = 1;
3189                 netif_set_real_num_tx_queues(adapter->netdev,
3190                         adapter->num_tx_qs);
3191         } else {
3192                 adapter->num_tx_qs = MAX_TX_QS;
3193         }
3194
3195         return 0;
3196 }
3197
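/* Derive the adapter generation from the PCI device-id.  Lancer devices
 * (OC_DEVICE_ID3/4) are additionally validated by reading the SLI_INTF
 * register, which also provides the SLI family.
 */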
3198 static int be_dev_family_check(struct be_adapter *adapter)
3199 {
3200         struct pci_dev *pdev = adapter->pdev;
3201         u32 sli_intf = 0, if_type;
3202
3203         switch (pdev->device) {
3204         case BE_DEVICE_ID1:
3205         case OC_DEVICE_ID1:
3206                 adapter->generation = BE_GEN2;
3207                 break;
3208         case BE_DEVICE_ID2:
3209         case OC_DEVICE_ID2:
3210                 adapter->generation = BE_GEN3;
3211                 break;
3212         case OC_DEVICE_ID3:
3213         case OC_DEVICE_ID4:
3214                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3215                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3216                                                 SLI_INTF_IF_TYPE_SHIFT;
3217
3218                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3219                         if_type != 0x02) {
3220                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3221                         return -EINVAL;
3222                 }
3223                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3224                                          SLI_INTF_FAMILY_SHIFT);
3225                 adapter->generation = BE_GEN3;
3226                 break;
3227         default:
3228                 adapter->generation = 0;
3229         }
3230         return 0;
3231 }
3232
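/* Poll the SLIPORT status register until the port signals ready; 500
 * iterations of 20ms give the port up to ~10 seconds to come up.
 */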
3233 static int lancer_wait_ready(struct be_adapter *adapter)
3234 {
3235 #define SLIPORT_READY_TIMEOUT 500
3236         u32 sliport_status;
3237         int status = 0, i;
3238
3239         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3240                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3241                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3242                         break;
3243
3244                 msleep(20);
3245         }
3246
3247         if (i == SLIPORT_READY_TIMEOUT)
3248                 status = -1;
3249
3250         return status;
3251 }
3252
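/* Wait for the Lancer port to become ready.  If firmware reports an error
 * that is marked reset-needed, initiate a port reset through the SLIPORT
 * control register and wait again for the error to clear.
 */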
3253 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3254 {
3255         int status;
3256         u32 sliport_status, err, reset_needed;
3257         status = lancer_wait_ready(adapter);
3258         if (!status) {
3259                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3260                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3261                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3262                 if (err && reset_needed) {
3263                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3264                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3265
3266                         /* check adapter has corrected the error */
3267                         status = lancer_wait_ready(adapter);
3268                         sliport_status = ioread32(adapter->db +
3269                                                         SLIPORT_STATUS_OFFSET);
3270                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3271                                                 SLIPORT_STATUS_RN_MASK);
3272                         if (status || sliport_status)
3273                                 status = -1;
3274                 } else if (err || reset_needed) {
3275                         status = -1;
3276                 }
3277         }
3278         return status;
3279 }
3280
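/* PCI probe: bring one adapter up end-to-end -- map BARs, sync with the
 * firmware's POST state, reset the function, create queues via be_setup()
 * and register the netdev.  Failures unwind in reverse order through the
 * error labels at the bottom.
 */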
3281 static int __devinit be_probe(struct pci_dev *pdev,
3282                         const struct pci_device_id *pdev_id)
3283 {
3284         int status = 0;
3285         struct be_adapter *adapter;
3286         struct net_device *netdev;
3287
3288         status = pci_enable_device(pdev);
3289         if (status)
3290                 goto do_none;
3291
3292         status = pci_request_regions(pdev, DRV_NAME);
3293         if (status)
3294                 goto disable_dev;
3295         pci_set_master(pdev);
3296
3297         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3298         if (netdev == NULL) {
3299                 status = -ENOMEM;
3300                 goto rel_reg;
3301         }
3302         adapter = netdev_priv(netdev);
3303         adapter->pdev = pdev;
3304         pci_set_drvdata(pdev, adapter);
3305
3306         status = be_dev_family_check(adapter);
3307         if (status)
3308                 goto free_netdev;
3309
3310         adapter->netdev = netdev;
3311         SET_NETDEV_DEV(netdev, &pdev->dev);
3312
3313         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3314         if (!status) {
3315                 netdev->features |= NETIF_F_HIGHDMA;
3316         } else {
3317                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3318                 if (status) {
3319                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3320                         goto free_netdev;
3321                 }
3322         }
3323
3324         be_sriov_enable(adapter);
3325         if (adapter->sriov_enabled) {
3326                 adapter->vf_cfg = kcalloc(num_vfs,
3327                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3328
3329                 if (!adapter->vf_cfg)
3330                         goto free_netdev;
3331         }
3332
3333         status = be_ctrl_init(adapter);
3334         if (status)
3335                 goto free_vf_cfg;
3336
3337         if (lancer_chip(adapter)) {
3338                 status = lancer_test_and_set_rdy_state(adapter);
3339                 if (status) {
3340                         dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3341                         goto ctrl_clean;
3342                 }
3343         }
3344
3345         /* sync up with fw's ready state */
3346         if (be_physfn(adapter)) {
3347                 status = be_cmd_POST(adapter);
3348                 if (status)
3349                         goto ctrl_clean;
3350         }
3351
3352         /* tell fw we're ready to fire cmds */
3353         status = be_cmd_fw_init(adapter);
3354         if (status)
3355                 goto ctrl_clean;
3356
3357         status = be_cmd_reset_function(adapter);
3358         if (status)
3359                 goto ctrl_clean;
3360
3361         status = be_stats_init(adapter);
3362         if (status)
3363                 goto ctrl_clean;
3364
3365         status = be_get_config(adapter);
3366         if (status)
3367                 goto stats_clean;
3368
3369         /* The INTR bit may be set in the card when probed by a kdump kernel
3370          * after a crash.
3371          */
3372         if (!lancer_chip(adapter))
3373                 be_intr_set(adapter, false);
3374
3375         be_msix_enable(adapter);
3376
3377         INIT_DELAYED_WORK(&adapter->work, be_worker);
3378
3379         status = be_setup(adapter);
3380         if (status)
3381                 goto msix_disable;
3382
3383         be_netdev_init(netdev);
3384         status = register_netdev(netdev);
3385         if (status != 0)
3386                 goto unsetup;
3387
3388         if (be_physfn(adapter) && adapter->sriov_enabled) {
3389                 u8 mac_speed;
3390                 u16 vf, lnk_speed;
3391
3392                 if (!lancer_chip(adapter)) {
3393                         status = be_vf_eth_addr_config(adapter);
3394                         if (status)
3395                                 goto unreg_netdev;
3396                 }
3397
3398                 for (vf = 0; vf < num_vfs; vf++) {
3399                         status = be_cmd_link_status_query(adapter, &mac_speed,
3400                                                 &lnk_speed, vf + 1);
3401                         if (!status)
3402                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3403                         else
3404                                 goto unreg_netdev;
3405                 }
3406         }
3407
3408         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3409
3410         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3411         return 0;
3412
3413 unreg_netdev:
3414         unregister_netdev(netdev);
3415 unsetup:
3416         be_clear(adapter);
3417 msix_disable:
3418         be_msix_disable(adapter);
3419 stats_clean:
3420         be_stats_cleanup(adapter);
3421 ctrl_clean:
3422         be_ctrl_cleanup(adapter);
3423 free_vf_cfg:
3424         kfree(adapter->vf_cfg);
3425 free_netdev:
3426         be_sriov_disable(adapter);
3427         free_netdev(netdev);
3428         pci_set_drvdata(pdev, NULL);
3429 rel_reg:
3430         pci_release_regions(pdev);
3431 disable_dev:
3432         pci_disable_device(pdev);
3433 do_none:
3434         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3435         return status;
3436 }
3437
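/* Legacy PM suspend: quiesce the interface, optionally arm wake-on-lan,
 * tear down the queues and put the device into the requested power state.
 */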
3438 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3439 {
3440         struct be_adapter *adapter = pci_get_drvdata(pdev);
3441         struct net_device *netdev =  adapter->netdev;
3442
3443         cancel_delayed_work_sync(&adapter->work);
3444         if (adapter->wol)
3445                 be_setup_wol(adapter, true);
3446
3447         netif_device_detach(netdev);
3448         if (netif_running(netdev)) {
3449                 rtnl_lock();
3450                 be_close(netdev);
3451                 rtnl_unlock();
3452         }
3453         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3454         be_clear(adapter);
3455
3456         be_msix_disable(adapter);
3457         pci_save_state(pdev);
3458         pci_disable_device(pdev);
3459         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3460         return 0;
3461 }
3462
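/* Legacy PM resume: restore PCI state, re-initialize the firmware command
 * path and rebuild the queues before re-attaching the netdev.
 */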
3463 static int be_resume(struct pci_dev *pdev)
3464 {
3465         int status = 0;
3466         struct be_adapter *adapter = pci_get_drvdata(pdev);
3467         struct net_device *netdev =  adapter->netdev;
3468
3469         netif_device_detach(netdev);
3470
3471         status = pci_enable_device(pdev);
3472         if (status)
3473                 return status;
3474
3475         pci_set_power_state(pdev, PCI_D0);
3476         pci_restore_state(pdev);
3477
3478         be_msix_enable(adapter);
3479         /* tell fw we're ready to fire cmds */
3480         status = be_cmd_fw_init(adapter);
3481         if (status)
3482                 return status;
3483
3484         be_setup(adapter);
3485         if (netif_running(netdev)) {
3486                 rtnl_lock();
3487                 be_open(netdev);
3488                 rtnl_unlock();
3489         }
3490         netif_device_attach(netdev);
3491
3492         if (adapter->wol)
3493                 be_setup_wol(adapter, false);
3494
3495         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3496         return 0;
3497 }
3498
3499 /*
3500  * A function-level reset (FLR) stops the device from DMAing any data.
3501  */
3502 static void be_shutdown(struct pci_dev *pdev)
3503 {
3504         struct be_adapter *adapter = pci_get_drvdata(pdev);
3505
3506         if (!adapter)
3507                 return;
3508
3509         cancel_delayed_work_sync(&adapter->work);
3510
3511         netif_device_detach(adapter->netdev);
3512
3513         if (adapter->wol)
3514                 be_setup_wol(adapter, true);
3515
3516         be_cmd_reset_function(adapter);
3517
3518         pci_disable_device(pdev);
3519 }
3520
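/* PCI EEH callbacks: on an error, detach and tear everything down; on
 * slot reset, re-enable the device and wait for firmware POST; on resume,
 * re-initialize firmware state and bring the interface back up.
 */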
3521 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3522                                 pci_channel_state_t state)
3523 {
3524         struct be_adapter *adapter = pci_get_drvdata(pdev);
3525         struct net_device *netdev =  adapter->netdev;
3526
3527         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3528
3529         adapter->eeh_err = true;
3530
3531         netif_device_detach(netdev);
3532
3533         if (netif_running(netdev)) {
3534                 rtnl_lock();
3535                 be_close(netdev);
3536                 rtnl_unlock();
3537         }
3538         be_clear(adapter);
3539
3540         if (state == pci_channel_io_perm_failure)
3541                 return PCI_ERS_RESULT_DISCONNECT;
3542
3543         pci_disable_device(pdev);
3544
3545         return PCI_ERS_RESULT_NEED_RESET;
3546 }
3547
3548 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3549 {
3550         struct be_adapter *adapter = pci_get_drvdata(pdev);
3551         int status;
3552
3553         dev_info(&adapter->pdev->dev, "EEH reset\n");
3554         adapter->eeh_err = false;
3555
3556         status = pci_enable_device(pdev);
3557         if (status)
3558                 return PCI_ERS_RESULT_DISCONNECT;
3559
3560         pci_set_master(pdev);
3561         pci_set_power_state(pdev, PCI_D0);
3562         pci_restore_state(pdev);
3563
3564         /* Check if card is ok and fw is ready */
3565         status = be_cmd_POST(adapter);
3566         if (status)
3567                 return PCI_ERS_RESULT_DISCONNECT;
3568
3569         return PCI_ERS_RESULT_RECOVERED;
3570 }
3571
3572 static void be_eeh_resume(struct pci_dev *pdev)
3573 {
3574         int status = 0;
3575         struct be_adapter *adapter = pci_get_drvdata(pdev);
3576         struct net_device *netdev =  adapter->netdev;
3577
3578         dev_info(&adapter->pdev->dev, "EEH resume\n");
3579
3580         pci_save_state(pdev);
3581
3582         /* tell fw we're ready to fire cmds */
3583         status = be_cmd_fw_init(adapter);
3584         if (status)
3585                 goto err;
3586
3587         status = be_setup(adapter);
3588         if (status)
3589                 goto err;
3590
3591         if (netif_running(netdev)) {
3592                 status = be_open(netdev);
3593                 if (status)
3594                         goto err;
3595         }
3596         netif_device_attach(netdev);
3597         return;
3598 err:
3599         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3600 }
3601
3602 static struct pci_error_handlers be_eeh_handlers = {
3603         .error_detected = be_eeh_err_detected,
3604         .slot_reset = be_eeh_reset,
3605         .resume = be_eeh_resume,
3606 };
3607
3608 static struct pci_driver be_driver = {
3609         .name = DRV_NAME,
3610         .id_table = be_dev_ids,
3611         .probe = be_probe,
3612         .remove = be_remove,
3613         .suspend = be_suspend,
3614         .resume = be_resume,
3615         .shutdown = be_shutdown,
3616         .err_handler = &be_eeh_handlers
3617 };
3618
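/* Validate module parameters before registering the PCI driver;
 * rx_frag_size may only be 2048, 4096 or 8192, else it falls back to 2048.
 */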
3619 static int __init be_init_module(void)
3620 {
3621         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3622             rx_frag_size != 2048) {
3623                 printk(KERN_WARNING DRV_NAME
3624                         " : Module param rx_frag_size must be 2048/4096/8192."
3625                         " Using 2048\n");
3626                 rx_frag_size = 2048;
3627         }
3628
3629         return pci_register_driver(&be_driver);
3630 }
3631 module_init(be_init_module);
3632
3633 static void __exit be_exit_module(void)
3634 {
3635         pci_unregister_driver(&be_driver);
3636 }
3637 module_exit(be_exit_module);