drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

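/*
 * Rings are allocated as one physically contiguous, DMA-coherent block of
 * len * entry_size bytes that the hardware reads and writes directly, so
 * no per-entry mapping is needed; be_queue_free() above releases the whole
 * block with a single dma_free_coherent() call.
 */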
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        /* Don't touch the device while an EEH error is pending */
        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

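/*
 * Doorbell writes for the RQ/TXQ rings encode the ring id in the low bits
 * and the count of newly posted entries in a shifted field; e.g. posting 4
 * entries to RQ 5 writes (5 | 4 << DB_RQ_NUM_POSTED_SHIFT). The wmb()
 * before each doorbell makes sure the descriptors themselves are visible
 * to the device before the write that hands them over.
 */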
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

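/*
 * EQ and CQ doorbells work on a credit model: num_popped tells the device
 * how many entries the driver has consumed, and the rearm bit controls
 * whether another interrupt may be raised for this queue. Polling paths
 * typically ring with arm == false while working and rearm only when done.
 */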
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events =
                        rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events =
                        rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop =
                port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                               pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                             pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                make_64bit_val(pport_stats->rx_control_frames_hi,
                               pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        /* Pair the hi/lo words of the same counter; the old code mistakenly
         * paired rx_internal_mac_errors_hi with rx_frames_too_long_lo
         * (assumes struct lancer_cmd_pport_stats defines the matching
         * rx_frames_too_long_hi word).
         */
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                               pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                               pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                              pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                               pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                 pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                            pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                               pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                pkts += rx_stats(rxo)->rx_pkts;
                bytes += rx_stats(rxo)->rx_bytes;
                mcast += rx_stats(rxo)->rx_mcast_pkts;
                drops += rx_stats(rxo)->rx_dropped;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx =
                                        be_erx_stats_from_cmd(adapter);
                                drops += erx->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx =
                                        be_erx_stats_from_cmd(adapter);
                        drops += erx->rx_drops_no_fragments[rxo->q.id];
                }
        }
        dev_stats->rx_packets = pkts;
        dev_stats->rx_bytes = bytes;
        dev_stats->multicast = mcast;
        dev_stats->rx_dropped = drops;

        pkts = bytes = 0;
        for_all_tx_queues(adapter, txo, i) {
                pkts += tx_stats(txo)->be_tx_pkts;
                bytes += tx_stats(txo)->be_tx_bytes;
        }
        dev_stats->tx_packets = pkts;
        dev_stats->tx_bytes = bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

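/*
 * Adaptive interrupt coalescing: the EQ delay is derived from the observed
 * RX frags/sec as roughly eqd = (rx_fps / 110000) << 3, clamped to
 * [min_eqd, max_eqd] and forced to 0 below a small threshold so light
 * traffic keeps low latency. The constants are empirical and the delay
 * units are a hardware setting (presumably microseconds).
 */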
/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

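/*
 * Worked example for be_calc_rate() below: 250,000,000 bytes seen over
 * 2 * HZ ticks -> 125,000,000 bytes/sec -> 1,000,000,000 bits/sec
 * -> returns 1000, i.e. the result is in megabits per second.
 */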
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_tx_obj *txo)
{
        struct be_tx_stats *stats = tx_stats(txo);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

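/*
 * Example: an skb with linear data plus two page frags needs
 * 1 (hdr) + 1 (linear) + 2 (frags) = 4 WRBs; 4 is already even, so no
 * dummy is added. With a single frag the total would be 3, and a dummy
 * WRB is appended because the pre-Lancer chips require an even
 * per-packet WRB count.
 */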
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

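/*
 * WRB layout for one packet in make_tx_wrbs() below: slot 0 is the header
 * WRB (filled in last, once the total copied length is known), followed by
 * one fragment WRB per DMA-mapped piece and, if needed, an all-zero dummy
 * WRB. On a mapping failure the queue head is rewound to map_head and all
 * fragments mapped so far are unwound.
 */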
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

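/*
 * Every RX ring entry has a shadow be_rx_page_info slot. Several entries
 * can share one big page; only the slot flagged last_page_user carries the
 * unmap obligation, so dma_unmap_page() runs exactly once per page while
 * each consumer still drops its own page reference.
 */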
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

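/*
 * Example: with rx_frag_size = 2048, a 3000-byte frame spans two receive
 * fragments. skb_fill_rx_data() below copies the first BE_HDR_LEN bytes
 * into the skb linear area so the protocol headers can be parsed in place,
 * and attaches the rest of frag 0 plus all of frag 1 as page fragments.
 */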
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rxo->stats.rx_dropped++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        /* extract the hash from the completion, not from rxcp itself */
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        /* extract the hash from the completion, not from rxcp itself */
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}

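/*
 * The RX CQ is polled by testing the 'valid' bit that hardware sets in
 * each completion entry. The rmb() in be_rx_compl_get() orders that test
 * before the reads of the remaining fields, and the entry is zeroed once
 * parsed so the same slot reads as invalid after the ring wraps.
 */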
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

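/*
 * Example: if big_page_size is 4096 and rx_frag_size is 2048, each mapped
 * page below supplies two receive fragments (the compound allocation in
 * be_alloc_pages() allows big_page_size to cover several hardware pages).
 */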
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

1435 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1436 {
1437         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1438
1439         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1440                 return NULL;
1441
1442         rmb();
1443         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1444
1445         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1446
1447         queue_tail_inc(tx_cq);
1448         return txcp;
1449 }
1450
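     /* Unmaps the frags of, and frees, the skb completed at last_index.
      * Returns the number of wrbs consumed (including the header wrb) so
      * the caller can credit them back to the txq.
      */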
1451 static u16 be_tx_compl_process(struct be_adapter *adapter,
1452                 struct be_tx_obj *txo, u16 last_index)
1453 {
1454         struct be_queue_info *txq = &txo->q;
1455         struct be_eth_wrb *wrb;
1456         struct sk_buff **sent_skbs = txo->sent_skb_list;
1457         struct sk_buff *sent_skb;
1458         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1459         bool unmap_skb_hdr = true;
1460
1461         sent_skb = sent_skbs[txq->tail];
1462         BUG_ON(!sent_skb);
1463         sent_skbs[txq->tail] = NULL;
1464
1465         /* skip header wrb */
1466         queue_tail_inc(txq);
1467
1468         do {
1469                 cur_index = txq->tail;
1470                 wrb = queue_tail_node(txq);
1471                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1472                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1473                 unmap_skb_hdr = false;
1474
1475                 num_wrbs++;
1476                 queue_tail_inc(txq);
1477         } while (cur_index != last_index);
1478
1479         kfree_skb(sent_skb);
1480         return num_wrbs;
1481 }
1482
1483 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1484 {
1485         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1486
1487         if (!eqe->evt)
1488                 return NULL;
1489
1490         rmb();
1491         eqe->evt = le32_to_cpu(eqe->evt);
1492         queue_tail_inc(&eq_obj->q);
1493         return eqe;
1494 }
1495
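     /* Drain all pending entries from the event queue, notify the EQ, and
      * schedule NAPI if any events were reaped.
      */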
1496 static int event_handle(struct be_adapter *adapter,
1497                         struct be_eq_obj *eq_obj,
1498                         bool rearm)
1499 {
1500         struct be_eq_entry *eqe;
1501         u16 num = 0;
1502
1503         while ((eqe = event_get(eq_obj)) != NULL) {
1504                 eqe->evt = 0;
1505                 num++;
1506         }
1507
1508         /* Deal with any spurious interrupts that come
1509          * without events
1510          */
1511         if (!num)
1512                 rearm = true;
1513
1514         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1515         if (num)
1516                 napi_schedule(&eq_obj->napi);
1517
1518         return num;
1519 }
1520
1521 /* Just read and notify events without processing them.
1522  * Used at the time of destroying event queues */
1523 static void be_eq_clean(struct be_adapter *adapter,
1524                         struct be_eq_obj *eq_obj)
1525 {
1526         struct be_eq_entry *eqe;
1527         u16 num = 0;
1528
1529         while ((eqe = event_get(eq_obj)) != NULL) {
1530                 eqe->evt = 0;
1531                 num++;
1532         }
1533
1534         if (num)
1535                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1536 }
1537
1538 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1539 {
1540         struct be_rx_page_info *page_info;
1541         struct be_queue_info *rxq = &rxo->q;
1542         struct be_queue_info *rx_cq = &rxo->cq;
1543         struct be_rx_compl_info *rxcp;
1544         u16 tail;
1545
1546         /* First cleanup pending rx completions */
1547         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1548                 be_rx_compl_discard(adapter, rxo, rxcp);
1549                 be_cq_notify(adapter, rx_cq->id, false, 1);
1550         }
1551
1552         /* Then free posted rx buffers that were not used */
1553         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1554         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1555                 page_info = get_rx_page_info(adapter, rxo, tail);
1556                 put_page(page_info->page);
1557                 memset(page_info, 0, sizeof(*page_info));
1558         }
1559         BUG_ON(atomic_read(&rxq->used));
1560         rxq->tail = rxq->head = 0;
1561 }
1562
1563 static void be_tx_compl_clean(struct be_adapter *adapter,
1564                                 struct be_tx_obj *txo)
1565 {
1566         struct be_queue_info *tx_cq = &txo->cq;
1567         struct be_queue_info *txq = &txo->q;
1568         struct be_eth_tx_compl *txcp;
1569         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1570         struct sk_buff **sent_skbs = txo->sent_skb_list;
1571         struct sk_buff *sent_skb;
1572         bool dummy_wrb;
1573
1574         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1575         do {
1576                 while ((txcp = be_tx_compl_get(tx_cq))) {
1577                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1578                                         wrb_index, txcp);
1579                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1580                         cmpl++;
1581                 }
1582                 if (cmpl) {
1583                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1584                         atomic_sub(num_wrbs, &txq->used);
1585                         cmpl = 0;
1586                         num_wrbs = 0;
1587                 }
1588
1589                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1590                         break;
1591
1592                 mdelay(1);
1593         } while (true);
1594
1595         if (atomic_read(&txq->used))
1596                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1597                         atomic_read(&txq->used));
1598
1599         /* free posted tx for which compls will never arrive */
1600         while (atomic_read(&txq->used)) {
1601                 sent_skb = sent_skbs[txq->tail];
1602                 end_idx = txq->tail;
1603                 index_adv(&end_idx,
1604                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1605                         txq->len);
1606                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1607                 atomic_sub(num_wrbs, &txq->used);
1608         }
1609 }
1610
1611 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1612 {
1613         struct be_queue_info *q;
1614
1615         q = &adapter->mcc_obj.q;
1616         if (q->created)
1617                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1618         be_queue_free(adapter, q);
1619
1620         q = &adapter->mcc_obj.cq;
1621         if (q->created)
1622                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1623         be_queue_free(adapter, q);
1624 }
1625
1626 /* Must be called only after TX qs are created as MCC shares TX EQ */
1627 static int be_mcc_queues_create(struct be_adapter *adapter)
1628 {
1629         struct be_queue_info *q, *cq;
1630
1631         /* Alloc MCC compl queue */
1632         cq = &adapter->mcc_obj.cq;
1633         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1634                         sizeof(struct be_mcc_compl)))
1635                 goto err;
1636
1637         /* Ask BE to create MCC compl queue; share TX's eq */
1638         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1639                 goto mcc_cq_free;
1640
1641         /* Alloc MCC queue */
1642         q = &adapter->mcc_obj.q;
1643         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1644                 goto mcc_cq_destroy;
1645
1646         /* Ask BE to create MCC queue */
1647         if (be_cmd_mccq_create(adapter, q, cq))
1648                 goto mcc_q_free;
1649
1650         return 0;
1651
1652 mcc_q_free:
1653         be_queue_free(adapter, q);
1654 mcc_cq_destroy:
1655         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1656 mcc_cq_free:
1657         be_queue_free(adapter, cq);
1658 err:
1659         return -1;
1660 }
1661
1662 static void be_tx_queues_destroy(struct be_adapter *adapter)
1663 {
1664         struct be_queue_info *q;
1665         struct be_tx_obj *txo;
1666         u8 i;
1667
1668         for_all_tx_queues(adapter, txo, i) {
1669                 q = &txo->q;
1670                 if (q->created)
1671                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1672                 be_queue_free(adapter, q);
1673
1674                 q = &txo->cq;
1675                 if (q->created)
1676                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1677                 be_queue_free(adapter, q);
1678         }
1679
1680         /* Clear any residual events */
1681         be_eq_clean(adapter, &adapter->tx_eq);
1682
1683         q = &adapter->tx_eq.q;
1684         if (q->created)
1685                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1686         be_queue_free(adapter, q);
1687 }
1688
1689 /* One TX event queue is shared by all TX compl qs */
1690 static int be_tx_queues_create(struct be_adapter *adapter)
1691 {
1692         struct be_queue_info *eq, *q, *cq;
1693         struct be_tx_obj *txo;
1694         u8 i;
1695
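             /* The TX EQ uses a fixed interrupt delay; adaptive interrupt
              * coalescing (AIC) is enabled only on the RX EQs */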
1696         adapter->tx_eq.max_eqd = 0;
1697         adapter->tx_eq.min_eqd = 0;
1698         adapter->tx_eq.cur_eqd = 96;
1699         adapter->tx_eq.enable_aic = false;
1700
1701         eq = &adapter->tx_eq.q;
1702         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1703                 sizeof(struct be_eq_entry)))
1704                 return -1;
1705
1706         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1707                 goto err;
1708         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1709
1710         for_all_tx_queues(adapter, txo, i) {
1711                 cq = &txo->cq;
1712                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1713                         sizeof(struct be_eth_tx_compl)))
1714                         goto err;
1715
1716                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1717                         goto err;
1718
1719                 q = &txo->q;
1720                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1721                         sizeof(struct be_eth_wrb)))
1722                         goto err;
1723
1724                 if (be_cmd_txq_create(adapter, q, cq))
1725                         goto err;
1726         }
1727         return 0;
1728
1729 err:
1730         be_tx_queues_destroy(adapter);
1731         return -1;
1732 }
1733
1734 static void be_rx_queues_destroy(struct be_adapter *adapter)
1735 {
1736         struct be_queue_info *q;
1737         struct be_rx_obj *rxo;
1738         int i;
1739
1740         for_all_rx_queues(adapter, rxo, i) {
1741                 be_queue_free(adapter, &rxo->q);
1742
1743                 q = &rxo->cq;
1744                 if (q->created)
1745                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1746                 be_queue_free(adapter, q);
1747
1748                 q = &rxo->rx_eq.q;
1749                 if (q->created)
1750                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1751                 be_queue_free(adapter, q);
1752         }
1753 }
1754
1755 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1756 {
1757         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1758                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1759                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1760         } else {
1761                 dev_warn(&adapter->pdev->dev,
1762                         "No support for multiple RX queues\n");
1763                 return 1;
1764         }
1765 }
1766
1767 static int be_rx_queues_create(struct be_adapter *adapter)
1768 {
1769         struct be_queue_info *eq, *q, *cq;
1770         struct be_rx_obj *rxo;
1771         int rc, i;
1772
1773         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1774                                 msix_enabled(adapter) ?
1775                                         adapter->num_msix_vec - 1 : 1);
1776         if (adapter->num_rx_qs != MAX_RX_QS)
1777                 dev_warn(&adapter->pdev->dev,
1778                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1779
1780         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1781         for_all_rx_queues(adapter, rxo, i) {
1782                 rxo->adapter = adapter;
1783                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1784                 rxo->rx_eq.enable_aic = true;
1785
1786                 /* EQ */
1787                 eq = &rxo->rx_eq.q;
1788                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1789                                         sizeof(struct be_eq_entry));
1790                 if (rc)
1791                         goto err;
1792
1793                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1794                 if (rc)
1795                         goto err;
1796
1797                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1798
1799                 /* CQ */
1800                 cq = &rxo->cq;
1801                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1802                                 sizeof(struct be_eth_rx_compl));
1803                 if (rc)
1804                         goto err;
1805
1806                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1807                 if (rc)
1808                         goto err;
1809
1810                 /* Rx Q - will be created in be_open() */
1811                 q = &rxo->q;
1812                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1813                                 sizeof(struct be_eth_rx_d));
1814                 if (rc)
1815                         goto err;
1816
1817         }
1818
1819         return 0;
1820 err:
1821         be_rx_queues_destroy(adapter);
1822         return -1;
1823 }
1824
1825 static bool event_peek(struct be_eq_obj *eq_obj)
1826 {
1827         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1828         if (!eqe->evt)
1829                 return false;
1830         else
1831                 return true;
1832 }
1833
1834 static irqreturn_t be_intx(int irq, void *dev)
1835 {
1836         struct be_adapter *adapter = dev;
1837         struct be_rx_obj *rxo;
1838         int isr, i, tx = 0, rx = 0;
1839
1840         if (lancer_chip(adapter)) {
1841                 if (event_peek(&adapter->tx_eq))
1842                         tx = event_handle(adapter, &adapter->tx_eq, false);
1843                 for_all_rx_queues(adapter, rxo, i) {
1844                         if (event_peek(&rxo->rx_eq))
1845                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1846                 }
1847
1848                 if (!(tx || rx))
1849                         return IRQ_NONE;
1850
1851         } else {
1852                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1853                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1854                 if (!isr)
1855                         return IRQ_NONE;
1856
1857                 if ((1 << adapter->tx_eq.eq_idx & isr))
1858                         event_handle(adapter, &adapter->tx_eq, false);
1859
1860                 for_all_rx_queues(adapter, rxo, i) {
1861                         if ((1 << rxo->rx_eq.eq_idx & isr))
1862                                 event_handle(adapter, &rxo->rx_eq, true);
1863                 }
1864         }
1865
1866         return IRQ_HANDLED;
1867 }
1868
1869 static irqreturn_t be_msix_rx(int irq, void *dev)
1870 {
1871         struct be_rx_obj *rxo = dev;
1872         struct be_adapter *adapter = rxo->adapter;
1873
1874         event_handle(adapter, &rxo->rx_eq, true);
1875
1876         return IRQ_HANDLED;
1877 }
1878
1879 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1880 {
1881         struct be_adapter *adapter = dev;
1882
1883         event_handle(adapter, &adapter->tx_eq, false);
1884
1885         return IRQ_HANDLED;
1886 }
1887
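     /* GRO is attempted only for error-free TCP completions */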
1888 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1889 {
1890         return rxcp->tcpf && !rxcp->err;
1891 }
1892
1893 static int be_poll_rx(struct napi_struct *napi, int budget)
1894 {
1895         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1896         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1897         struct be_adapter *adapter = rxo->adapter;
1898         struct be_queue_info *rx_cq = &rxo->cq;
1899         struct be_rx_compl_info *rxcp;
1900         u32 work_done;
1901
1902         rxo->stats.rx_polls++;
1903         for (work_done = 0; work_done < budget; work_done++) {
1904                 rxcp = be_rx_compl_get(rxo);
1905                 if (!rxcp)
1906                         break;
1907
1908                 /* Ignore flush completions */
1909                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1910                         if (do_gro(rxcp))
1911                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1912                         else
1913                                 be_rx_compl_process(adapter, rxo, rxcp);
1914                 } else if (rxcp->pkt_size == 0) {
1915                         be_rx_compl_discard(adapter, rxo, rxcp);
1916                 }
1917
1918                 be_rx_stats_update(rxo, rxcp);
1919         }
1920
1921         /* Refill the queue */
1922         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1923                 be_post_rx_frags(rxo, GFP_ATOMIC);
1924
1925         /* All consumed */
1926         if (work_done < budget) {
1927                 napi_complete(napi);
1928                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1929         } else {
1930                 /* More to be consumed; continue with interrupts disabled */
1931                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1932         }
1933         return work_done;
1934 }
1935
1936 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1937  * For TX/MCC we don't honour budget; consume everything
1938  */
1939 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1940 {
1941         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1942         struct be_adapter *adapter =
1943                 container_of(tx_eq, struct be_adapter, tx_eq);
1944         struct be_tx_obj *txo;
1945         struct be_eth_tx_compl *txcp;
1946         int tx_compl, mcc_compl, status = 0;
1947         u8 i;
1948         u16 num_wrbs;
1949
1950         for_all_tx_queues(adapter, txo, i) {
1951                 tx_compl = 0;
1952                 num_wrbs = 0;
1953                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1954                         num_wrbs += be_tx_compl_process(adapter, txo,
1955                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956                                         wrb_index, txcp));
1957                         tx_compl++;
1958                 }
1959                 if (tx_compl) {
1960                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1961
1962                         atomic_sub(num_wrbs, &txo->q.used);
1963
1964                         /* As Tx wrbs have been freed up, wake up netdev queue
1965                          * if it was stopped due to lack of tx wrbs. */
1966                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1967                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1968                                 netif_wake_subqueue(adapter->netdev, i);
1969                         }
1970
1971                         adapter->drv_stats.be_tx_events++;
1972                         txo->stats.be_tx_compl += tx_compl;
1973                 }
1974         }
1975
1976         mcc_compl = be_process_mcc(adapter, &status);
1977
1978         if (mcc_compl) {
1979                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1980                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1981         }
1982
1983         napi_complete(napi);
1984
1985         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1986         return 1;
1987 }
1988
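     /* Dump any unmasked Unrecoverable Error (UE) bits from PCI config
      * space; once a UE is seen the adapter is treated as dead.
      */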
1989 void be_detect_dump_ue(struct be_adapter *adapter)
1990 {
1991         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1992         u32 i;
1993
1994         pci_read_config_dword(adapter->pdev,
1995                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1996         pci_read_config_dword(adapter->pdev,
1997                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1998         pci_read_config_dword(adapter->pdev,
1999                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2000         pci_read_config_dword(adapter->pdev,
2001                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2002
2003         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2004         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2005
2006         if (ue_status_lo || ue_status_hi) {
2007                 adapter->ue_detected = true;
2008                 adapter->eeh_err = true;
2009                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2010         }
2011
2012         if (ue_status_lo) {
2013                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2014                         if (ue_status_lo & 1)
2015                                 dev_err(&adapter->pdev->dev,
2016                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2017                 }
2018         }
2019         if (ue_status_hi) {
2020                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2021                         if (ue_status_hi & 1)
2022                                 dev_err(&adapter->pdev->dev,
2023                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2024                 }
2025         }
2026
2027 }
2028
2029 static void be_worker(struct work_struct *work)
2030 {
2031         struct be_adapter *adapter =
2032                 container_of(work, struct be_adapter, work.work);
2033         struct be_rx_obj *rxo;
2034         struct be_tx_obj *txo;
2035         int i;
2036
2037         if (!adapter->ue_detected && !lancer_chip(adapter))
2038                 be_detect_dump_ue(adapter);
2039
2040         /* When interrupts are not yet enabled, just reap any pending
2041          * mcc completions */
2042         if (!netif_running(adapter->netdev)) {
2043                 int mcc_compl, status = 0;
2044
2045                 mcc_compl = be_process_mcc(adapter, &status);
2046
2047                 if (mcc_compl) {
2048                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2049                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2050                 }
2051
2052                 goto reschedule;
2053         }
2054
2055         if (!adapter->stats_cmd_sent) {
2056                 if (lancer_chip(adapter))
2057                         lancer_cmd_get_pport_stats(adapter,
2058                                                 &adapter->stats_cmd);
2059                 else
2060                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2061         }
2062
2063         for_all_tx_queues(adapter, txo, i)
2064                 be_tx_rate_update(txo);
2065
2066         for_all_rx_queues(adapter, rxo, i) {
2067                 be_rx_rate_update(rxo);
2068                 be_rx_eqd_update(adapter, rxo);
2069
2070                 if (rxo->rx_post_starved) {
2071                         rxo->rx_post_starved = false;
2072                         be_post_rx_frags(rxo, GFP_KERNEL);
2073                 }
2074         }
2075
2076 reschedule:
2077         adapter->work_counter++;
2078         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2079 }
2080
2081 static void be_msix_disable(struct be_adapter *adapter)
2082 {
2083         if (msix_enabled(adapter)) {
2084                 pci_disable_msix(adapter->pdev);
2085                 adapter->num_msix_vec = 0;
2086         }
2087 }
2088
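     /* pci_enable_msix() returns 0 on success, a negative errno on error,
      * or a positive count of the vectors actually available; in the last
      * case retry with that count if it still covers the minimum of one
      * RX and one TX/MCC vector.
      */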
2089 static void be_msix_enable(struct be_adapter *adapter)
2090 {
2091 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2092         int i, status, num_vec;
2093
2094         num_vec = be_num_rxqs_want(adapter) + 1;
2095
2096         for (i = 0; i < num_vec; i++)
2097                 adapter->msix_entries[i].entry = i;
2098
2099         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2100         if (status == 0) {
2101                 goto done;
2102         } else if (status >= BE_MIN_MSIX_VECTORS) {
2103                 num_vec = status;
2104                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2105                                 num_vec) == 0)
2106                         goto done;
2107         }
2108         return;
2109 done:
2110         adapter->num_msix_vec = num_vec;
2111         return;
2112 }
2113
2114 static void be_sriov_enable(struct be_adapter *adapter)
2115 {
2116         be_check_sriov_fn_type(adapter);
2117 #ifdef CONFIG_PCI_IOV
2118         if (be_physfn(adapter) && num_vfs) {
2119                 int status, pos;
2120                 u16 nvfs;
2121
2122                 pos = pci_find_ext_capability(adapter->pdev,
2123                                                 PCI_EXT_CAP_ID_SRIOV);
2124                 pci_read_config_word(adapter->pdev,
2125                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2126
2127                 if (num_vfs > nvfs) {
2128                         dev_info(&adapter->pdev->dev,
2129                                         "Device supports %d VFs and not %d\n",
2130                                         nvfs, num_vfs);
2131                         num_vfs = nvfs;
2132                 }
2133
2134                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2135                 adapter->sriov_enabled = status ? false : true;
2136         }
2137 #endif
2138 }
2139
2140 static void be_sriov_disable(struct be_adapter *adapter)
2141 {
2142 #ifdef CONFIG_PCI_IOV
2143         if (adapter->sriov_enabled) {
2144                 pci_disable_sriov(adapter->pdev);
2145                 adapter->sriov_enabled = false;
2146         }
2147 #endif
2148 }
2149
2150 static inline int be_msix_vec_get(struct be_adapter *adapter,
2151                                         struct be_eq_obj *eq_obj)
2152 {
2153         return adapter->msix_entries[eq_obj->eq_idx].vector;
2154 }
2155
2156 static int be_request_irq(struct be_adapter *adapter,
2157                 struct be_eq_obj *eq_obj,
2158                 void *handler, char *desc, void *context)
2159 {
2160         struct net_device *netdev = adapter->netdev;
2161         int vec;
2162
2163         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2164         vec = be_msix_vec_get(adapter, eq_obj);
2165         return request_irq(vec, handler, 0, eq_obj->desc, context);
2166 }
2167
2168 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2169                         void *context)
2170 {
2171         int vec = be_msix_vec_get(adapter, eq_obj);
2172         free_irq(vec, context);
2173 }
2174
2175 static int be_msix_register(struct be_adapter *adapter)
2176 {
2177         struct be_rx_obj *rxo;
2178         int status, i;
2179         char qname[10];
2180
2181         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2182                                 adapter);
2183         if (status)
2184                 goto err;
2185
2186         for_all_rx_queues(adapter, rxo, i) {
2187                 sprintf(qname, "rxq%d", i);
2188                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2189                                 qname, rxo);
2190                 if (status)
2191                         goto err_msix;
2192         }
2193
2194         return 0;
2195
2196 err_msix:
2197         be_free_irq(adapter, &adapter->tx_eq, adapter);
2198
2199         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2200                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2201
2202 err:
2203         dev_warn(&adapter->pdev->dev,
2204                 "MSIX Request IRQ failed - err %d\n", status);
2205         be_msix_disable(adapter);
2206         return status;
2207 }
2208
2209 static int be_irq_register(struct be_adapter *adapter)
2210 {
2211         struct net_device *netdev = adapter->netdev;
2212         int status;
2213
2214         if (msix_enabled(adapter)) {
2215                 status = be_msix_register(adapter);
2216                 if (status == 0)
2217                         goto done;
2218                 /* INTx is not supported for VF */
2219                 if (!be_physfn(adapter))
2220                         return status;
2221         }
2222
2223         /* INTx */
2224         netdev->irq = adapter->pdev->irq;
2225         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2226                         adapter);
2227         if (status) {
2228                 dev_err(&adapter->pdev->dev,
2229                         "INTx request IRQ failed - err %d\n", status);
2230                 return status;
2231         }
2232 done:
2233         adapter->isr_registered = true;
2234         return 0;
2235 }
2236
2237 static void be_irq_unregister(struct be_adapter *adapter)
2238 {
2239         struct net_device *netdev = adapter->netdev;
2240         struct be_rx_obj *rxo;
2241         int i;
2242
2243         if (!adapter->isr_registered)
2244                 return;
2245
2246         /* INTx */
2247         if (!msix_enabled(adapter)) {
2248                 free_irq(netdev->irq, adapter);
2249                 goto done;
2250         }
2251
2252         /* MSIx */
2253         be_free_irq(adapter, &adapter->tx_eq, adapter);
2254
2255         for_all_rx_queues(adapter, rxo, i)
2256                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2257
2258 done:
2259         adapter->isr_registered = false;
2260 }
2261
2262 static void be_rx_queues_clear(struct be_adapter *adapter)
2263 {
2264         struct be_queue_info *q;
2265         struct be_rx_obj *rxo;
2266         int i;
2267
2268         for_all_rx_queues(adapter, rxo, i) {
2269                 q = &rxo->q;
2270                 if (q->created) {
2271                         be_cmd_rxq_destroy(adapter, q);
2272                         /* After the rxq is invalidated, wait for a grace time
2273                          * of 1ms for all dma to end and the flush compl to
2274                          * arrive
2275                          */
2276                         mdelay(1);
2277                         be_rx_q_clean(adapter, rxo);
2278                 }
2279
2280                 /* Clear any residual events */
2281                 q = &rxo->rx_eq.q;
2282                 if (q->created)
2283                         be_eq_clean(adapter, &rxo->rx_eq);
2284         }
2285 }
2286
2287 static int be_close(struct net_device *netdev)
2288 {
2289         struct be_adapter *adapter = netdev_priv(netdev);
2290         struct be_rx_obj *rxo;
2291         struct be_tx_obj *txo;
2292         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2293         int vec, i;
2294
2295         be_async_mcc_disable(adapter);
2296
2297         netif_carrier_off(netdev);
2298         adapter->link_up = false;
2299
2300         if (!lancer_chip(adapter))
2301                 be_intr_set(adapter, false);
2302
2303         for_all_rx_queues(adapter, rxo, i)
2304                 napi_disable(&rxo->rx_eq.napi);
2305
2306         napi_disable(&tx_eq->napi);
2307
2308         if (lancer_chip(adapter)) {
2309                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2310                 for_all_rx_queues(adapter, rxo, i)
2311                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2312                 for_all_tx_queues(adapter, txo, i)
2313                          be_cq_notify(adapter, txo->cq.id, false, 0);
2314         }
2315
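             /* make sure no irq handler for our vectors is still running
              * before they are unregistered */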
2316         if (msix_enabled(adapter)) {
2317                 vec = be_msix_vec_get(adapter, tx_eq);
2318                 synchronize_irq(vec);
2319
2320                 for_all_rx_queues(adapter, rxo, i) {
2321                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2322                         synchronize_irq(vec);
2323                 }
2324         } else {
2325                 synchronize_irq(netdev->irq);
2326         }
2327         be_irq_unregister(adapter);
2328
2329         /* Wait for all pending tx completions to arrive so that
2330          * all tx skbs are freed.
2331          */
2332         for_all_tx_queues(adapter, txo, i)
2333                 be_tx_compl_clean(adapter, txo);
2334
2335         be_rx_queues_clear(adapter);
2336         return 0;
2337 }
2338
2339 static int be_rx_queues_setup(struct be_adapter *adapter)
2340 {
2341         struct be_rx_obj *rxo;
2342         int rc, i;
2343         u8 rsstable[MAX_RSS_QS];
2344
2345         for_all_rx_queues(adapter, rxo, i) {
2346                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2347                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2348                         adapter->if_handle,
2349                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2350                 if (rc)
2351                         return rc;
2352         }
2353
2354         if (be_multi_rxq(adapter)) {
2355                 for_all_rss_queues(adapter, rxo, i)
2356                         rsstable[i] = rxo->rss_id;
2357
2358                 rc = be_cmd_rss_config(adapter, rsstable,
2359                         adapter->num_rx_qs - 1);
2360                 if (rc)
2361                         return rc;
2362         }
2363
2364         /* First time posting */
2365         for_all_rx_queues(adapter, rxo, i) {
2366                 be_post_rx_frags(rxo, GFP_KERNEL);
2367                 napi_enable(&rxo->rx_eq.napi);
2368         }
2369         return 0;
2370 }
2371
2372 static int be_open(struct net_device *netdev)
2373 {
2374         struct be_adapter *adapter = netdev_priv(netdev);
2375         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2376         struct be_rx_obj *rxo;
2377         bool link_up;
2378         int status, i;
2379         u8 mac_speed;
2380         u16 link_speed;
2381
2382         status = be_rx_queues_setup(adapter);
2383         if (status)
2384                 goto err;
2385
2386         napi_enable(&tx_eq->napi);
2387
2388         be_irq_register(adapter);
2389
2390         if (!lancer_chip(adapter))
2391                 be_intr_set(adapter, true);
2392
2393         /* The evt queues are created in unarmed state; arm them */
2394         for_all_rx_queues(adapter, rxo, i) {
2395                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2396                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2397         }
2398         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2399
2400         /* Now that interrupts are on we can process async mcc */
2401         be_async_mcc_enable(adapter);
2402
2403         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2404                         &link_speed, 0);
2405         if (status)
2406                 goto err;
2407         be_link_status_update(adapter, link_up);
2408
2409         if (be_physfn(adapter)) {
2410                 status = be_vid_config(adapter, false, 0);
2411                 if (status)
2412                         goto err;
2413
2414                 status = be_cmd_set_flow_control(adapter,
2415                                 adapter->tx_fc, adapter->rx_fc);
2416                 if (status)
2417                         goto err;
2418         }
2419
2420         return 0;
2421 err:
2422         be_close(adapter->netdev);
2423         return -EIO;
2424 }
2425
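     /* Program (enable) or clear (disable) the magic-packet WoL filter and
      * set the PCI wake capability for D3hot/D3cold to match; a zeroed MAC
      * is used to clear the filter.
      */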
2426 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2427 {
2428         struct be_dma_mem cmd;
2429         int status = 0;
2430         u8 mac[ETH_ALEN];
2431
2432         memset(mac, 0, ETH_ALEN);
2433
2434         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2435         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2436                                     GFP_KERNEL);
2437         if (cmd.va == NULL)
2438                 return -1;
2439         memset(cmd.va, 0, cmd.size);
2440
2441         if (enable) {
2442                 status = pci_write_config_dword(adapter->pdev,
2443                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2444                 if (status) {
2445                         dev_err(&adapter->pdev->dev,
2446                                 "Could not enable Wake-on-lan\n");
2447                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2448                                           cmd.dma);
2449                         return status;
2450                 }
2451                 status = be_cmd_enable_magic_wol(adapter,
2452                                 adapter->netdev->dev_addr, &cmd);
2453                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2454                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2455         } else {
2456                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2457                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2458                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2459         }
2460
2461         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2462         return status;
2463 }
2464
2465 /*
2466  * Generate a seed MAC address from the PF MAC Address using jhash.
2467  * MAC addresses for VFs are assigned incrementally starting from the seed.
2468  * These addresses are programmed in the ASIC by the PF and the VF driver
2469  * queries for the MAC address during its probe.
2470  */
2471 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2472 {
2473         u32 vf = 0;
2474         int status = 0;
2475         u8 mac[ETH_ALEN];
2476
2477         be_vf_eth_addr_generate(adapter, mac);
2478
2479         for (vf = 0; vf < num_vfs; vf++) {
2480                 status = be_cmd_pmac_add(adapter, mac,
2481                                         adapter->vf_cfg[vf].vf_if_handle,
2482                                         &adapter->vf_cfg[vf].vf_pmac_id,
2483                                         vf + 1);
2484                 if (status)
2485                         dev_err(&adapter->pdev->dev,
2486                                 "Mac address add failed for VF %d\n", vf);
2487                 else
2488                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2489
2490                 mac[5] += 1;
2491         }
2492         return status;
2493 }
2494
2495 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2496 {
2497         u32 vf;
2498
2499         for (vf = 0; vf < num_vfs; vf++) {
2500                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2501                         be_cmd_pmac_del(adapter,
2502                                         adapter->vf_cfg[vf].vf_if_handle,
2503                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2504         }
2505 }
2506
2507 static int be_setup(struct be_adapter *adapter)
2508 {
2509         struct net_device *netdev = adapter->netdev;
2510         u32 cap_flags, en_flags, vf = 0;
2511         int status;
2512         u8 mac[ETH_ALEN];
2513
2514         be_cmd_req_native_mode(adapter);
2515
2516         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2517                                 BE_IF_FLAGS_BROADCAST |
2518                                 BE_IF_FLAGS_MULTICAST;
2519
2520         if (be_physfn(adapter)) {
2521                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2522                                 BE_IF_FLAGS_PROMISCUOUS |
2523                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2524                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2525
2526                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2527                         cap_flags |= BE_IF_FLAGS_RSS;
2528                         en_flags |= BE_IF_FLAGS_RSS;
2529                 }
2530         }
2531
2532         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2533                         netdev->dev_addr, false/* pmac_invalid */,
2534                         &adapter->if_handle, &adapter->pmac_id, 0);
2535         if (status != 0)
2536                 goto do_none;
2537
2538         if (be_physfn(adapter)) {
2539                 if (adapter->sriov_enabled) {
2540                         while (vf < num_vfs) {
2541                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2542                                                         BE_IF_FLAGS_BROADCAST;
2543                                 status = be_cmd_if_create(adapter, cap_flags,
2544                                         en_flags, mac, true,
2545                                         &adapter->vf_cfg[vf].vf_if_handle,
2546                                         NULL, vf+1);
2547                                 if (status) {
2548                                         dev_err(&adapter->pdev->dev,
2549                                         "Interface Create failed for VF %d\n",
2550                                         vf);
2551                                         goto if_destroy;
2552                                 }
2553                                 adapter->vf_cfg[vf].vf_pmac_id =
2554                                                         BE_INVALID_PMAC_ID;
2555                                 vf++;
2556                         }
2557                 }
2558         } else {
2559                 status = be_cmd_mac_addr_query(adapter, mac,
2560                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2561                 if (!status) {
2562                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2563                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2564                 }
2565         }
2566
2567         status = be_tx_queues_create(adapter);
2568         if (status != 0)
2569                 goto if_destroy;
2570
2571         status = be_rx_queues_create(adapter);
2572         if (status != 0)
2573                 goto tx_qs_destroy;
2574
2575         /* Allow all priorities by default. A GRP5 evt may modify this */
2576         adapter->vlan_prio_bmap = 0xff;
2577
2578         status = be_mcc_queues_create(adapter);
2579         if (status != 0)
2580                 goto rx_qs_destroy;
2581
2582         adapter->link_speed = -1;
2583
2584         return 0;
2585
2586 rx_qs_destroy:
2587         be_rx_queues_destroy(adapter);
2588 tx_qs_destroy:
2589         be_tx_queues_destroy(adapter);
2590 if_destroy:
2591         if (be_physfn(adapter) && adapter->sriov_enabled)
2592                 for (vf = 0; vf < num_vfs; vf++)
2593                         if (adapter->vf_cfg[vf].vf_if_handle)
2594                                 be_cmd_if_destroy(adapter,
2595                                         adapter->vf_cfg[vf].vf_if_handle,
2596                                         vf + 1);
2597         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2598 do_none:
2599         return status;
2600 }
2601
2602 static int be_clear(struct be_adapter *adapter)
2603 {
2604         int vf;
2605
2606         if (be_physfn(adapter) && adapter->sriov_enabled)
2607                 be_vf_eth_addr_rem(adapter);
2608
2609         be_mcc_queues_destroy(adapter);
2610         be_rx_queues_destroy(adapter);
2611         be_tx_queues_destroy(adapter);
2612         adapter->eq_next_idx = 0;
2613
2614         if (be_physfn(adapter) && adapter->sriov_enabled)
2615                 for (vf = 0; vf < num_vfs; vf++)
2616                         if (adapter->vf_cfg[vf].vf_if_handle)
2617                                 be_cmd_if_destroy(adapter,
2618                                         adapter->vf_cfg[vf].vf_if_handle,
2619                                         vf + 1);
2620
2621         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2622
2623         adapter->be3_native = 0;
2624
2625         /* tell fw we're done with firing cmds */
2626         be_cmd_fw_clean(adapter);
2627         return 0;
2628 }
2629
2630
2631 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2632 static bool be_flash_redboot(struct be_adapter *adapter,
2633                         const u8 *p, u32 img_start, int image_size,
2634                         int hdr_size)
2635 {
2636         u32 crc_offset;
2637         u8 flashed_crc[4];
2638         int status;
2639
2640         crc_offset = hdr_size + img_start + image_size - 4;
2641
2642         p += crc_offset;
2643
2644         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2645                         (image_size - 4));
2646         if (status) {
2647                 dev_err(&adapter->pdev->dev,
2648                 "could not get crc from flash, not flashing redboot\n");
2649                 return false;
2650         }
2651
2652         /* update redboot only if crc does not match */
2653         if (!memcmp(flashed_crc, p, 4))
2654                 return false;
2655         else
2656                 return true;
2657 }
2658
2659 static int be_flash_data(struct be_adapter *adapter,
2660                         const struct firmware *fw,
2661                         struct be_dma_mem *flash_cmd, int num_of_images)
2662
2663 {
2664         int status = 0, i, filehdr_size = 0;
2665         u32 total_bytes = 0, flash_op;
2666         int num_bytes;
2667         const u8 *p = fw->data;
2668         struct be_cmd_write_flashrom *req = flash_cmd->va;
2669         const struct flash_comp *pflashcomp;
2670         int num_comp;
2671
2672         static const struct flash_comp gen3_flash_types[9] = {
2673                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2674                         FLASH_IMAGE_MAX_SIZE_g3},
2675                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2676                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2677                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2678                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2679                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2680                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2681                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2682                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2683                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2684                         FLASH_IMAGE_MAX_SIZE_g3},
2685                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2686                         FLASH_IMAGE_MAX_SIZE_g3},
2687                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2688                         FLASH_IMAGE_MAX_SIZE_g3},
2689                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2690                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2691         };
2692         static const struct flash_comp gen2_flash_types[8] = {
2693                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2694                         FLASH_IMAGE_MAX_SIZE_g2},
2695                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2696                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2697                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2698                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2699                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2700                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2701                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2702                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2703                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2704                         FLASH_IMAGE_MAX_SIZE_g2},
2705                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2706                         FLASH_IMAGE_MAX_SIZE_g2},
2707                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2708                          FLASH_IMAGE_MAX_SIZE_g2}
2709         };
2710
2711         if (adapter->generation == BE_GEN3) {
2712                 pflashcomp = gen3_flash_types;
2713                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2714                 num_comp = ARRAY_SIZE(gen3_flash_types);
2715         } else {
2716                 pflashcomp = gen2_flash_types;
2717                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2718                 num_comp = ARRAY_SIZE(gen2_flash_types);
2719         }
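             /* Flash each component in the table; the NCSI image is skipped
              * on firmware older than 3.102.148.0 and redboot is flashed
              * only when its CRC differs from what is already in flash */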
2720         for (i = 0; i < num_comp; i++) {
2721                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2722                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2723                         continue;
2724                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2725                         (!be_flash_redboot(adapter, fw->data,
2726                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2727                         (num_of_images * sizeof(struct image_hdr)))))
2728                         continue;
2729                 p = fw->data;
2730                 p += filehdr_size + pflashcomp[i].offset
2731                         + (num_of_images * sizeof(struct image_hdr));
2732                 if (p + pflashcomp[i].size > fw->data + fw->size)
2733                         return -1;
2734                 total_bytes = pflashcomp[i].size;
2735                 while (total_bytes) {
2736                         if (total_bytes > 32*1024)
2737                                 num_bytes = 32*1024;
2738                         else
2739                                 num_bytes = total_bytes;
2740                         total_bytes -= num_bytes;
2741
2742                         if (!total_bytes)
2743                                 flash_op = FLASHROM_OPER_FLASH;
2744                         else
2745                                 flash_op = FLASHROM_OPER_SAVE;
2746                         memcpy(req->params.data_buf, p, num_bytes);
2747                         p += num_bytes;
2748                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2749                                 pflashcomp[i].optype, flash_op, num_bytes);
2750                         if (status) {
2751                                 dev_err(&adapter->pdev->dev,
2752                                         "cmd to write to flash rom failed.\n");
2753                                 return -1;
2754                         }
2755                 }
2756         }
2757         return 0;
2758 }
2759
2760 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2761 {
2762         if (fhdr == NULL)
2763                 return 0;
2764         if (fhdr->build[0] == '3')
2765                 return BE_GEN3;
2766         else if (fhdr->build[0] == '2')
2767                 return BE_GEN2;
2768         else
2769                 return 0;
2770 }
2771
2772 static int lancer_fw_download(struct be_adapter *adapter,
2773                                 const struct firmware *fw)
2774 {
2775 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2776 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2777         struct be_dma_mem flash_cmd;
2778         const u8 *data_ptr = NULL;
2779         u8 *dest_image_ptr = NULL;
2780         size_t image_size = 0;
2781         u32 chunk_size = 0;
2782         u32 data_written = 0;
2783         u32 offset = 0;
2784         int status = 0;
2785         u8 add_status = 0;
2786
2787         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2788                 dev_err(&adapter->pdev->dev,
2789                         "FW Image not properly aligned. "
2790                         "Length must be 4-byte aligned.\n");
2791                 status = -EINVAL;
2792                 goto lancer_fw_exit;
2793         }
2794
2795         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2796                                 + LANCER_FW_DOWNLOAD_CHUNK;
2797         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2798                                                 &flash_cmd.dma, GFP_KERNEL);
2799         if (!flash_cmd.va) {
2800                 status = -ENOMEM;
2801                 dev_err(&adapter->pdev->dev,
2802                         "Memory allocation failure while flashing\n");
2803                 goto lancer_fw_exit;
2804         }
2805
2806         dest_image_ptr = flash_cmd.va +
2807                                 sizeof(struct lancer_cmd_req_write_object);
2808         image_size = fw->size;
2809         data_ptr = fw->data;
2810
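             /* stream the image to the adapter in 32KB chunks; the
              * zero-length write below commits the downloaded firmware */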
2811         while (image_size) {
2812                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2813
2814                 /* Copy the image chunk content. */
2815                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2816
2817                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2818                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2819                                 &data_written, &add_status);
2820
2821                 if (status)
2822                         break;
2823
2824                 offset += data_written;
2825                 data_ptr += data_written;
2826                 image_size -= data_written;
2827         }
2828
2829         if (!status) {
2830                 /* Commit the FW written */
2831                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2832                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2833                                         &data_written, &add_status);
2834         }
2835
2836         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2837                                 flash_cmd.dma);
2838         if (status) {
2839                 dev_err(&adapter->pdev->dev,
2840                         "Firmware load error. "
2841                         "Status code: 0x%x Additional Status: 0x%x\n",
2842                         status, add_status);
2843                 goto lancer_fw_exit;
2844         }
2845
2846         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2847 lancer_fw_exit:
2848         return status;
2849 }
2850
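     /* Flash a BE2/BE3 UFI image. The UFI generation encoded in the
      * file header must match the adapter generation; gen3 UFIs carry
      * multiple image headers that are walked to locate the firmware
      * image (image id 1).
      */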
2851 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2852 {
2853         struct flash_file_hdr_g2 *fhdr;
2854         struct flash_file_hdr_g3 *fhdr3;
2855         struct image_hdr *img_hdr_ptr = NULL;
2856         struct be_dma_mem flash_cmd;
2857         const u8 *p;
2858         int status = 0, i = 0, num_imgs = 0;
2859
2860         p = fw->data;
2861         fhdr = (struct flash_file_hdr_g2 *) p;
2862
2863         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
2864         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2865                                           &flash_cmd.dma, GFP_KERNEL);
2866         if (!flash_cmd.va) {
2867                 status = -ENOMEM;
2868                 dev_err(&adapter->pdev->dev,
2869                         "Memory allocation failure while flashing\n");
2870                 goto be_fw_exit;
2871         }
2872
2873         if ((adapter->generation == BE_GEN3) &&
2874                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2875                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2876                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2877                 for (i = 0; i < num_imgs; i++) {
2878                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2879                                         (sizeof(struct flash_file_hdr_g3) +
2880                                          i * sizeof(struct image_hdr)));
2881                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2882                                 status = be_flash_data(adapter, fw, &flash_cmd,
2883                                                         num_imgs);
2884                 }
2885         } else if ((adapter->generation == BE_GEN2) &&
2886                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2887                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2888         } else {
2889                 dev_err(&adapter->pdev->dev,
2890                         "UFI and interface are not compatible for flashing\n");
2891                 status = -EINVAL;
2892         }
2893
2894         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2895                           flash_cmd.dma);
2896         if (status) {
2897                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2898                 goto be_fw_exit;
2899         }
2900
2901         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2902
2903 be_fw_exit:
2904         return status;
2905 }
2906
2907 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2908 {
2909         const struct firmware *fw;
2910         int status;
2911
2912         if (!netif_running(adapter->netdev)) {
2913                 dev_err(&adapter->pdev->dev,
2914                         "Firmware load not allowed (interface is down)\n");
2915                 return -ENETDOWN;
2916         }
2917
2918         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2919         if (status)
2920                 goto fw_exit;
2921
2922         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2923
2924         if (lancer_chip(adapter))
2925                 status = lancer_fw_download(adapter, fw);
2926         else
2927                 status = be_fw_download(adapter, fw);
2928
2929 fw_exit:
2930         release_firmware(fw);
2931         return status;
2932 }
2933
2934 static const struct net_device_ops be_netdev_ops = {
2935         .ndo_open               = be_open,
2936         .ndo_stop               = be_close,
2937         .ndo_start_xmit         = be_xmit,
2938         .ndo_set_rx_mode        = be_set_multicast_list,
2939         .ndo_set_mac_address    = be_mac_addr_set,
2940         .ndo_change_mtu         = be_change_mtu,
2941         .ndo_validate_addr      = eth_validate_addr,
2942         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2943         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2944         .ndo_set_vf_mac         = be_set_vf_mac,
2945         .ndo_set_vf_vlan        = be_set_vf_vlan,
2946         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2947         .ndo_get_vf_config      = be_get_vf_config,
2948 };
2949
2950 static void be_netdev_init(struct net_device *netdev)
2951 {
2952         struct be_adapter *adapter = netdev_priv(netdev);
2953         struct be_rx_obj *rxo;
2954         int i;
2955
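             /* hw_features holds the offloads a user may toggle via
              * ethtool; features additionally carries VLAN rx/filter,
              * which this driver keeps always enabled.
              */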
2956         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2957                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2958                 NETIF_F_HW_VLAN_TX;
2959         if (be_multi_rxq(adapter))
2960                 netdev->hw_features |= NETIF_F_RXHASH;
2961
2962         netdev->features |= netdev->hw_features |
2963                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2964
2965         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2966                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2967
2968         netdev->flags |= IFF_MULTICAST;
2969
2970         /* Default settings for Rx and Tx flow control */
2971         adapter->rx_fc = true;
2972         adapter->tx_fc = true;
2973
2974         netif_set_gso_max_size(netdev, 65535);
2975
2976         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2977
2978         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2979
2980         for_all_rx_queues(adapter, rxo, i)
2981                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2982                                 BE_NAPI_WEIGHT);
2983
2984         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2985                 BE_NAPI_WEIGHT);
2986 }
2987
2988 static void be_unmap_pci_bars(struct be_adapter *adapter)
2989 {
2990         if (adapter->csr)
2991                 iounmap(adapter->csr);
2992         if (adapter->db)
2993                 iounmap(adapter->db);
2994         if (adapter->pcicfg && be_physfn(adapter))
2995                 iounmap(adapter->pcicfg);
2996 }
2997
2998 static int be_map_pci_bars(struct be_adapter *adapter)
2999 {
3000         u8 __iomem *addr;
3001         int pcicfg_reg, db_reg;
3002
3003         if (lancer_chip(adapter)) {
3004                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3005                         pci_resource_len(adapter->pdev, 0));
3006                 if (addr == NULL)
3007                         return -ENOMEM;
3008                 adapter->db = addr;
3009                 return 0;
3010         }
3011
3012         if (be_physfn(adapter)) {
3013                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3014                                 pci_resource_len(adapter->pdev, 2));
3015                 if (addr == NULL)
3016                         return -ENOMEM;
3017                 adapter->csr = addr;
3018         }
3019
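             /* BAR layout differs by generation: BE2 maps its pcicfg
              * registers via BAR 1 and doorbells via BAR 4; BE3 PFs use
              * BAR 0 and BAR 4, while BE3 VFs take doorbells from BAR 0.
              */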
3020         if (adapter->generation == BE_GEN2) {
3021                 pcicfg_reg = 1;
3022                 db_reg = 4;
3023         } else {
3024                 pcicfg_reg = 0;
3025                 if (be_physfn(adapter))
3026                         db_reg = 4;
3027                 else
3028                         db_reg = 0;
3029         }
3030         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3031                                 pci_resource_len(adapter->pdev, db_reg));
3032         if (addr == NULL)
3033                 goto pci_map_err;
3034         adapter->db = addr;
3035
3036         if (be_physfn(adapter)) {
3037                 addr = ioremap_nocache(
3038                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3039                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3040                 if (addr == NULL)
3041                         goto pci_map_err;
3042                 adapter->pcicfg = addr;
3043         } else
3044                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3045
3046         return 0;
3047 pci_map_err:
3048         be_unmap_pci_bars(adapter);
3049         return -ENOMEM;
3050 }
3051
3053 static void be_ctrl_cleanup(struct be_adapter *adapter)
3054 {
3055         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3056
3057         be_unmap_pci_bars(adapter);
3058
3059         if (mem->va)
3060                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3061                                   mem->dma);
3062
3063         mem = &adapter->mc_cmd_mem;
3064         if (mem->va)
3065                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3066                                   mem->dma);
3067 }
3068
3069 static int be_ctrl_init(struct be_adapter *adapter)
3070 {
3071         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3072         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3073         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3074         int status;
3075
3076         status = be_map_pci_bars(adapter);
3077         if (status)
3078                 goto done;
3079
3080         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3081         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3082                                                 mbox_mem_alloc->size,
3083                                                 &mbox_mem_alloc->dma,
3084                                                 GFP_KERNEL);
3085         if (!mbox_mem_alloc->va) {
3086                 status = -ENOMEM;
3087                 goto unmap_pci_bars;
3088         }
3089
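             /* Carve a 16-byte-aligned mailbox out of the buffer that
              * was over-allocated by 16 bytes above; both the VA and the
              * DMA address of the aligned view are used by the MPU.
              */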
3090         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3091         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3092         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3093         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3094
3095         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3096         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3097                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3098                                             GFP_KERNEL);
3099         if (mc_cmd_mem->va == NULL) {
3100                 status = -ENOMEM;
3101                 goto free_mbox;
3102         }
3103         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3104
3105         mutex_init(&adapter->mbox_lock);
3106         spin_lock_init(&adapter->mcc_lock);
3107         spin_lock_init(&adapter->mcc_cq_lock);
3108
3109         init_completion(&adapter->flash_compl);
3110         pci_save_state(adapter->pdev);
3111         return 0;
3112
3113 free_mbox:
3114         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3115                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3116
3117 unmap_pci_bars:
3118         be_unmap_pci_bars(adapter);
3119
3120 done:
3121         return status;
3122 }
3123
3124 static void be_stats_cleanup(struct be_adapter *adapter)
3125 {
3126         struct be_dma_mem *cmd = &adapter->stats_cmd;
3127
3128         if (cmd->va)
3129                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3130                                   cmd->va, cmd->dma);
3131 }
3132
3133 static int be_stats_init(struct be_adapter *adapter)
3134 {
3135         struct be_dma_mem *cmd = &adapter->stats_cmd;
3136
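             /* Size the stats DMA buffer for the command variant this
              * chip uses: v0 for BE2, pport stats for Lancer, else v1.
              */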
3137         if (adapter->generation == BE_GEN2) {
3138                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3139         } else {
3140                 if (lancer_chip(adapter))
3141                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3142                 else
3143                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3144         }
3145         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3146                                      GFP_KERNEL);
3147         if (cmd->va == NULL)
3148                 return -ENOMEM;
3149         memset(cmd->va, 0, cmd->size);
3150         return 0;
3151 }
3152
3153 static void __devexit be_remove(struct pci_dev *pdev)
3154 {
3155         struct be_adapter *adapter = pci_get_drvdata(pdev);
3156
3157         if (!adapter)
3158                 return;
3159
3160         cancel_delayed_work_sync(&adapter->work);
3161
3162         unregister_netdev(adapter->netdev);
3163
3164         be_clear(adapter);
3165
3166         be_stats_cleanup(adapter);
3167
3168         be_ctrl_cleanup(adapter);
3169
3170         kfree(adapter->vf_cfg);
3171         be_sriov_disable(adapter);
3172
3173         be_msix_disable(adapter);
3174
3175         pci_set_drvdata(pdev, NULL);
3176         pci_release_regions(pdev);
3177         pci_disable_device(pdev);
3178
3179         free_netdev(adapter->netdev);
3180 }
3181
3182 static int be_get_config(struct be_adapter *adapter)
3183 {
3184         int status;
3185         u8 mac[ETH_ALEN];
3186
3187         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3188         if (status)
3189                 return status;
3190
3191         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3192                         &adapter->function_mode, &adapter->function_caps);
3193         if (status)
3194                 return status;
3195
3196         memset(mac, 0, ETH_ALEN);
3197
3198         /* A default permanent address is given to each VF for Lancer */
3199         if (be_physfn(adapter) || lancer_chip(adapter)) {
3200                 status = be_cmd_mac_addr_query(adapter, mac,
3201                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3202
3203                 if (status)
3204                         return status;
3205
3206                 if (!is_valid_ether_addr(mac))
3207                         return -EADDRNOTAVAIL;
3208
3209                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3210                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3211         }
3212
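             /* function_mode bit 0x400 selects a mode in which only a
              * quarter of the VLAN table is available to this function.
              */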
3213         if (adapter->function_mode & 0x400)
3214                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3215         else
3216                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3217
3218         status = be_cmd_get_cntl_attributes(adapter);
3219         if (status)
3220                 return status;
3221
3222         if ((num_vfs && adapter->sriov_enabled) ||
3223                 (adapter->function_mode & 0x400) ||
3224                 lancer_chip(adapter) || !be_physfn(adapter)) {
3225                 adapter->num_tx_qs = 1;
3226                 netif_set_real_num_tx_queues(adapter->netdev,
3227                         adapter->num_tx_qs);
3228         } else {
3229                 adapter->num_tx_qs = MAX_TX_QS;
3230         }
3231
3232         return 0;
3233 }
3234
3235 static int be_dev_family_check(struct be_adapter *adapter)
3236 {
3237         struct pci_dev *pdev = adapter->pdev;
3238         u32 sli_intf = 0, if_type;
3239
3240         switch (pdev->device) {
3241         case BE_DEVICE_ID1:
3242         case OC_DEVICE_ID1:
3243                 adapter->generation = BE_GEN2;
3244                 break;
3245         case BE_DEVICE_ID2:
3246         case OC_DEVICE_ID2:
3247                 adapter->generation = BE_GEN3;
3248                 break;
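             /* SLI-4 style devices (Lancer): validate the SLI interface
              * register and record the SLI family before treating them
              * as gen3.
              */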
3249         case OC_DEVICE_ID3:
3250         case OC_DEVICE_ID4:
3251                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3252                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3253                                                 SLI_INTF_IF_TYPE_SHIFT;
3254
3255                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3256                         if_type != 0x02) {
3257                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3258                         return -EINVAL;
3259                 }
3260                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3261                                          SLI_INTF_FAMILY_SHIFT);
3262                 adapter->generation = BE_GEN3;
3263                 break;
3264         default:
3265                 adapter->generation = 0;
3266         }
3267         return 0;
3268 }
3269
3270 static int lancer_wait_ready(struct be_adapter *adapter)
3271 {
3272 #define SLIPORT_READY_TIMEOUT 500
3273         u32 sliport_status;
3274         int status = 0, i;
3275
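             /* Poll the RDY bit in the SLIPORT status register: up to
              * SLIPORT_READY_TIMEOUT iterations of 20ms each, i.e.
              * about 10 seconds.
              */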
3276         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3277                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3278                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3279                         break;
3280
3281                 msleep(20);
3282         }
3283
3284         if (i == SLIPORT_READY_TIMEOUT)
3285                 status = -ETIMEDOUT;
3286
3287         return status;
3288 }
3289
3290 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3291 {
3292         int status;
3293         u32 sliport_status, err, reset_needed;
3294         status = lancer_wait_ready(adapter);
3295         if (!status) {
3296                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3297                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3298                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3299                 if (err && reset_needed) {
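                             /* The port flagged an error that needs a
                              * reset: request one by setting the IP bit
                              * in SLIPORT_CONTROL, then re-poll for the
                              * ready state.
                              */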
3300                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3301                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3302
3303                         /* check if the adapter has corrected the error */
3304                         status = lancer_wait_ready(adapter);
3305                         sliport_status = ioread32(adapter->db +
3306                                                         SLIPORT_STATUS_OFFSET);
3307                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3308                                                 SLIPORT_STATUS_RN_MASK);
3309                         if (status || sliport_status)
3310                                 status = -EIO;
3311                 } else if (err || reset_needed) {
3312                         status = -EIO;
3313                 }
3314         }
3315         return status;
3316 }
3317
3318 static int __devinit be_probe(struct pci_dev *pdev,
3319                         const struct pci_device_id *pdev_id)
3320 {
3321         int status = 0;
3322         struct be_adapter *adapter;
3323         struct net_device *netdev;
3324
3325         status = pci_enable_device(pdev);
3326         if (status)
3327                 goto do_none;
3328
3329         status = pci_request_regions(pdev, DRV_NAME);
3330         if (status)
3331                 goto disable_dev;
3332         pci_set_master(pdev);
3333
3334         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3335         if (netdev == NULL) {
3336                 status = -ENOMEM;
3337                 goto rel_reg;
3338         }
3339         adapter = netdev_priv(netdev);
3340         adapter->pdev = pdev;
3341         pci_set_drvdata(pdev, adapter);
3342
3343         status = be_dev_family_check(adapter);
3344         if (status)
3345                 goto free_netdev;
3346
3347         adapter->netdev = netdev;
3348         SET_NETDEV_DEV(netdev, &pdev->dev);
3349
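             /* Prefer 64-bit DMA and advertise NETIF_F_HIGHDMA; fall
              * back to a 32-bit DMA mask if the host cannot do 64-bit
              * addressing.
              */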
3350         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3351         if (!status) {
3352                 netdev->features |= NETIF_F_HIGHDMA;
3353         } else {
3354                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3355                 if (status) {
3356                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3357                         goto free_netdev;
3358                 }
3359         }
3360
3361         be_sriov_enable(adapter);
3362         if (adapter->sriov_enabled) {
3363                 adapter->vf_cfg = kcalloc(num_vfs,
3364                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3365
3366                 if (!adapter->vf_cfg) {
                             status = -ENOMEM;
3367                         goto free_netdev;
                     }
3368         }
3369
3370         status = be_ctrl_init(adapter);
3371         if (status)
3372                 goto free_vf_cfg;
3373
3374         if (lancer_chip(adapter)) {
3375                 status = lancer_test_and_set_rdy_state(adapter);
3376                 if (status) {
3377                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3378                         goto ctrl_clean;
3379                 }
3380         }
3381
3382         /* sync up with fw's ready state */
3383         if (be_physfn(adapter)) {
3384                 status = be_cmd_POST(adapter);
3385                 if (status)
3386                         goto ctrl_clean;
3387         }
3388
3389         /* tell fw we're ready to fire cmds */
3390         status = be_cmd_fw_init(adapter);
3391         if (status)
3392                 goto ctrl_clean;
3393
3394         status = be_cmd_reset_function(adapter);
3395         if (status)
3396                 goto ctrl_clean;
3397
3398         status = be_stats_init(adapter);
3399         if (status)
3400                 goto ctrl_clean;
3401
3402         status = be_get_config(adapter);
3403         if (status)
3404                 goto stats_clean;
3405
3406         /* The INTR bit may be set in the card when probed by a kdump kernel
3407          * after a crash.
3408          */
3409         if (!lancer_chip(adapter))
3410                 be_intr_set(adapter, false);
3411
3412         be_msix_enable(adapter);
3413
3414         INIT_DELAYED_WORK(&adapter->work, be_worker);
3415
3416         status = be_setup(adapter);
3417         if (status)
3418                 goto msix_disable;
3419
3420         be_netdev_init(netdev);
3421         status = register_netdev(netdev);
3422         if (status != 0)
3423                 goto unsetup;
3424         netif_carrier_off(netdev);
3425
3426         if (be_physfn(adapter) && adapter->sriov_enabled) {
3427                 u8 mac_speed;
3428                 bool link_up;
3429                 u16 vf, lnk_speed;
3430
3431                 if (!lancer_chip(adapter)) {
3432                         status = be_vf_eth_addr_config(adapter);
3433                         if (status)
3434                                 goto unreg_netdev;
3435                 }
3436
3437                 for (vf = 0; vf < num_vfs; vf++) {
3438                         status = be_cmd_link_status_query(adapter, &link_up,
3439                                         &mac_speed, &lnk_speed, vf + 1);
3440                         if (!status)
3441                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3442                         else
3443                                 goto unreg_netdev;
3444                 }
3445         }
3446
3447         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3448
3449         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3450         return 0;
3451
3452 unreg_netdev:
3453         unregister_netdev(netdev);
3454 unsetup:
3455         be_clear(adapter);
3456 msix_disable:
3457         be_msix_disable(adapter);
3458 stats_clean:
3459         be_stats_cleanup(adapter);
3460 ctrl_clean:
3461         be_ctrl_cleanup(adapter);
3462 free_vf_cfg:
3463         kfree(adapter->vf_cfg);
3464 free_netdev:
3465         be_sriov_disable(adapter);
3466         free_netdev(netdev);
3467         pci_set_drvdata(pdev, NULL);
3468 rel_reg:
3469         pci_release_regions(pdev);
3470 disable_dev:
3471         pci_disable_device(pdev);
3472 do_none:
3473         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3474         return status;
3475 }
3476
3477 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3478 {
3479         struct be_adapter *adapter = pci_get_drvdata(pdev);
3480         struct net_device *netdev =  adapter->netdev;
3481
3482         cancel_delayed_work_sync(&adapter->work);
3483         if (adapter->wol)
3484                 be_setup_wol(adapter, true);
3485
3486         netif_device_detach(netdev);
3487         if (netif_running(netdev)) {
3488                 rtnl_lock();
3489                 be_close(netdev);
3490                 rtnl_unlock();
3491         }
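             /* Snapshot the current flow-control settings so the saved
              * values can be reapplied when the interface is set up
              * again after resume.
              */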
3492         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3493         be_clear(adapter);
3494
3495         be_msix_disable(adapter);
3496         pci_save_state(pdev);
3497         pci_disable_device(pdev);
3498         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3499         return 0;
3500 }
3501
3502 static int be_resume(struct pci_dev *pdev)
3503 {
3504         int status = 0;
3505         struct be_adapter *adapter = pci_get_drvdata(pdev);
3506         struct net_device *netdev =  adapter->netdev;
3507
3508         netif_device_detach(netdev);
3509
3510         status = pci_enable_device(pdev);
3511         if (status)
3512                 return status;
3513
3514         pci_set_power_state(pdev, PCI_D0);
3515         pci_restore_state(pdev);
3516
3517         be_msix_enable(adapter);
3518         /* tell fw we're ready to fire cmds */
3519         status = be_cmd_fw_init(adapter);
3520         if (status)
3521                 return status;
3522
3523         be_setup(adapter);
3524         if (netif_running(netdev)) {
3525                 rtnl_lock();
3526                 be_open(netdev);
3527                 rtnl_unlock();
3528         }
3529         netif_device_attach(netdev);
3530
3531         if (adapter->wol)
3532                 be_setup_wol(adapter, false);
3533
3534         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3535         return 0;
3536 }
3537
3538 /*
3539  * An FLR (function-level reset) stops BE from DMAing any data.
3540  */
3541 static void be_shutdown(struct pci_dev *pdev)
3542 {
3543         struct be_adapter *adapter = pci_get_drvdata(pdev);
3544
3545         if (!adapter)
3546                 return;
3547
3548         cancel_delayed_work_sync(&adapter->work);
3549
3550         netif_device_detach(adapter->netdev);
3551
3552         if (adapter->wol)
3553                 be_setup_wol(adapter, true);
3554
3555         be_cmd_reset_function(adapter);
3556
3557         pci_disable_device(pdev);
3558 }
3559
3560 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3561                                 pci_channel_state_t state)
3562 {
3563         struct be_adapter *adapter = pci_get_drvdata(pdev);
3564         struct net_device *netdev =  adapter->netdev;
3565
3566         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3567
3568         adapter->eeh_err = true;
3569
3570         netif_device_detach(netdev);
3571
3572         if (netif_running(netdev)) {
3573                 rtnl_lock();
3574                 be_close(netdev);
3575                 rtnl_unlock();
3576         }
3577         be_clear(adapter);
3578
3579         if (state == pci_channel_io_perm_failure)
3580                 return PCI_ERS_RESULT_DISCONNECT;
3581
3582         pci_disable_device(pdev);
3583
3584         return PCI_ERS_RESULT_NEED_RESET;
3585 }
3586
3587 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3588 {
3589         struct be_adapter *adapter = pci_get_drvdata(pdev);
3590         int status;
3591
3592         dev_info(&adapter->pdev->dev, "EEH reset\n");
3593         adapter->eeh_err = false;
3594
3595         status = pci_enable_device(pdev);
3596         if (status)
3597                 return PCI_ERS_RESULT_DISCONNECT;
3598
3599         pci_set_master(pdev);
3600         pci_set_power_state(pdev, PCI_D0);
3601         pci_restore_state(pdev);
3602
3603         /* Check if card is ok and fw is ready */
3604         status = be_cmd_POST(adapter);
3605         if (status)
3606                 return PCI_ERS_RESULT_DISCONNECT;
3607
3608         return PCI_ERS_RESULT_RECOVERED;
3609 }
3610
3611 static void be_eeh_resume(struct pci_dev *pdev)
3612 {
3613         int status = 0;
3614         struct be_adapter *adapter = pci_get_drvdata(pdev);
3615         struct net_device *netdev =  adapter->netdev;
3616
3617         dev_info(&adapter->pdev->dev, "EEH resume\n");
3618
3619         pci_save_state(pdev);
3620
3621         /* tell fw we're ready to fire cmds */
3622         status = be_cmd_fw_init(adapter);
3623         if (status)
3624                 goto err;
3625
3626         status = be_setup(adapter);
3627         if (status)
3628                 goto err;
3629
3630         if (netif_running(netdev)) {
3631                 status = be_open(netdev);
3632                 if (status)
3633                         goto err;
3634         }
3635         netif_device_attach(netdev);
3636         return;
3637 err:
3638         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3639 }
3640
3641 static struct pci_error_handlers be_eeh_handlers = {
3642         .error_detected = be_eeh_err_detected,
3643         .slot_reset = be_eeh_reset,
3644         .resume = be_eeh_resume,
3645 };
3646
3647 static struct pci_driver be_driver = {
3648         .name = DRV_NAME,
3649         .id_table = be_dev_ids,
3650         .probe = be_probe,
3651         .remove = be_remove,
3652         .suspend = be_suspend,
3653         .resume = be_resume,
3654         .shutdown = be_shutdown,
3655         .err_handler = &be_eeh_handlers,
3656 };
3657
3658 static int __init be_init_module(void)
3659 {
3660         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3661             rx_frag_size != 2048) {
3662                 printk(KERN_WARNING DRV_NAME
3663                         " : Module param rx_frag_size must be 2048/4096/8192."
3664                         " Using 2048\n");
3665                 rx_frag_size = 2048;
3666         }
3667
3668         return pci_register_driver(&be_driver);
3669 }
3670 module_init(be_init_module);
3671
3672 static void __exit be_exit_module(void)
3673 {
3674         pci_unregister_driver(&be_driver);
3675 }
3676 module_exit(be_exit_module);