1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
22
23 MODULE_VERSION(DRV_VER);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
28
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
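/* Both parameters are read-only once loaded (S_IRUGO); set them at module
 * load time, e.g. "modprobe be2net num_vfs=4".
 */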
35
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43         { 0 }
44 };
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static char *ue_status_low_desc[] = {
48         "CEV",
49         "CTX",
50         "DBUF",
51         "ERX",
52         "Host",
53         "MPU",
54         "NDMA",
55         "PTC ",
56         "RDMA ",
57         "RXF ",
58         "RXIPS ",
59         "RXULP0 ",
60         "RXULP1 ",
61         "RXULP2 ",
62         "TIM ",
63         "TPOST ",
64         "TPRE ",
65         "TXIPS ",
66         "TXULP0 ",
67         "TXULP1 ",
68         "UC ",
69         "WDMA ",
70         "TXULP2 ",
71         "HOST1 ",
72         "P0_OB_LINK ",
73         "P1_OB_LINK ",
74         "HOST_GPIO ",
75         "MBOX ",
76         "AXGMAC0",
77         "AXGMAC1",
78         "JTAG",
79         "MPU_INTPEND"
80 };
81 /* UE Status High CSR */
82 static char *ue_status_hi_desc[] = {
83         "LPCMEMHOST",
84         "MGMT_MAC",
85         "PCS0ONLINE",
86         "MPU_IRAM",
87         "PCS1ONLINE",
88         "PCTL0",
89         "PCTL1",
90         "PMEM",
91         "RR",
92         "TXPB",
93         "RXPP",
94         "XAUI",
95         "TXP",
96         "ARM",
97         "IPC",
98         "HOST2",
99         "HOST3",
100         "HOST4",
101         "HOST5",
102         "HOST6",
103         "HOST7",
104         "HOST8",
105         "HOST9",
106         "NETC",
107         "Unknown",
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown"
115 };
116
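/* Free the DMA-coherent memory backing a queue's ring, if it was allocated */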
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118 {
119         struct be_dma_mem *mem = &q->dma_mem;
120         if (mem->va)
121                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122                                   mem->dma);
123 }
124
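/* Allocate and zero a DMA-coherent ring of 'len' entries, each of
 * 'entry_size' bytes, for the given queue.
 */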
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126                 u16 len, u16 entry_size)
127 {
128         struct be_dma_mem *mem = &q->dma_mem;
129
130         memset(q, 0, sizeof(*q));
131         q->len = len;
132         q->entry_size = entry_size;
133         mem->size = len * entry_size;
134         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135                                      GFP_KERNEL);
136         if (!mem->va)
137                 return -ENOMEM;
138         memset(mem->va, 0, mem->size);
139         return 0;
140 }
141
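/* Enable or disable host interrupt delivery via the membar interrupt
 * control register; does nothing if the bit already matches 'enable'.
 */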
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
143 {
144         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145         u32 reg, enabled;
146
147         if (adapter->eeh_err)
148                 return;
149         reg = ioread32(addr);
150         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
151         if (!enabled && enable)
152                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153         else if (enabled && !enable)
154                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155         else
156                 return;
157
158         iowrite32(reg, addr);
159 }
160
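/* Ring the RQ doorbell; the wmb() makes the posted rx descriptors visible
 * to the device before it sees the updated count.
 */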
161 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
162 {
163         u32 val = 0;
164         val |= qid & DB_RQ_RING_ID_MASK;
165         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
166
167         wmb();
168         iowrite32(val, adapter->db + DB_RQ_OFFSET);
169 }
170
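/* Ring the TX (ULP) doorbell to report 'posted' new WRBs on the given queue */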
171 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
172 {
173         u32 val = 0;
174         val |= qid & DB_TXULP_RING_ID_MASK;
175         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
176
177         wmb();
178         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
179 }
180
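/* Ring the event-queue doorbell: acknowledge 'num_popped' events and
 * optionally re-arm the EQ and/or clear the interrupt.
 */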
181 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182                 bool arm, bool clear_int, u16 num_popped)
183 {
184         u32 val = 0;
185         val |= qid & DB_EQ_RING_ID_MASK;
186         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
188
189         if (adapter->eeh_err)
190                 return;
191
192         if (arm)
193                 val |= 1 << DB_EQ_REARM_SHIFT;
194         if (clear_int)
195                 val |= 1 << DB_EQ_CLR_SHIFT;
196         val |= 1 << DB_EQ_EVNT_SHIFT;
197         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198         iowrite32(val, adapter->db + DB_EQ_OFFSET);
199 }
200
201 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
202 {
203         u32 val = 0;
204         val |= qid & DB_CQ_RING_ID_MASK;
205         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
207
208         if (adapter->eeh_err)
209                 return;
210
211         if (arm)
212                 val |= 1 << DB_CQ_REARM_SHIFT;
213         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214         iowrite32(val, adapter->db + DB_CQ_OFFSET);
215 }
216
217 static int be_mac_addr_set(struct net_device *netdev, void *p)
218 {
219         struct be_adapter *adapter = netdev_priv(netdev);
220         struct sockaddr *addr = p;
221         int status = 0;
222
223         if (!is_valid_ether_addr(addr->sa_data))
224                 return -EADDRNOTAVAIL;
225
226         /* MAC addr configuration will be done in hardware for VFs
227          * by their corresponding PFs. Just copy to netdev addr here
228          */
229         if (!be_physfn(adapter))
230                 goto netdev_addr;
231
232         status = be_cmd_pmac_del(adapter, adapter->if_handle,
233                                 adapter->pmac_id, 0);
234         if (status)
235                 return status;
236
237         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238                                 adapter->if_handle, &adapter->pmac_id, 0);
239 netdev_addr:
240         if (!status)
241                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243         return status;
244 }
245
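/* Copy v0 (BE2) stats from the f/w stats cmd response into the driver's
 * stats cache; jabber events live in the rxf block, selected by port num.
 */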
246 static void populate_be2_stats(struct be_adapter *adapter)
247 {
249         struct be_drv_stats *drvs = &adapter->drv_stats;
250         struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
251         struct be_port_rxf_stats_v0 *port_stats =
252                 be_port_rxf_stats_from_cmd(adapter);
253         struct be_rxf_stats_v0 *rxf_stats =
254                 be_rxf_stats_from_cmd(adapter);
255
256         drvs->rx_pause_frames = port_stats->rx_pause_frames;
257         drvs->rx_crc_errors = port_stats->rx_crc_errors;
258         drvs->rx_control_frames = port_stats->rx_control_frames;
259         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270         drvs->rx_input_fifo_overflow_drop =
271                 port_stats->rx_input_fifo_overflow;
272         drvs->rx_dropped_header_too_small =
273                 port_stats->rx_dropped_header_too_small;
274         drvs->rx_address_match_errors =
275                 port_stats->rx_address_match_errors;
276         drvs->rx_alignment_symbol_errors =
277                 port_stats->rx_alignment_symbol_errors;
278
279         drvs->tx_pauseframes = port_stats->tx_pauseframes;
280         drvs->tx_controlframes = port_stats->tx_controlframes;
281
282         if (adapter->port_num)
283                 drvs->jabber_events =
284                         rxf_stats->port1_jabber_events;
285         else
286                 drvs->jabber_events =
287                         rxf_stats->port0_jabber_events;
288         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
289         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
290         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
291         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
292         drvs->forwarded_packets = rxf_stats->forwarded_packets;
293         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
294         drvs->rx_drops_no_tpre_descr =
295                 rxf_stats->rx_drops_no_tpre_descr;
296         drvs->rx_drops_too_many_frags =
297                 rxf_stats->rx_drops_too_many_frags;
298         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
299 }
300
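/* Copy v1 (BE3) stats; unlike v0, jabber events are part of the per-port
 * stats block here.
 */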
301 static void populate_be3_stats(struct be_adapter *adapter)
302 {
303         struct be_drv_stats *drvs = &adapter->drv_stats;
304         struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
305
306         struct be_rxf_stats_v1 *rxf_stats =
307                 be_rxf_stats_from_cmd(adapter);
308         struct be_port_rxf_stats_v1 *port_stats =
309                 be_port_rxf_stats_from_cmd(adapter);
310
311         drvs->rx_priority_pause_frames = 0;
312         drvs->pmem_fifo_overflow_drop = 0;
313         drvs->rx_pause_frames = port_stats->rx_pause_frames;
314         drvs->rx_crc_errors = port_stats->rx_crc_errors;
315         drvs->rx_control_frames = port_stats->rx_control_frames;
316         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
317         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
318         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
319         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
320         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
321         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
322         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
323         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
324         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
325         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
326         drvs->rx_dropped_header_too_small =
327                 port_stats->rx_dropped_header_too_small;
328         drvs->rx_input_fifo_overflow_drop =
329                 port_stats->rx_input_fifo_overflow_drop;
330         drvs->rx_address_match_errors =
331                 port_stats->rx_address_match_errors;
332         drvs->rx_alignment_symbol_errors =
333                 port_stats->rx_alignment_symbol_errors;
334         drvs->rxpp_fifo_overflow_drop =
335                 port_stats->rxpp_fifo_overflow_drop;
336         drvs->tx_pauseframes = port_stats->tx_pauseframes;
337         drvs->tx_controlframes = port_stats->tx_controlframes;
338         drvs->jabber_events = port_stats->jabber_events;
339         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343         drvs->forwarded_packets = rxf_stats->forwarded_packets;
344         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
345         drvs->rx_drops_no_tpre_descr =
346                 rxf_stats->rx_drops_no_tpre_descr;
347         drvs->rx_drops_too_many_frags =
348                 rxf_stats->rx_drops_too_many_frags;
349         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
350 }
351
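/* Lancer reports pport stats as hi/lo dword pairs; fold them into u64
 * driver stats. Counters Lancer does not maintain are zeroed.
 */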
352 static void populate_lancer_stats(struct be_adapter *adapter)
353 {
355         struct be_drv_stats *drvs = &adapter->drv_stats;
356         struct lancer_cmd_pport_stats *pport_stats =
357                                                 pport_stats_from_cmd(adapter);
358         drvs->rx_priority_pause_frames = 0;
359         drvs->pmem_fifo_overflow_drop = 0;
360         drvs->rx_pause_frames =
361                 make_64bit_val(pport_stats->rx_pause_frames_hi,
362                                  pport_stats->rx_pause_frames_lo);
363         drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
364                                                 pport_stats->rx_crc_errors_lo);
365         drvs->rx_control_frames =
366                         make_64bit_val(pport_stats->rx_control_frames_hi,
367                         pport_stats->rx_control_frames_lo);
368         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
369         drvs->rx_frame_too_long =
370                 make_64bit_val(pport_stats->rx_frames_too_long_hi,
371                                         pport_stats->rx_frames_too_long_lo);
372         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376         drvs->rx_dropped_tcp_length =
377                                 pport_stats->rx_dropped_invalid_tcp_length;
378         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381         drvs->rx_dropped_header_too_small =
382                                 pport_stats->rx_dropped_header_too_small;
383         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384         drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385         drvs->rx_alignment_symbol_errors =
386                 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387                                 pport_stats->rx_symbol_errors_lo);
388         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389         drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390                                         pport_stats->tx_pause_frames_lo);
391         drvs->tx_controlframes =
392                 make_64bit_val(pport_stats->tx_control_frames_hi,
393                                 pport_stats->tx_control_frames_lo);
394         drvs->jabber_events = pport_stats->rx_jabbers;
395         drvs->rx_drops_no_pbuf = 0;
396         drvs->rx_drops_no_txpb = 0;
397         drvs->rx_drops_no_erx_descr = 0;
398         drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399         drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400                                                 pport_stats->num_forwards_lo);
401         drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402                                                 pport_stats->rx_drops_mtu_lo);
403         drvs->rx_drops_no_tpre_descr = 0;
404         drvs->rx_drops_too_many_frags =
405                 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406                                 pport_stats->rx_drops_too_many_frags_lo);
407 }
408
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411         if (adapter->generation == BE_GEN3) {
412                 if (lancer_chip(adapter))
413                         populate_lancer_stats(adapter);
414                 else
415                         populate_be3_stats(adapter);
416         } else {
417                 populate_be2_stats(adapter);
418         }
419 }
420
421 void netdev_stats_update(struct be_adapter *adapter)
422 {
423         struct be_drv_stats *drvs = &adapter->drv_stats;
424         struct net_device_stats *dev_stats = &adapter->netdev->stats;
425         struct be_rx_obj *rxo;
426         struct be_tx_obj *txo;
427         unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
428         int i;
429
430         for_all_rx_queues(adapter, rxo, i) {
431                 pkts += rx_stats(rxo)->rx_pkts;
432                 bytes += rx_stats(rxo)->rx_bytes;
433                 mcast += rx_stats(rxo)->rx_mcast_pkts;
434                 /*  no space in linux buffers: best possible approximation */
435                 if (adapter->generation == BE_GEN3) {
436                         if (!(lancer_chip(adapter))) {
437                                 struct be_erx_stats_v1 *erx =
438                                         be_erx_stats_from_cmd(adapter);
439                                 drops += erx->rx_drops_no_fragments[rxo->q.id];
440                         }
441                 } else {
442                         struct be_erx_stats_v0 *erx =
443                                         be_erx_stats_from_cmd(adapter);
444                         drops += erx->rx_drops_no_fragments[rxo->q.id];
445                 }
446         }
447         dev_stats->rx_packets = pkts;
448         dev_stats->rx_bytes = bytes;
449         dev_stats->multicast = mcast;
450         dev_stats->rx_dropped = drops;
451
452         pkts = bytes = 0;
453         for_all_tx_queues(adapter, txo, i) {
454                 pkts += tx_stats(txo)->be_tx_pkts;
455                 bytes += tx_stats(txo)->be_tx_bytes;
456         }
457         dev_stats->tx_packets = pkts;
458         dev_stats->tx_bytes = bytes;
459
460         /* bad pkts received */
461         dev_stats->rx_errors = drvs->rx_crc_errors +
462                 drvs->rx_alignment_symbol_errors +
463                 drvs->rx_in_range_errors +
464                 drvs->rx_out_range_errors +
465                 drvs->rx_frame_too_long +
466                 drvs->rx_dropped_too_small +
467                 drvs->rx_dropped_too_short +
468                 drvs->rx_dropped_header_too_small +
469                 drvs->rx_dropped_tcp_length +
470                 drvs->rx_dropped_runt +
471                 drvs->rx_tcp_checksum_errs +
472                 drvs->rx_ip_checksum_errs +
473                 drvs->rx_udp_checksum_errs;
474
475         /* detailed rx errors */
476         dev_stats->rx_length_errors = drvs->rx_in_range_errors +
477                 drvs->rx_out_range_errors +
478                 drvs->rx_frame_too_long;
479
480         dev_stats->rx_crc_errors = drvs->rx_crc_errors;
481
482         /* frame alignment errors */
483         dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
484
485         /* receiver fifo overrun */
486         /* drops_no_pbuf is not per i/f, it's per BE card */
487         dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
488                                 drvs->rx_input_fifo_overflow_drop +
489                                 drvs->rx_drops_no_pbuf;
490 }
491
492 void be_link_status_update(struct be_adapter *adapter, bool link_up)
493 {
494         struct net_device *netdev = adapter->netdev;
495
496         /* If link came up or went down */
497         if (adapter->link_up != link_up) {
498                 adapter->link_speed = -1;
499                 if (link_up) {
500                         netif_carrier_on(netdev);
501                         printk(KERN_INFO "%s: Link up\n", netdev->name);
502                 } else {
503                         netif_carrier_off(netdev);
504                         printk(KERN_INFO "%s: Link down\n", netdev->name);
505                 }
506                 adapter->link_up = link_up;
507         }
508 }
509
510 /* Update the EQ delay in BE based on the RX frags consumed / sec */
511 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
512 {
513         struct be_eq_obj *rx_eq = &rxo->rx_eq;
514         struct be_rx_stats *stats = &rxo->stats;
515         ulong now = jiffies;
516         u32 eqd;
517
518         if (!rx_eq->enable_aic)
519                 return;
520
521         /* Wrapped around */
522         if (time_before(now, stats->rx_fps_jiffies)) {
523                 stats->rx_fps_jiffies = now;
524                 return;
525         }
526
527         /* Update once a second */
528         if ((now - stats->rx_fps_jiffies) < HZ)
529                 return;
530
531         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
532                         ((now - stats->rx_fps_jiffies) / HZ);
533
534         stats->rx_fps_jiffies = now;
535         stats->prev_rx_frags = stats->rx_frags;
536         eqd = stats->rx_fps / 110000;
537         eqd = eqd << 3;
538         if (eqd > rx_eq->max_eqd)
539                 eqd = rx_eq->max_eqd;
540         if (eqd < rx_eq->min_eqd)
541                 eqd = rx_eq->min_eqd;
542         if (eqd < 10)
543                 eqd = 0;
544         if (eqd != rx_eq->cur_eqd)
545                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
546
547         rx_eq->cur_eqd = eqd;
548 }
549
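/* Convert a byte count accumulated over 'ticks' jiffies into an Mbits/sec
 * rate; 'ticks' must be at least HZ to avoid a divide-by-zero.
 */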
550 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
551 {
552         u64 rate = bytes;
553
554         do_div(rate, ticks / HZ);
555         rate <<= 3;                     /* bytes/sec -> bits/sec */
556         do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */
557
558         return rate;
559 }
560
561 static void be_tx_rate_update(struct be_tx_obj *txo)
562 {
563         struct be_tx_stats *stats = tx_stats(txo);
564         ulong now = jiffies;
565
566         /* Wrapped around? */
567         if (time_before(now, stats->be_tx_jiffies)) {
568                 stats->be_tx_jiffies = now;
569                 return;
570         }
571
572         /* Update tx rate once in two seconds */
573         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
574                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
575                                                   - stats->be_tx_bytes_prev,
576                                                  now - stats->be_tx_jiffies);
577                 stats->be_tx_jiffies = now;
578                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
579         }
580 }
581
582 static void be_tx_stats_update(struct be_tx_obj *txo,
583                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
584 {
585         struct be_tx_stats *stats = tx_stats(txo);
586
587         stats->be_tx_reqs++;
588         stats->be_tx_wrbs += wrb_cnt;
589         stats->be_tx_bytes += copied;
590         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
591         if (stopped)
592                 stats->be_tx_stops++;
593 }
594
595 /* Determine number of WRB entries needed to xmit data in an skb */
596 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
597                                                                 bool *dummy)
598 {
599         int cnt = (skb->len > skb->data_len);
600
601         cnt += skb_shinfo(skb)->nr_frags;
602
603         /* to account for hdr wrb */
604         cnt++;
605         if (lancer_chip(adapter) || !(cnt & 1)) {
606                 *dummy = false;
607         } else {
608                 /* add a dummy to make it an even num */
609                 cnt++;
610                 *dummy = true;
611         }
612         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
613         return cnt;
614 }
615
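/* Fill a tx WRB with the 64-bit DMA address and length of one fragment */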
616 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
617 {
618         wrb->frag_pa_hi = upper_32_bits(addr);
619         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
620         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
621 }
622
623 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
624                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
625 {
626         u8 vlan_prio = 0;
627         u16 vlan_tag = 0;
628
629         memset(hdr, 0, sizeof(*hdr));
630
631         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
632
633         if (skb_is_gso(skb)) {
634                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
635                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
636                         hdr, skb_shinfo(skb)->gso_size);
637                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
638                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
639                 if (lancer_chip(adapter) && adapter->sli_family ==
640                                                         LANCER_A0_SLI_FAMILY) {
641                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
642                         if (is_tcp_pkt(skb))
643                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
644                                                                 tcpcs, hdr, 1);
645                         else if (is_udp_pkt(skb))
646                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
647                                                                 udpcs, hdr, 1);
648                 }
649         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
650                 if (is_tcp_pkt(skb))
651                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
652                 else if (is_udp_pkt(skb))
653                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
654         }
655
656         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
657                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
658                 vlan_tag = vlan_tx_tag_get(skb);
659                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
660                 /* If vlan priority provided by OS is NOT in available bmap */
661                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
662                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
663                                         adapter->recommended_prio;
664                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
665         }
666
667         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
668         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
669         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
670         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
671 }
672
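/* Undo the DMA mapping recorded in a (little-endian) tx WRB */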
673 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
674                 bool unmap_single)
675 {
676         dma_addr_t dma;
677
678         be_dws_le_to_cpu(wrb, sizeof(*wrb));
679
680         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
681         if (wrb->frag_len) {
682                 if (unmap_single)
683                         dma_unmap_single(dev, dma, wrb->frag_len,
684                                          DMA_TO_DEVICE);
685                 else
686                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
687         }
688 }
689
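/* Map the skb head and frags and fill one WRB per fragment, plus the hdr
 * WRB and an optional dummy. Returns the number of bytes mapped, or 0 if
 * a DMA mapping failed (after unwinding any mappings already made).
 */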
690 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
691                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
692 {
693         dma_addr_t busaddr;
694         int i, copied = 0;
695         struct device *dev = &adapter->pdev->dev;
696         struct sk_buff *first_skb = skb;
697         struct be_eth_wrb *wrb;
698         struct be_eth_hdr_wrb *hdr;
699         bool map_single = false;
700         u16 map_head;
701
702         hdr = queue_head_node(txq);
703         queue_head_inc(txq);
704         map_head = txq->head;
705
706         if (skb->len > skb->data_len) {
707                 int len = skb_headlen(skb);
708                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
709                 if (dma_mapping_error(dev, busaddr))
710                         goto dma_err;
711                 map_single = true;
712                 wrb = queue_head_node(txq);
713                 wrb_fill(wrb, busaddr, len);
714                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
715                 queue_head_inc(txq);
716                 copied += len;
717         }
718
719         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
720                 struct skb_frag_struct *frag =
721                         &skb_shinfo(skb)->frags[i];
722                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
723                                        frag->size, DMA_TO_DEVICE);
724                 if (dma_mapping_error(dev, busaddr))
725                         goto dma_err;
726                 wrb = queue_head_node(txq);
727                 wrb_fill(wrb, busaddr, frag->size);
728                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
729                 queue_head_inc(txq);
730                 copied += frag->size;
731         }
732
733         if (dummy_wrb) {
734                 wrb = queue_head_node(txq);
735                 wrb_fill(wrb, 0, 0);
736                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
737                 queue_head_inc(txq);
738         }
739
740         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
741         be_dws_cpu_to_le(hdr, sizeof(*hdr));
742
743         return copied;
744 dma_err:
745         txq->head = map_head;
746         while (copied) {
747                 wrb = queue_head_node(txq);
748                 unmap_tx_frag(dev, wrb, map_single);
749                 map_single = false;
750                 copied -= wrb->frag_len;
751                 queue_head_inc(txq);
752         }
753         return 0;
754 }
755
756 static netdev_tx_t be_xmit(struct sk_buff *skb,
757                         struct net_device *netdev)
758 {
759         struct be_adapter *adapter = netdev_priv(netdev);
760         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
761         struct be_queue_info *txq = &txo->q;
762         u32 wrb_cnt = 0, copied = 0;
763         u32 start = txq->head;
764         bool dummy_wrb, stopped = false;
765
766         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
767
768         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
769         if (copied) {
770                 /* record the sent skb in the sent_skb table */
771                 BUG_ON(txo->sent_skb_list[start]);
772                 txo->sent_skb_list[start] = skb;
773
774                 /* Ensure txq has space for the next skb; Else stop the queue
775                  * *BEFORE* ringing the tx doorbell, so that we serialize the
776                  * tx compls of the current transmit which'll wake up the queue
777                  */
778                 atomic_add(wrb_cnt, &txq->used);
779                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780                                                                 txq->len) {
781                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
782                         stopped = true;
783                 }
784
785                 be_txq_notify(adapter, txq->id, wrb_cnt);
786
787                 be_tx_stats_update(txo, wrb_cnt, copied,
788                                 skb_shinfo(skb)->gso_segs, stopped);
789         } else {
790                 txq->head = start;
791                 dev_kfree_skb_any(skb);
792         }
793         return NETDEV_TX_OK;
794 }
795
796 static int be_change_mtu(struct net_device *netdev, int new_mtu)
797 {
798         struct be_adapter *adapter = netdev_priv(netdev);
799         if (new_mtu < BE_MIN_MTU ||
800                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801                                         (ETH_HLEN + ETH_FCS_LEN))) {
802                 dev_info(&adapter->pdev->dev,
803                         "MTU must be between %d and %d bytes\n",
804                         BE_MIN_MTU,
805                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
806                 return -EINVAL;
807         }
808         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809                         netdev->mtu, new_mtu);
810         netdev->mtu = new_mtu;
811         return 0;
812 }
813
814 /*
815  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816  * If the user configures more, place BE in vlan promiscuous mode.
817  */
818 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
819 {
820         u16 vtag[BE_NUM_VLANS_SUPPORTED];
821         u16 ntags = 0, i;
822         int status = 0;
823         u32 if_handle;
824
825         if (vf) {
826                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
827                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
828                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
829         }
830
831         if (adapter->vlans_added <= adapter->max_vlans) {
832                 /* Construct VLAN Table to give to HW */
833                 for (i = 0; i < VLAN_N_VID; i++) {
834                         if (adapter->vlan_tag[i]) {
835                                 vtag[ntags] = cpu_to_le16(i);
836                                 ntags++;
837                         }
838                 }
839                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
840                                         vtag, ntags, 1, 0);
841         } else {
842                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
843                                         NULL, 0, 1, 1);
844         }
845
846         return status;
847 }
848
849 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
850 {
851         struct be_adapter *adapter = netdev_priv(netdev);
852
853         adapter->vlan_grp = grp;
854 }
855
856 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
857 {
858         struct be_adapter *adapter = netdev_priv(netdev);
859
860         adapter->vlans_added++;
861         if (!be_physfn(adapter))
862                 return;
863
864         adapter->vlan_tag[vid] = 1;
865         if (adapter->vlans_added <= (adapter->max_vlans + 1))
866                 be_vid_config(adapter, false, 0);
867 }
868
869 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
870 {
871         struct be_adapter *adapter = netdev_priv(netdev);
872
873         adapter->vlans_added--;
874         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
875
876         if (!be_physfn(adapter))
877                 return;
878
879         adapter->vlan_tag[vid] = 0;
880         if (adapter->vlans_added <= adapter->max_vlans)
881                 be_vid_config(adapter, false, 0);
882 }
883
884 static void be_set_multicast_list(struct net_device *netdev)
885 {
886         struct be_adapter *adapter = netdev_priv(netdev);
887
888         if (netdev->flags & IFF_PROMISC) {
889                 be_cmd_promiscuous_config(adapter, true);
890                 adapter->promiscuous = true;
891                 goto done;
892         }
893
894         /* BE was previously in promiscuous mode; disable it */
895         if (adapter->promiscuous) {
896                 adapter->promiscuous = false;
897                 be_cmd_promiscuous_config(adapter, false);
898         }
899
900         /* Enable multicast promisc if num configured exceeds what we support */
901         if (netdev->flags & IFF_ALLMULTI ||
902             netdev_mc_count(netdev) > BE_MAX_MC) {
903                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
904                                 &adapter->mc_cmd_mem);
905                 goto done;
906         }
907
908         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
909                 &adapter->mc_cmd_mem);
910 done:
911         return;
912 }
913
914 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
915 {
916         struct be_adapter *adapter = netdev_priv(netdev);
917         int status;
918
919         if (!adapter->sriov_enabled)
920                 return -EPERM;
921
922         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
923                 return -EINVAL;
924
925         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
926                 status = be_cmd_pmac_del(adapter,
927                                         adapter->vf_cfg[vf].vf_if_handle,
928                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
929
930         status = be_cmd_pmac_add(adapter, mac,
931                                 adapter->vf_cfg[vf].vf_if_handle,
932                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
933
934         if (status)
935                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
936                                 mac, vf);
937         else
938                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
939
940         return status;
941 }
942
943 static int be_get_vf_config(struct net_device *netdev, int vf,
944                         struct ifla_vf_info *vi)
945 {
946         struct be_adapter *adapter = netdev_priv(netdev);
947
948         if (!adapter->sriov_enabled)
949                 return -EPERM;
950
951         if (vf >= num_vfs)
952                 return -EINVAL;
953
954         vi->vf = vf;
955         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
956         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
957         vi->qos = 0;
958         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
959
960         return 0;
961 }
962
963 static int be_set_vf_vlan(struct net_device *netdev,
964                         int vf, u16 vlan, u8 qos)
965 {
966         struct be_adapter *adapter = netdev_priv(netdev);
967         int status = 0;
968
969         if (!adapter->sriov_enabled)
970                 return -EPERM;
971
972         if ((vf >= num_vfs) || (vlan > 4095))
973                 return -EINVAL;
974
975         if (vlan) {
976                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
977                 adapter->vlans_added++;
978         } else {
979                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
980                 adapter->vlans_added--;
981         }
982
983         status = be_vid_config(adapter, true, vf);
984
985         if (status)
986                 dev_info(&adapter->pdev->dev,
987                                 "VLAN %d config on VF %d failed\n", vlan, vf);
988         return status;
989 }
990
991 static int be_set_vf_tx_rate(struct net_device *netdev,
992                         int vf, int rate)
993 {
994         struct be_adapter *adapter = netdev_priv(netdev);
995         int status = 0;
996
997         if (!adapter->sriov_enabled)
998                 return -EPERM;
999
1000         if ((vf >= num_vfs) || (rate < 0))
1001                 return -EINVAL;
1002
1003         if (rate > 10000)
1004                 rate = 10000;
1005
1006         adapter->vf_cfg[vf].vf_tx_rate = rate;
1007         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1008
1009         if (status)
1010                 dev_info(&adapter->pdev->dev,
1011                                 "tx rate %d on VF %d failed\n", rate, vf);
1012         return status;
1013 }
1014
1015 static void be_rx_rate_update(struct be_rx_obj *rxo)
1016 {
1017         struct be_rx_stats *stats = &rxo->stats;
1018         ulong now = jiffies;
1019
1020         /* Wrapped around */
1021         if (time_before(now, stats->rx_jiffies)) {
1022                 stats->rx_jiffies = now;
1023                 return;
1024         }
1025
1026         /* Update the rate once in two seconds */
1027         if ((now - stats->rx_jiffies) < 2 * HZ)
1028                 return;
1029
1030         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1031                                 now - stats->rx_jiffies);
1032         stats->rx_jiffies = now;
1033         stats->rx_bytes_prev = stats->rx_bytes;
1034 }
1035
1036 static void be_rx_stats_update(struct be_rx_obj *rxo,
1037                 struct be_rx_compl_info *rxcp)
1038 {
1039         struct be_rx_stats *stats = &rxo->stats;
1040
1041         stats->rx_compl++;
1042         stats->rx_frags += rxcp->num_rcvd;
1043         stats->rx_bytes += rxcp->pkt_size;
1044         stats->rx_pkts++;
1045         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1046                 stats->rx_mcast_pkts++;
1047         if (rxcp->err)
1048                 stats->rxcp_err++;
1049 }
1050
1051 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1052 {
1053         /* L4 checksum is not reliable for non TCP/UDP packets.
1054          * Also ignore ipcksm for ipv6 pkts */
1055         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1056                                 (rxcp->ip_csum || rxcp->ipv6);
1057 }
1058
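/* Return the page_info for a posted rx frag; unmap the backing page when
 * its last frag is consumed, and decrement the rxq used count.
 */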
1059 static struct be_rx_page_info *
1060 get_rx_page_info(struct be_adapter *adapter,
1061                 struct be_rx_obj *rxo,
1062                 u16 frag_idx)
1063 {
1064         struct be_rx_page_info *rx_page_info;
1065         struct be_queue_info *rxq = &rxo->q;
1066
1067         rx_page_info = &rxo->page_info_tbl[frag_idx];
1068         BUG_ON(!rx_page_info->page);
1069
1070         if (rx_page_info->last_page_user) {
1071                 dma_unmap_page(&adapter->pdev->dev,
1072                                dma_unmap_addr(rx_page_info, bus),
1073                                adapter->big_page_size, DMA_FROM_DEVICE);
1074                 rx_page_info->last_page_user = false;
1075         }
1076
1077         atomic_dec(&rxq->used);
1078         return rx_page_info;
1079 }
1080
1081 /* Throw away the data in the Rx completion */
1082 static void be_rx_compl_discard(struct be_adapter *adapter,
1083                 struct be_rx_obj *rxo,
1084                 struct be_rx_compl_info *rxcp)
1085 {
1086         struct be_queue_info *rxq = &rxo->q;
1087         struct be_rx_page_info *page_info;
1088         u16 i, num_rcvd = rxcp->num_rcvd;
1089
1090         for (i = 0; i < num_rcvd; i++) {
1091                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1092                 put_page(page_info->page);
1093                 memset(page_info, 0, sizeof(*page_info));
1094                 index_inc(&rxcp->rxq_idx, rxq->len);
1095         }
1096 }
1097
1098 /*
1099  * skb_fill_rx_data forms a complete skb for an ether frame
1100  * indicated by rxcp.
1101  */
1102 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1103                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1104 {
1105         struct be_queue_info *rxq = &rxo->q;
1106         struct be_rx_page_info *page_info;
1107         u16 i, j;
1108         u16 hdr_len, curr_frag_len, remaining;
1109         u8 *start;
1110
1111         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1112         start = page_address(page_info->page) + page_info->page_offset;
1113         prefetch(start);
1114
1115         /* Copy data in the first descriptor of this completion */
1116         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1117
1118         /* Copy the header portion into skb_data */
1119         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1120         memcpy(skb->data, start, hdr_len);
1121         skb->len = curr_frag_len;
1122         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1123                 /* Complete packet has now been moved to data */
1124                 put_page(page_info->page);
1125                 skb->data_len = 0;
1126                 skb->tail += curr_frag_len;
1127         } else {
1128                 skb_shinfo(skb)->nr_frags = 1;
1129                 skb_shinfo(skb)->frags[0].page = page_info->page;
1130                 skb_shinfo(skb)->frags[0].page_offset =
1131                                         page_info->page_offset + hdr_len;
1132                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1133                 skb->data_len = curr_frag_len - hdr_len;
1134                 skb->tail += hdr_len;
1135         }
1136         page_info->page = NULL;
1137
1138         if (rxcp->pkt_size <= rx_frag_size) {
1139                 BUG_ON(rxcp->num_rcvd != 1);
1140                 return;
1141         }
1142
1143         /* More frags present for this completion */
1144         index_inc(&rxcp->rxq_idx, rxq->len);
1145         remaining = rxcp->pkt_size - curr_frag_len;
1146         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1147                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1148                 curr_frag_len = min(remaining, rx_frag_size);
1149
1150                 /* Coalesce all frags from the same physical page in one slot */
1151                 if (page_info->page_offset == 0) {
1152                         /* Fresh page */
1153                         j++;
1154                         skb_shinfo(skb)->frags[j].page = page_info->page;
1155                         skb_shinfo(skb)->frags[j].page_offset =
1156                                                         page_info->page_offset;
1157                         skb_shinfo(skb)->frags[j].size = 0;
1158                         skb_shinfo(skb)->nr_frags++;
1159                 } else {
1160                         put_page(page_info->page);
1161                 }
1162
1163                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1164                 skb->len += curr_frag_len;
1165                 skb->data_len += curr_frag_len;
1166
1167                 remaining -= curr_frag_len;
1168                 index_inc(&rxcp->rxq_idx, rxq->len);
1169                 page_info->page = NULL;
1170         }
1171         BUG_ON(j > MAX_SKB_FRAGS);
1172 }
1173
1174 /* Process the RX completion indicated by rxcp when GRO is disabled */
1175 static void be_rx_compl_process(struct be_adapter *adapter,
1176                         struct be_rx_obj *rxo,
1177                         struct be_rx_compl_info *rxcp)
1178 {
1179         struct net_device *netdev = adapter->netdev;
1180         struct sk_buff *skb;
1181
1182         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1183         if (unlikely(!skb)) {
1184                 if (net_ratelimit())
1185                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1186                 be_rx_compl_discard(adapter, rxo, rxcp);
1187                 return;
1188         }
1189
1190         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1191
1192         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1193                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1194         else
1195                 skb_checksum_none_assert(skb);
1196
1197         skb->truesize = skb->len + sizeof(struct sk_buff);
1198         skb->protocol = eth_type_trans(skb, netdev);
1199         if (adapter->netdev->features & NETIF_F_RXHASH)
1200                 skb->rxhash = rxcp->rss_hash;
1201
1203         if (unlikely(rxcp->vlanf)) {
1204                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1205                         kfree_skb(skb);
1206                         return;
1207                 }
1208                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1209                                         rxcp->vlan_tag);
1210         } else {
1211                 netif_receive_skb(skb);
1212         }
1213 }
1214
1215 /* Process the RX completion indicated by rxcp when GRO is enabled */
1216 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1217                 struct be_rx_obj *rxo,
1218                 struct be_rx_compl_info *rxcp)
1219 {
1220         struct be_rx_page_info *page_info;
1221         struct sk_buff *skb = NULL;
1222         struct be_queue_info *rxq = &rxo->q;
1223         struct be_eq_obj *eq_obj = &rxo->rx_eq;
1224         u16 remaining, curr_frag_len;
1225         u16 i, j;
1226
1227         skb = napi_get_frags(&eq_obj->napi);
1228         if (!skb) {
1229                 be_rx_compl_discard(adapter, rxo, rxcp);
1230                 return;
1231         }
1232
1233         remaining = rxcp->pkt_size;
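        /* j is u16: the initial -1 wraps to 0xffff, so the j++ taken for the
         * first frag (i == 0) yields index 0.
         */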
1234         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1235                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1236
1237                 curr_frag_len = min(remaining, rx_frag_size);
1238
1239                 /* Coalesce all frags from the same physical page in one slot */
1240                 if (i == 0 || page_info->page_offset == 0) {
1241                         /* First frag or Fresh page */
1242                         j++;
1243                         skb_shinfo(skb)->frags[j].page = page_info->page;
1244                         skb_shinfo(skb)->frags[j].page_offset =
1245                                                         page_info->page_offset;
1246                         skb_shinfo(skb)->frags[j].size = 0;
1247                 } else {
1248                         put_page(page_info->page);
1249                 }
1250                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1251
1252                 remaining -= curr_frag_len;
1253                 index_inc(&rxcp->rxq_idx, rxq->len);
1254                 memset(page_info, 0, sizeof(*page_info));
1255         }
1256         BUG_ON(j > MAX_SKB_FRAGS);
1257
1258         skb_shinfo(skb)->nr_frags = j + 1;
1259         skb->len = rxcp->pkt_size;
1260         skb->data_len = rxcp->pkt_size;
1261         skb->truesize += rxcp->pkt_size;
1262         skb->ip_summed = CHECKSUM_UNNECESSARY;
1263         if (adapter->netdev->features & NETIF_F_RXHASH)
1264                 skb->rxhash = rxcp->rss_hash;
1265
1266         if (likely(!rxcp->vlanf))
1267                 napi_gro_frags(&eq_obj->napi);
1268         else
1269                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1270                                 rxcp->vlan_tag);
1271 }
1272
1273 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1274                                 struct be_eth_rx_compl *compl,
1275                                 struct be_rx_compl_info *rxcp)
1276 {
1277         rxcp->pkt_size =
1278                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1282         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1283         rxcp->ip_csum =
1284                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285         rxcp->l4_csum =
1286                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287         rxcp->ipv6 =
1288                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289         rxcp->rxq_idx =
1290                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291         rxcp->num_rcvd =
1292                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293         rxcp->pkt_type =
1294                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1295         rxcp->rss_hash =
1296                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1297         if (rxcp->vlanf) {
1298                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1299                                           compl);
1300                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301                                                compl);
1302         }
1303 }
1304
1305 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1306                                 struct be_eth_rx_compl *compl,
1307                                 struct be_rx_compl_info *rxcp)
1308 {
1309         rxcp->pkt_size =
1310                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1314         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1315         rxcp->ip_csum =
1316                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317         rxcp->l4_csum =
1318                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319         rxcp->ipv6 =
1320                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321         rxcp->rxq_idx =
1322                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323         rxcp->num_rcvd =
1324                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325         rxcp->pkt_type =
1326                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1327         rxcp->rss_hash =
1328                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1329         if (rxcp->vlanf) {
1330                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1331                                           compl);
1332                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333                                                compl);
1334         }
1335 }
1336
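/* Fetch the next valid rx completion, if any: parse it into rxo->rxcp,
 * invalidate it and advance the cq tail.
 */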
1337 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1338 {
1339         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1340         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1341         struct be_adapter *adapter = rxo->adapter;
1342
1343         /* For checking the valid bit it is Ok to use either definition as the
1344          * valid bit is at the same position in both v0 and v1 Rx compl */
1345         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1346                 return NULL;
1347
1348         rmb();
1349         be_dws_le_to_cpu(compl, sizeof(*compl));
1350
1351         if (adapter->be3_native)
1352                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1353         else
1354                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1355
1356         if (rxcp->vlanf) {
1357                 /* vlanf could be wrongly set in some cards.
1358                  * ignore if vtm is not set */
1359                 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1360                         rxcp->vlanf = 0;
1361
1362                 if (!lancer_chip(adapter))
1363                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1364
1365                 if (((adapter->pvid & VLAN_VID_MASK) ==
1366                      (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1367                     !adapter->vlan_tag[rxcp->vlan_tag])
1368                         rxcp->vlanf = 0;
1369         }
1370
1371         /* As the compl has been parsed, reset it; we won't touch it again */
1372         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1373
1374         queue_tail_inc(&rxo->cq);
1375         return rxcp;
1376 }
1377
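/* Allocate a (compound, if order > 0) page group of at least 'size' bytes */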
1378 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1379 {
1380         u32 order = get_order(size);
1381
1382         if (order > 0)
1383                 gfp |= __GFP_COMP;
1384         return alloc_pages(gfp, order);
1385 }
1386
1387 /*
1388  * Allocate a page, split it to fragments of size rx_frag_size and post as
1389  * receive buffers to BE
1390  */
1391 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1392 {
1393         struct be_adapter *adapter = rxo->adapter;
1394         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1395         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1396         struct be_queue_info *rxq = &rxo->q;
1397         struct page *pagep = NULL;
1398         struct be_eth_rx_d *rxd;
1399         u64 page_dmaaddr = 0, frag_dmaaddr;
1400         u32 posted, page_offset = 0;
1401
1402         page_info = &rxo->page_info_tbl[rxq->head];
1403         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1404                 if (!pagep) {
1405                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1406                         if (unlikely(!pagep)) {
1407                                 rxo->stats.rx_post_fail++;
1408                                 break;
1409                         }
1410                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1411                                                     0, adapter->big_page_size,
1412                                                     DMA_FROM_DEVICE);
1413                         page_info->page_offset = 0;
1414                 } else {
1415                         get_page(pagep);
1416                         page_info->page_offset = page_offset + rx_frag_size;
1417                 }
1418                 page_offset = page_info->page_offset;
1419                 page_info->page = pagep;
1420                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1421                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1422
1423                 rxd = queue_head_node(rxq);
1424                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1425                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1426
1427                 /* Any space left in the current big page for another frag? */
1428                 if ((page_offset + rx_frag_size + rx_frag_size) >
1429                                         adapter->big_page_size) {
1430                         pagep = NULL;
1431                         page_info->last_page_user = true;
1432                 }
1433
1434                 prev_page_info = page_info;
1435                 queue_head_inc(rxq);
1436                 page_info = &page_info_tbl[rxq->head];
1437         }
1438         if (pagep)
1439                 prev_page_info->last_page_user = true;
1440
1441         if (posted) {
1442                 atomic_add(posted, &rxq->used);
1443                 be_rxq_notify(adapter, rxq->id, posted);
1444         } else if (atomic_read(&rxq->used) == 0) {
1445                 /* Let be_worker replenish when memory is available */
1446                 rxo->rx_post_starved = true;
1447         }
1448 }
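
/*
 * Worked example for the carving above, assuming 4K pages and the
 * default rx_frag_size of 2048: get_order(2048) is 0, so big_page_size
 * is 4096 and every allocated page yields two fragments at offsets 0
 * and 2048.  The second fragment gets last_page_user set, which lets
 * the consuming side unmap the page's DMA mapping exactly once.
 */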
1449
1450 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1451 {
1452         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1453
1454         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1455                 return NULL;
1456
1457         rmb();
1458         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1459
1460         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1461
1462         queue_tail_inc(tx_cq);
1463         return txcp;
1464 }
1465
1466 static u16 be_tx_compl_process(struct be_adapter *adapter,
1467                 struct be_tx_obj *txo, u16 last_index)
1468 {
1469         struct be_queue_info *txq = &txo->q;
1470         struct be_eth_wrb *wrb;
1471         struct sk_buff **sent_skbs = txo->sent_skb_list;
1472         struct sk_buff *sent_skb;
1473         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1474         bool unmap_skb_hdr = true;
1475
1476         sent_skb = sent_skbs[txq->tail];
1477         BUG_ON(!sent_skb);
1478         sent_skbs[txq->tail] = NULL;
1479
1480         /* skip header wrb */
1481         queue_tail_inc(txq);
1482
1483         do {
1484                 cur_index = txq->tail;
1485                 wrb = queue_tail_node(txq);
1486                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1487                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1488                 unmap_skb_hdr = false;
1489
1490                 num_wrbs++;
1491                 queue_tail_inc(txq);
1492         } while (cur_index != last_index);
1493
1494         kfree_skb(sent_skb);
1495         return num_wrbs;
1496 }
1497
1498 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1499 {
1500         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1501
1502         if (!eqe->evt)
1503                 return NULL;
1504
1505         rmb();
1506         eqe->evt = le32_to_cpu(eqe->evt);
1507         queue_tail_inc(&eq_obj->q);
1508         return eqe;
1509 }
1510
1511 static int event_handle(struct be_adapter *adapter,
1512                         struct be_eq_obj *eq_obj,
1513                         bool rearm)
1514 {
1515         struct be_eq_entry *eqe;
1516         u16 num = 0;
1517
1518         while ((eqe = event_get(eq_obj)) != NULL) {
1519                 eqe->evt = 0;
1520                 num++;
1521         }
1522
1523         /* Deal with any spurious interrupts that come
1524          * without events
1525          */
1526         if (!num)
1527                 rearm = true;
1528
1529         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1530         if (num)
1531                 napi_schedule(&eq_obj->napi);
1532
1533         return num;
1534 }
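
/*
 * be_eq_notify() acks 'num' consumed EQ entries and optionally re-arms
 * the EQ.  Callers pick the rearm policy (the TX path defers re-arming
 * to its NAPI poll routine, the RX path re-arms right here), but a
 * spurious interrupt that delivered no events always forces a rearm so
 * the EQ cannot go permanently silent.
 */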
1535
1536 /* Just read and notify events without processing them.
1537  * Used at the time of destroying event queues */
1538 static void be_eq_clean(struct be_adapter *adapter,
1539                         struct be_eq_obj *eq_obj)
1540 {
1541         struct be_eq_entry *eqe;
1542         u16 num = 0;
1543
1544         while ((eqe = event_get(eq_obj)) != NULL) {
1545                 eqe->evt = 0;
1546                 num++;
1547         }
1548
1549         if (num)
1550                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1551 }
1552
1553 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1554 {
1555         struct be_rx_page_info *page_info;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct be_queue_info *rx_cq = &rxo->cq;
1558         struct be_rx_compl_info *rxcp;
1559         u16 tail;
1560
1561         /* First cleanup pending rx completions */
1562         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1563                 be_rx_compl_discard(adapter, rxo, rxcp);
1564                 be_cq_notify(adapter, rx_cq->id, false, 1);
1565         }
1566
1567         /* Then free posted rx buffers that were not used */
1568         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1569         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1570                 page_info = get_rx_page_info(adapter, rxo, tail);
1571                 put_page(page_info->page);
1572                 memset(page_info, 0, sizeof(*page_info));
1573         }
1574         BUG_ON(atomic_read(&rxq->used));
1575         rxq->tail = rxq->head = 0;
1576 }
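
/*
 * The tail computation above recovers the oldest still-posted buffer:
 * 'used' entries sit behind the producer index 'head', so the first
 * unconsumed slot is (head + len - used) mod len; e.g. head = 3,
 * len = 1024 and used = 5 gives tail = 1022.
 */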
1577
1578 static void be_tx_compl_clean(struct be_adapter *adapter,
1579                                 struct be_tx_obj *txo)
1580 {
1581         struct be_queue_info *tx_cq = &txo->cq;
1582         struct be_queue_info *txq = &txo->q;
1583         struct be_eth_tx_compl *txcp;
1584         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1585         struct sk_buff **sent_skbs = txo->sent_skb_list;
1586         struct sk_buff *sent_skb;
1587         bool dummy_wrb;
1588
1589         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1590         do {
1591                 while ((txcp = be_tx_compl_get(tx_cq))) {
1592                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1593                                         wrb_index, txcp);
1594                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1595                         cmpl++;
1596                 }
1597                 if (cmpl) {
1598                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1599                         atomic_sub(num_wrbs, &txq->used);
1600                         cmpl = 0;
1601                         num_wrbs = 0;
1602                 }
1603
1604                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1605                         break;
1606
1607                 mdelay(1);
1608         } while (true);
1609
1610         if (atomic_read(&txq->used))
1611                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1612                         atomic_read(&txq->used));
1613
1614         /* free posted tx for which compls will never arrive */
1615         while (atomic_read(&txq->used)) {
1616                 sent_skb = sent_skbs[txq->tail];
1617                 end_idx = txq->tail;
1618                 index_adv(&end_idx,
1619                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1620                         txq->len);
1621                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1622                 atomic_sub(num_wrbs, &txq->used);
1623         }
1624 }
1625
1626 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1627 {
1628         struct be_queue_info *q;
1629
1630         q = &adapter->mcc_obj.q;
1631         if (q->created)
1632                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1633         be_queue_free(adapter, q);
1634
1635         q = &adapter->mcc_obj.cq;
1636         if (q->created)
1637                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638         be_queue_free(adapter, q);
1639 }
1640
1641 /* Must be called only after TX qs are created as MCC shares TX EQ */
1642 static int be_mcc_queues_create(struct be_adapter *adapter)
1643 {
1644         struct be_queue_info *q, *cq;
1645
1646         /* Alloc MCC compl queue */
1647         cq = &adapter->mcc_obj.cq;
1648         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1649                         sizeof(struct be_mcc_compl)))
1650                 goto err;
1651
1652         /* Ask BE to create MCC compl queue; share TX's eq */
1653         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1654                 goto mcc_cq_free;
1655
1656         /* Alloc MCC queue */
1657         q = &adapter->mcc_obj.q;
1658         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1659                 goto mcc_cq_destroy;
1660
1661         /* Ask BE to create MCC queue */
1662         if (be_cmd_mccq_create(adapter, q, cq))
1663                 goto mcc_q_free;
1664
1665         return 0;
1666
1667 mcc_q_free:
1668         be_queue_free(adapter, q);
1669 mcc_cq_destroy:
1670         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1671 mcc_cq_free:
1672         be_queue_free(adapter, cq);
1673 err:
1674         return -1;
1675 }
1676
1677 static void be_tx_queues_destroy(struct be_adapter *adapter)
1678 {
1679         struct be_queue_info *q;
1680         struct be_tx_obj *txo;
1681         u8 i;
1682
1683         for_all_tx_queues(adapter, txo, i) {
1684                 q = &txo->q;
1685                 if (q->created)
1686                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1687                 be_queue_free(adapter, q);
1688
1689                 q = &txo->cq;
1690                 if (q->created)
1691                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1692                 be_queue_free(adapter, q);
1693         }
1694
1695         /* Clear any residual events */
1696         be_eq_clean(adapter, &adapter->tx_eq);
1697
1698         q = &adapter->tx_eq.q;
1699         if (q->created)
1700                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1701         be_queue_free(adapter, q);
1702 }
1703
1704 /* One TX event queue is shared by all TX compl qs */
1705 static int be_tx_queues_create(struct be_adapter *adapter)
1706 {
1707         struct be_queue_info *eq, *q, *cq;
1708         struct be_tx_obj *txo;
1709         u8 i;
1710
1711         adapter->tx_eq.max_eqd = 0;
1712         adapter->tx_eq.min_eqd = 0;
1713         adapter->tx_eq.cur_eqd = 96;
1714         adapter->tx_eq.enable_aic = false;
1715
1716         eq = &adapter->tx_eq.q;
1717         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1718                 sizeof(struct be_eq_entry)))
1719                 return -1;
1720
1721         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1722                 goto err;
1723         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1724
1725         for_all_tx_queues(adapter, txo, i) {
1726                 cq = &txo->cq;
1727                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1728                         sizeof(struct be_eth_tx_compl)))
1729                         goto err;
1730
1731                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1732                         goto err;
1733
1734                 q = &txo->q;
1735                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1736                         sizeof(struct be_eth_wrb)))
1737                         goto err;
1738
1739                 if (be_cmd_txq_create(adapter, q, cq))
1740                         goto err;
1741         }
1742         return 0;
1743
1744 err:
1745         be_tx_queues_destroy(adapter);
1746         return -1;
1747 }
1748
1749 static void be_rx_queues_destroy(struct be_adapter *adapter)
1750 {
1751         struct be_queue_info *q;
1752         struct be_rx_obj *rxo;
1753         int i;
1754
1755         for_all_rx_queues(adapter, rxo, i) {
1756                 be_queue_free(adapter, &rxo->q);
1757
1758                 q = &rxo->cq;
1759                 if (q->created)
1760                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1761                 be_queue_free(adapter, q);
1762
1763                 q = &rxo->rx_eq.q;
1764                 if (q->created)
1765                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1766                 be_queue_free(adapter, q);
1767         }
1768 }
1769
1770 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1771 {
1772         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1773                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1774                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1775         } else {
1776                 dev_warn(&adapter->pdev->dev,
1777                         "No support for multiple RX queues\n");
1778                 return 1;
1779         }
1780 }
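
/*
 * Multiple RX queues (one default ring plus MAX_RSS_QS RSS rings) are
 * requested only when the function reports RSS capability, SR-IOV is
 * off and the function is not in the mode flagged by the 0x400
 * function_mode bit; in every other case a single RX queue is used.
 */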
1781
1782 static int be_rx_queues_create(struct be_adapter *adapter)
1783 {
1784         struct be_queue_info *eq, *q, *cq;
1785         struct be_rx_obj *rxo;
1786         int rc, i;
1787
1788         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1789                                 msix_enabled(adapter) ?
1790                                         adapter->num_msix_vec - 1 : 1);
1791         if (adapter->num_rx_qs != MAX_RX_QS)
1792                 dev_warn(&adapter->pdev->dev,
1793                         "Can create only %d RX queues", adapter->num_rx_qs);
1794
1795         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1796         for_all_rx_queues(adapter, rxo, i) {
1797                 rxo->adapter = adapter;
1798                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1799                 rxo->rx_eq.enable_aic = true;
1800
1801                 /* EQ */
1802                 eq = &rxo->rx_eq.q;
1803                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1804                                         sizeof(struct be_eq_entry));
1805                 if (rc)
1806                         goto err;
1807
1808                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1809                 if (rc)
1810                         goto err;
1811
1812                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1813
1814                 /* CQ */
1815                 cq = &rxo->cq;
1816                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1817                                 sizeof(struct be_eth_rx_compl));
1818                 if (rc)
1819                         goto err;
1820
1821                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1822                 if (rc)
1823                         goto err;
1824
1825                 /* Rx Q - will be created in be_open() */
1826                 q = &rxo->q;
1827                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1828                                 sizeof(struct be_eth_rx_d));
1829                 if (rc)
1830                         goto err;
1831
1832         }
1833
1834         return 0;
1835 err:
1836         be_rx_queues_destroy(adapter);
1837         return -1;
1838 }
1839
1840 static bool event_peek(struct be_eq_obj *eq_obj)
1841 {
1842         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1843
1844         return eqe->evt != 0;
1847 }
1848
1849 static irqreturn_t be_intx(int irq, void *dev)
1850 {
1851         struct be_adapter *adapter = dev;
1852         struct be_rx_obj *rxo;
1853         int isr, i, tx = 0, rx = 0;
1854
1855         if (lancer_chip(adapter)) {
1856                 if (event_peek(&adapter->tx_eq))
1857                         tx = event_handle(adapter, &adapter->tx_eq, false);
1858                 for_all_rx_queues(adapter, rxo, i) {
1859                         if (event_peek(&rxo->rx_eq))
1860                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1861                 }
1862
1863                 if (!(tx || rx))
1864                         return IRQ_NONE;
1865
1866         } else {
1867                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1868                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1869                 if (!isr)
1870                         return IRQ_NONE;
1871
1872                 if ((1 << adapter->tx_eq.eq_idx & isr))
1873                         event_handle(adapter, &adapter->tx_eq, false);
1874
1875                 for_all_rx_queues(adapter, rxo, i) {
1876                         if ((1 << rxo->rx_eq.eq_idx & isr))
1877                                 event_handle(adapter, &rxo->rx_eq, true);
1878                 }
1879         }
1880
1881         return IRQ_HANDLED;
1882 }
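
/*
 * Legacy-interrupt dispatch: on BE2/BE3 the CEV ISR word reports which
 * EQs have pending events and each set bit is routed to the matching
 * EQ handler, while Lancer exposes no such ISR and the handler instead
 * peeks at the EQ rings directly.  Returning IRQ_NONE when nothing is
 * pending is what makes the IRQF_SHARED INTx registration safe.
 */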
1883
1884 static irqreturn_t be_msix_rx(int irq, void *dev)
1885 {
1886         struct be_rx_obj *rxo = dev;
1887         struct be_adapter *adapter = rxo->adapter;
1888
1889         event_handle(adapter, &rxo->rx_eq, true);
1890
1891         return IRQ_HANDLED;
1892 }
1893
1894 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1895 {
1896         struct be_adapter *adapter = dev;
1897
1898         event_handle(adapter, &adapter->tx_eq, false);
1899
1900         return IRQ_HANDLED;
1901 }
1902
1903 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1904 {
1905         return rxcp->tcpf && !rxcp->err;
1906 }
1907
1908 static int be_poll_rx(struct napi_struct *napi, int budget)
1909 {
1910         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1911         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1912         struct be_adapter *adapter = rxo->adapter;
1913         struct be_queue_info *rx_cq = &rxo->cq;
1914         struct be_rx_compl_info *rxcp;
1915         u32 work_done;
1916
1917         rxo->stats.rx_polls++;
1918         for (work_done = 0; work_done < budget; work_done++) {
1919                 rxcp = be_rx_compl_get(rxo);
1920                 if (!rxcp)
1921                         break;
1922
1923                 /* Ignore flush completions */
1924                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1925                         if (do_gro(rxcp))
1926                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1927                         else
1928                                 be_rx_compl_process(adapter, rxo, rxcp);
1929                 } else if (rxcp->pkt_size == 0) {
1930                         be_rx_compl_discard(adapter, rxo, rxcp);
1931                 }
1932
1933                 be_rx_stats_update(rxo, rxcp);
1934         }
1935
1936         /* Refill the queue */
1937         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1938                 be_post_rx_frags(rxo, GFP_ATOMIC);
1939
1940         /* All consumed */
1941         if (work_done < budget) {
1942                 napi_complete(napi);
1943                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1944         } else {
1945                 /* More to be consumed; continue with interrupts disabled */
1946                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1947         }
1948         return work_done;
1949 }
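
/*
 * This follows the usual NAPI contract: finishing under budget means
 * napi_complete() plus an interrupt re-arm (the 'true' passed to
 * be_cq_notify()), while consuming the whole budget leaves interrupts
 * off and keeps the instance scheduled for another poll pass.
 */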
1950
1951 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1952  * For TX/MCC we don't honour the budget; consume everything.
1953  */
1954 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1955 {
1956         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1957         struct be_adapter *adapter =
1958                 container_of(tx_eq, struct be_adapter, tx_eq);
1959         struct be_tx_obj *txo;
1960         struct be_eth_tx_compl *txcp;
1961         int tx_compl, mcc_compl, status = 0;
1962         u8 i;
1963         u16 num_wrbs;
1964
1965         for_all_tx_queues(adapter, txo, i) {
1966                 tx_compl = 0;
1967                 num_wrbs = 0;
1968                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1969                         num_wrbs += be_tx_compl_process(adapter, txo,
1970                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1971                                         wrb_index, txcp));
1972                         tx_compl++;
1973                 }
1974                 if (tx_compl) {
1975                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1976
1977                         atomic_sub(num_wrbs, &txo->q.used);
1978
1979                         /* As Tx wrbs have been freed up, wake up netdev queue
1980                          * if it was stopped due to lack of tx wrbs.  */
1981                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1982                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1983                                 netif_wake_subqueue(adapter->netdev, i);
1984                         }
1985
1986                         adapter->drv_stats.be_tx_events++;
1987                         txo->stats.be_tx_compl += tx_compl;
1988                 }
1989         }
1990
1991         mcc_compl = be_process_mcc(adapter, &status);
1992
1993         if (mcc_compl) {
1994                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1995                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1996         }
1997
1998         napi_complete(napi);
1999
2000         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2001         return 1;
2002 }
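
/*
 * Unlike be_poll_rx(), this handler always completes NAPI and returns
 * 1 (i.e. less than any budget): TX and MCC completions are drained
 * fully in one pass, after which the shared TX/MCC EQ is re-armed via
 * be_eq_notify().
 */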
2003
2004 void be_detect_dump_ue(struct be_adapter *adapter)
2005 {
2006         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2007         u32 i;
2008
2009         pci_read_config_dword(adapter->pdev,
2010                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2011         pci_read_config_dword(adapter->pdev,
2012                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2013         pci_read_config_dword(adapter->pdev,
2014                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2015         pci_read_config_dword(adapter->pdev,
2016                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2017
2018         ue_status_lo &= ~ue_status_lo_mask;
2019         ue_status_hi &= ~ue_status_hi_mask;
2020
2021         if (ue_status_lo || ue_status_hi) {
2022                 adapter->ue_detected = true;
2023                 adapter->eeh_err = true;
2024                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2025         }
2026
2027         if (ue_status_lo) {
2028                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2029                         if (ue_status_lo & 1)
2030                                 dev_err(&adapter->pdev->dev,
2031                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2032                 }
2033         }
2034         if (ue_status_hi) {
2035                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2036                         if (ue_status_hi & 1)
2037                                 dev_err(&adapter->pdev->dev,
2038                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2039                 }
2040         }
2042 }
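
/*
 * UE (unrecoverable error) detection: the status words come from PCI
 * config space, bits disabled by the corresponding mask registers are
 * dropped, and every surviving bit is decoded through the
 * ue_status_{low,hi}_desc[] tables.  Setting ue_detected also stops
 * be_worker() from re-running this check.
 */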
2043
2044 static void be_worker(struct work_struct *work)
2045 {
2046         struct be_adapter *adapter =
2047                 container_of(work, struct be_adapter, work.work);
2048         struct be_rx_obj *rxo;
2049         struct be_tx_obj *txo;
2050         int i;
2051
2052         if (!adapter->ue_detected && !lancer_chip(adapter))
2053                 be_detect_dump_ue(adapter);
2054
2055         /* When interrupts are not yet enabled, just reap any pending
2056          * mcc completions */
2057         if (!netif_running(adapter->netdev)) {
2058                 int mcc_compl, status = 0;
2059
2060                 mcc_compl = be_process_mcc(adapter, &status);
2061
2062                 if (mcc_compl) {
2063                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2064                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2065                 }
2066
2067                 goto reschedule;
2068         }
2069
2070         if (!adapter->stats_cmd_sent) {
2071                 if (lancer_chip(adapter))
2072                         lancer_cmd_get_pport_stats(adapter,
2073                                                 &adapter->stats_cmd);
2074                 else
2075                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2076         }
2077
2078         for_all_tx_queues(adapter, txo, i)
2079                 be_tx_rate_update(txo);
2080
2081         for_all_rx_queues(adapter, rxo, i) {
2082                 be_rx_rate_update(rxo);
2083                 be_rx_eqd_update(adapter, rxo);
2084
2085                 if (rxo->rx_post_starved) {
2086                         rxo->rx_post_starved = false;
2087                         be_post_rx_frags(rxo, GFP_KERNEL);
2088                 }
2089         }
2090
2091 reschedule:
2092         adapter->work_counter++;
2093         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2094 }
2095
2096 static void be_msix_disable(struct be_adapter *adapter)
2097 {
2098         if (msix_enabled(adapter)) {
2099                 pci_disable_msix(adapter->pdev);
2100                 adapter->num_msix_vec = 0;
2101         }
2102 }
2103
2104 static void be_msix_enable(struct be_adapter *adapter)
2105 {
2106 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2107         int i, status, num_vec;
2108
2109         num_vec = be_num_rxqs_want(adapter) + 1;
2110
2111         for (i = 0; i < num_vec; i++)
2112                 adapter->msix_entries[i].entry = i;
2113
2114         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2115         if (status == 0) {
2116                 goto done;
2117         } else if (status >= BE_MIN_MSIX_VECTORS) {
2118                 num_vec = status;
2119                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2120                                 num_vec) == 0)
2121                         goto done;
2122         }
2123         return;
2124 done:
2125         adapter->num_msix_vec = num_vec;
2126         return;
2127 }
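
/*
 * With the old-style pci_enable_msix() used here, a positive return
 * value is not an errno but the number of vectors the system could
 * actually provide; the retry above simply asks again with that smaller
 * count, provided it still covers BE_MIN_MSIX_VECTORS (one RX vector
 * plus one for TX/MCC).
 */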
2128
2129 static void be_sriov_enable(struct be_adapter *adapter)
2130 {
2131         be_check_sriov_fn_type(adapter);
2132 #ifdef CONFIG_PCI_IOV
2133         if (be_physfn(adapter) && num_vfs) {
2134                 int status, pos;
2135                 u16 nvfs;
2136
2137                 pos = pci_find_ext_capability(adapter->pdev,
2138                                                 PCI_EXT_CAP_ID_SRIOV);
2139                 pci_read_config_word(adapter->pdev,
2140                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2141
2142                 if (num_vfs > nvfs) {
2143                         dev_info(&adapter->pdev->dev,
2144                                         "Device supports %d VFs and not %d\n",
2145                                         nvfs, num_vfs);
2146                         num_vfs = nvfs;
2147                 }
2148
2149                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2150                 adapter->sriov_enabled = !status;
2151         }
2152 #endif
2153 }
2154
2155 static void be_sriov_disable(struct be_adapter *adapter)
2156 {
2157 #ifdef CONFIG_PCI_IOV
2158         if (adapter->sriov_enabled) {
2159                 pci_disable_sriov(adapter->pdev);
2160                 adapter->sriov_enabled = false;
2161         }
2162 #endif
2163 }
2164
2165 static inline int be_msix_vec_get(struct be_adapter *adapter,
2166                                         struct be_eq_obj *eq_obj)
2167 {
2168         return adapter->msix_entries[eq_obj->eq_idx].vector;
2169 }
2170
2171 static int be_request_irq(struct be_adapter *adapter,
2172                 struct be_eq_obj *eq_obj,
2173                 void *handler, char *desc, void *context)
2174 {
2175         struct net_device *netdev = adapter->netdev;
2176         int vec;
2177
2178         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2179         vec = be_msix_vec_get(adapter, eq_obj);
2180         return request_irq(vec, handler, 0, eq_obj->desc, context);
2181 }
2182
2183 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2184                         void *context)
2185 {
2186         int vec = be_msix_vec_get(adapter, eq_obj);
2187         free_irq(vec, context);
2188 }
2189
2190 static int be_msix_register(struct be_adapter *adapter)
2191 {
2192         struct be_rx_obj *rxo;
2193         int status, i;
2194         char qname[10];
2195
2196         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2197                                 adapter);
2198         if (status)
2199                 goto err;
2200
2201         for_all_rx_queues(adapter, rxo, i) {
2202                 sprintf(qname, "rxq%d", i);
2203                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2204                                 qname, rxo);
2205                 if (status)
2206                         goto err_msix;
2207         }
2208
2209         return 0;
2210
2211 err_msix:
2212         be_free_irq(adapter, &adapter->tx_eq, adapter);
2213
2214         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2215                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2216
2217 err:
2218         dev_warn(&adapter->pdev->dev,
2219                 "MSIX Request IRQ failed - err %d\n", status);
2220         be_msix_disable(adapter);
2221         return status;
2222 }
2223
2224 static int be_irq_register(struct be_adapter *adapter)
2225 {
2226         struct net_device *netdev = adapter->netdev;
2227         int status;
2228
2229         if (msix_enabled(adapter)) {
2230                 status = be_msix_register(adapter);
2231                 if (status == 0)
2232                         goto done;
2233                 /* INTx is not supported for VF */
2234                 if (!be_physfn(adapter))
2235                         return status;
2236         }
2237
2238         /* INTx */
2239         netdev->irq = adapter->pdev->irq;
2240         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2241                         adapter);
2242         if (status) {
2243                 dev_err(&adapter->pdev->dev,
2244                         "INTx request IRQ failed - err %d\n", status);
2245                 return status;
2246         }
2247 done:
2248         adapter->isr_registered = true;
2249         return 0;
2250 }
2251
2252 static void be_irq_unregister(struct be_adapter *adapter)
2253 {
2254         struct net_device *netdev = adapter->netdev;
2255         struct be_rx_obj *rxo;
2256         int i;
2257
2258         if (!adapter->isr_registered)
2259                 return;
2260
2261         /* INTx */
2262         if (!msix_enabled(adapter)) {
2263                 free_irq(netdev->irq, adapter);
2264                 goto done;
2265         }
2266
2267         /* MSIx */
2268         be_free_irq(adapter, &adapter->tx_eq, adapter);
2269
2270         for_all_rx_queues(adapter, rxo, i)
2271                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2272
2273 done:
2274         adapter->isr_registered = false;
2275 }
2276
2277 static void be_rx_queues_clear(struct be_adapter *adapter)
2278 {
2279         struct be_queue_info *q;
2280         struct be_rx_obj *rxo;
2281         int i;
2282
2283         for_all_rx_queues(adapter, rxo, i) {
2284                 q = &rxo->q;
2285                 if (q->created) {
2286                         be_cmd_rxq_destroy(adapter, q);
2287                         /* After the rxq is invalidated, wait for a grace time
2288                          * of 1ms for all dma to end and the flush compl to
2289                          * arrive
2290                          */
2291                         mdelay(1);
2292                         be_rx_q_clean(adapter, rxo);
2293                 }
2294
2295                 /* Clear any residual events */
2296                 q = &rxo->rx_eq.q;
2297                 if (q->created)
2298                         be_eq_clean(adapter, &rxo->rx_eq);
2299         }
2300 }
2301
2302 static int be_close(struct net_device *netdev)
2303 {
2304         struct be_adapter *adapter = netdev_priv(netdev);
2305         struct be_rx_obj *rxo;
2306         struct be_tx_obj *txo;
2307         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2308         int vec, i;
2309
2310         be_async_mcc_disable(adapter);
2311
2312         netif_carrier_off(netdev);
2313         adapter->link_up = false;
2314
2315         if (!lancer_chip(adapter))
2316                 be_intr_set(adapter, false);
2317
2318         for_all_rx_queues(adapter, rxo, i)
2319                 napi_disable(&rxo->rx_eq.napi);
2320
2321         napi_disable(&tx_eq->napi);
2322
2323         if (lancer_chip(adapter)) {
2324                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2325                 for_all_rx_queues(adapter, rxo, i)
2326                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2327                 for_all_tx_queues(adapter, txo, i)
2328                         be_cq_notify(adapter, txo->cq.id, false, 0);
2329         }
2330
2331         if (msix_enabled(adapter)) {
2332                 vec = be_msix_vec_get(adapter, tx_eq);
2333                 synchronize_irq(vec);
2334
2335                 for_all_rx_queues(adapter, rxo, i) {
2336                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2337                         synchronize_irq(vec);
2338                 }
2339         } else {
2340                 synchronize_irq(netdev->irq);
2341         }
2342         be_irq_unregister(adapter);
2343
2344         /* Wait for all pending tx completions to arrive so that
2345          * all tx skbs are freed.
2346          */
2347         for_all_tx_queues(adapter, txo, i)
2348                 be_tx_compl_clean(adapter, txo);
2349
2350         be_rx_queues_clear(adapter);
2351         return 0;
2352 }
2353
2354 static int be_rx_queues_setup(struct be_adapter *adapter)
2355 {
2356         struct be_rx_obj *rxo;
2357         int rc, i;
2358         u8 rsstable[MAX_RSS_QS];
2359
2360         for_all_rx_queues(adapter, rxo, i) {
2361                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2362                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2363                         adapter->if_handle,
2364                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2365                 if (rc)
2366                         return rc;
2367         }
2368
2369         if (be_multi_rxq(adapter)) {
2370                 for_all_rss_queues(adapter, rxo, i)
2371                         rsstable[i] = rxo->rss_id;
2372
2373                 rc = be_cmd_rss_config(adapter, rsstable,
2374                         adapter->num_rx_qs - 1);
2375                 if (rc)
2376                         return rc;
2377         }
2378
2379         /* First time posting */
2380         for_all_rx_queues(adapter, rxo, i) {
2381                 be_post_rx_frags(rxo, GFP_KERNEL);
2382                 napi_enable(&rxo->rx_eq.napi);
2383         }
2384         return 0;
2385 }
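
/*
 * Queue 0 stays the default non-RSS ring; the rss_id the firmware hands
 * back for each remaining ring is collected into rsstable[] and pushed
 * via be_cmd_rss_config(), so hashed flows are spread over the other
 * num_rx_qs - 1 rings.
 */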
2386
2387 static int be_open(struct net_device *netdev)
2388 {
2389         struct be_adapter *adapter = netdev_priv(netdev);
2390         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2391         struct be_rx_obj *rxo;
2392         bool link_up;
2393         int status, i;
2394         u8 mac_speed;
2395         u16 link_speed;
2396
2397         status = be_rx_queues_setup(adapter);
2398         if (status)
2399                 goto err;
2400
2401         napi_enable(&tx_eq->napi);
2402
2403         be_irq_register(adapter);
2404
2405         if (!lancer_chip(adapter))
2406                 be_intr_set(adapter, true);
2407
2408         /* The evt queues are created in unarmed state; arm them */
2409         for_all_rx_queues(adapter, rxo, i) {
2410                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2411                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2412         }
2413         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2414
2415         /* Now that interrupts are on we can process async mcc */
2416         be_async_mcc_enable(adapter);
2417
2418         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2419                         &link_speed, 0);
2420         if (status)
2421                 goto err;
2422         be_link_status_update(adapter, link_up);
2423
2424         if (be_physfn(adapter)) {
2425                 status = be_vid_config(adapter, false, 0);
2426                 if (status)
2427                         goto err;
2428
2429                 status = be_cmd_set_flow_control(adapter,
2430                                 adapter->tx_fc, adapter->rx_fc);
2431                 if (status)
2432                         goto err;
2433         }
2434
2435         return 0;
2436 err:
2437         be_close(adapter->netdev);
2438         return -EIO;
2439 }
2440
2441 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2442 {
2443         struct be_dma_mem cmd;
2444         int status = 0;
2445         u8 mac[ETH_ALEN];
2446
2447         memset(mac, 0, ETH_ALEN);
2448
2449         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2450         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2451                                     GFP_KERNEL);
2452         if (cmd.va == NULL)
2453                 return -1;
2454         memset(cmd.va, 0, cmd.size);
2455
2456         if (enable) {
2457                 status = pci_write_config_dword(adapter->pdev,
2458                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2459                 if (status) {
2460                         dev_err(&adapter->pdev->dev,
2461                                 "Could not enable Wake-on-lan\n");
2462                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2463                                           cmd.dma);
2464                         return status;
2465                 }
2466                 status = be_cmd_enable_magic_wol(adapter,
2467                                 adapter->netdev->dev_addr, &cmd);
2468                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2469                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2470         } else {
2471                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2472                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2473                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2474         }
2475
2476         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2477         return status;
2478 }
2479
2480 /*
2481  * Generate a seed MAC address from the PF MAC Address using jhash.
2482  * MAC addresses for VFs are assigned incrementally starting from the seed.
2483  * These addresses are programmed in the ASIC by the PF and the VF driver
2484  * queries for the MAC address during its probe.
2485  */
2486 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2487 {
2488         u32 vf = 0;
2489         int status = 0;
2490         u8 mac[ETH_ALEN];
2491
2492         be_vf_eth_addr_generate(adapter, mac);
2493
2494         for (vf = 0; vf < num_vfs; vf++) {
2495                 status = be_cmd_pmac_add(adapter, mac,
2496                                         adapter->vf_cfg[vf].vf_if_handle,
2497                                         &adapter->vf_cfg[vf].vf_pmac_id,
2498                                         vf + 1);
2499                 if (status)
2500                         dev_err(&adapter->pdev->dev,
2501                                 "Mac address add failed for VF %d\n", vf);
2502                 else
2503                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2504
2505                 mac[5] += 1;
2506         }
2507         return status;
2508 }
2509
2510 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2511 {
2512         u32 vf;
2513
2514         for (vf = 0; vf < num_vfs; vf++) {
2515                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2516                         be_cmd_pmac_del(adapter,
2517                                         adapter->vf_cfg[vf].vf_if_handle,
2518                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2519         }
2520 }
2521
2522 static int be_setup(struct be_adapter *adapter)
2523 {
2524         struct net_device *netdev = adapter->netdev;
2525         u32 cap_flags, en_flags, vf = 0;
2526         int status;
2527         u8 mac[ETH_ALEN];
2528
2529         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2530                                 BE_IF_FLAGS_BROADCAST |
2531                                 BE_IF_FLAGS_MULTICAST;
2532
2533         if (be_physfn(adapter)) {
2534                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2535                                 BE_IF_FLAGS_PROMISCUOUS |
2536                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2537                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2538
2539                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2540                         cap_flags |= BE_IF_FLAGS_RSS;
2541                         en_flags |= BE_IF_FLAGS_RSS;
2542                 }
2543         }
2544
2545         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2546                         netdev->dev_addr, false /* pmac_invalid */,
2547                         &adapter->if_handle, &adapter->pmac_id, 0);
2548         if (status != 0)
2549                 goto do_none;
2550
2551         if (be_physfn(adapter)) {
2552                 if (adapter->sriov_enabled) {
2553                         while (vf < num_vfs) {
2554                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2555                                                         BE_IF_FLAGS_BROADCAST;
2556                                 status = be_cmd_if_create(adapter, cap_flags,
2557                                         en_flags, mac, true,
2558                                         &adapter->vf_cfg[vf].vf_if_handle,
2559                                         NULL, vf+1);
2560                                 if (status) {
2561                                         dev_err(&adapter->pdev->dev,
2562                                         "Interface Create failed for VF %d\n",
2563                                         vf);
2564                                         goto if_destroy;
2565                                 }
2566                                 adapter->vf_cfg[vf].vf_pmac_id =
2567                                                         BE_INVALID_PMAC_ID;
2568                                 vf++;
2569                         }
2570                 }
2571         } else {
2572                 status = be_cmd_mac_addr_query(adapter, mac,
2573                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2574                 if (!status) {
2575                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2576                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2577                 }
2578         }
2579
2580         status = be_tx_queues_create(adapter);
2581         if (status != 0)
2582                 goto if_destroy;
2583
2584         status = be_rx_queues_create(adapter);
2585         if (status != 0)
2586                 goto tx_qs_destroy;
2587
2588         /* Allow all priorities by default. A GRP5 evt may modify this */
2589         adapter->vlan_prio_bmap = 0xff;
2590
2591         status = be_mcc_queues_create(adapter);
2592         if (status != 0)
2593                 goto rx_qs_destroy;
2594
2595         adapter->link_speed = -1;
2596
2597         return 0;
2598
2599 rx_qs_destroy:
2600         be_rx_queues_destroy(adapter);
2601 tx_qs_destroy:
2602         be_tx_queues_destroy(adapter);
2603 if_destroy:
2604         if (be_physfn(adapter) && adapter->sriov_enabled)
2605                 for (vf = 0; vf < num_vfs; vf++)
2606                         if (adapter->vf_cfg[vf].vf_if_handle)
2607                                 be_cmd_if_destroy(adapter,
2608                                         adapter->vf_cfg[vf].vf_if_handle,
2609                                         vf + 1);
2610         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2611 do_none:
2612         return status;
2613 }
2614
2615 static int be_clear(struct be_adapter *adapter)
2616 {
2617         int vf;
2618
2619         if (be_physfn(adapter) && adapter->sriov_enabled)
2620                 be_vf_eth_addr_rem(adapter);
2621
2622         be_mcc_queues_destroy(adapter);
2623         be_rx_queues_destroy(adapter);
2624         be_tx_queues_destroy(adapter);
2625         adapter->eq_next_idx = 0;
2626
2627         if (be_physfn(adapter) && adapter->sriov_enabled)
2628                 for (vf = 0; vf < num_vfs; vf++)
2629                         if (adapter->vf_cfg[vf].vf_if_handle)
2630                                 be_cmd_if_destroy(adapter,
2631                                         adapter->vf_cfg[vf].vf_if_handle,
2632                                         vf + 1);
2633
2634         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2635
2636         /* tell fw we're done with firing cmds */
2637         be_cmd_fw_clean(adapter);
2638         return 0;
2639 }
2640
2641
2642 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2643 static bool be_flash_redboot(struct be_adapter *adapter,
2644                         const u8 *p, u32 img_start, int image_size,
2645                         int hdr_size)
2646 {
2647         u32 crc_offset;
2648         u8 flashed_crc[4];
2649         int status;
2650
2651         crc_offset = hdr_size + img_start + image_size - 4;
2652
2653         p += crc_offset;
2654
2655         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2656                         (image_size - 4));
2657         if (status) {
2658                 dev_err(&adapter->pdev->dev,
2659                 "could not get crc from flash, not flashing redboot\n");
2660                 return false;
2661         }
2662
2663         /* update redboot only if crc does not match */
2664         return memcmp(flashed_crc, p, 4) != 0;
2668 }
2669
2670 static int be_flash_data(struct be_adapter *adapter,
2671                         const struct firmware *fw,
2672                         struct be_dma_mem *flash_cmd, int num_of_images)
2673
2674 {
2675         int status = 0, i, filehdr_size = 0;
2676         u32 total_bytes = 0, flash_op;
2677         int num_bytes;
2678         const u8 *p = fw->data;
2679         struct be_cmd_write_flashrom *req = flash_cmd->va;
2680         const struct flash_comp *pflashcomp;
2681         int num_comp;
2682
2683         static const struct flash_comp gen3_flash_types[9] = {
2684                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2685                         FLASH_IMAGE_MAX_SIZE_g3},
2686                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2687                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2688                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2689                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2690                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2691                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2692                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2693                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2694                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2695                         FLASH_IMAGE_MAX_SIZE_g3},
2696                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2697                         FLASH_IMAGE_MAX_SIZE_g3},
2698                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2699                         FLASH_IMAGE_MAX_SIZE_g3},
2700                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2701                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2702         };
2703         static const struct flash_comp gen2_flash_types[8] = {
2704                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2705                         FLASH_IMAGE_MAX_SIZE_g2},
2706                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2707                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2708                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2709                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2710                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2711                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2712                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2713                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2714                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2715                         FLASH_IMAGE_MAX_SIZE_g2},
2716                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2717                         FLASH_IMAGE_MAX_SIZE_g2},
2718                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2719                          FLASH_IMAGE_MAX_SIZE_g2}
2720         };
2721
2722         if (adapter->generation == BE_GEN3) {
2723                 pflashcomp = gen3_flash_types;
2724                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2725                 num_comp = ARRAY_SIZE(gen3_flash_types);
2726         } else {
2727                 pflashcomp = gen2_flash_types;
2728                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2729                 num_comp = ARRAY_SIZE(gen2_flash_types);
2730         }
2731         for (i = 0; i < num_comp; i++) {
2732                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2733                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2734                         continue;
2735                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2736                         (!be_flash_redboot(adapter, fw->data,
2737                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2738                         (num_of_images * sizeof(struct image_hdr)))))
2739                         continue;
2740                 p = fw->data;
2741                 p += filehdr_size + pflashcomp[i].offset
2742                         + (num_of_images * sizeof(struct image_hdr));
2743                 if (p + pflashcomp[i].size > fw->data + fw->size)
2744                         return -1;
2745                 total_bytes = pflashcomp[i].size;
2746                 while (total_bytes) {
2747                         if (total_bytes > 32*1024)
2748                                 num_bytes = 32*1024;
2749                         else
2750                                 num_bytes = total_bytes;
2751                         total_bytes -= num_bytes;
2752
2753                         if (!total_bytes)
2754                                 flash_op = FLASHROM_OPER_FLASH;
2755                         else
2756                                 flash_op = FLASHROM_OPER_SAVE;
2757                         memcpy(req->params.data_buf, p, num_bytes);
2758                         p += num_bytes;
2759                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2760                                 pflashcomp[i].optype, flash_op, num_bytes);
2761                         if (status) {
2762                                 dev_err(&adapter->pdev->dev,
2763                                         "cmd to write to flash rom failed.\n");
2764                                 return -1;
2765                         }
2766                 }
2767         }
2768         return 0;
2769 }
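
/*
 * Flash programming above runs in 32KB chunks: every chunk except the
 * last is sent with FLASHROM_OPER_SAVE, which only stages data, and the
 * final chunk switches to FLASHROM_OPER_FLASH to commit the whole
 * component.
 */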
2770
2771 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2772 {
2773         if (fhdr == NULL)
2774                 return 0;
2775         if (fhdr->build[0] == '3')
2776                 return BE_GEN3;
2777         else if (fhdr->build[0] == '2')
2778                 return BE_GEN2;
2779         else
2780                 return 0;
2781 }
2782
2783 static int lancer_fw_download(struct be_adapter *adapter,
2784                                 const struct firmware *fw)
2785 {
2786 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2787 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2788         struct be_dma_mem flash_cmd;
2789         const u8 *data_ptr = NULL;
2790         u8 *dest_image_ptr = NULL;
2791         size_t image_size = 0;
2792         u32 chunk_size = 0;
2793         u32 data_written = 0;
2794         u32 offset = 0;
2795         int status = 0;
2796         u8 add_status = 0;
2797
2798         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2799                 dev_err(&adapter->pdev->dev,
2800                         "FW Image not properly aligned. "
2801                         "Length must be 4 byte aligned.\n");
2802                 status = -EINVAL;
2803                 goto lancer_fw_exit;
2804         }
2805
2806         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2807                                 + LANCER_FW_DOWNLOAD_CHUNK;
2808         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2809                                                 &flash_cmd.dma, GFP_KERNEL);
2810         if (!flash_cmd.va) {
2811                 status = -ENOMEM;
2812                 dev_err(&adapter->pdev->dev,
2813                         "Memory allocation failure while flashing\n");
2814                 goto lancer_fw_exit;
2815         }
2816
2817         dest_image_ptr = flash_cmd.va +
2818                                 sizeof(struct lancer_cmd_req_write_object);
2819         image_size = fw->size;
2820         data_ptr = fw->data;
2821
2822         while (image_size) {
2823                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2824
2825                 /* Copy the image chunk content. */
2826                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2827
2828                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2829                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2830                                 &data_written, &add_status);
2831
2832                 if (status)
2833                         break;
2834
2835                 offset += data_written;
2836                 data_ptr += data_written;
2837                 image_size -= data_written;
2838         }
2839
2840         if (!status) {
2841                 /* Commit the FW written */
2842                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2843                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2844                                         &data_written, &add_status);
2845         }
2846
2847         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2848                                 flash_cmd.dma);
2849         if (status) {
2850                 dev_err(&adapter->pdev->dev,
2851                         "Firmware load error. "
2852                         "Status code: 0x%x Additional Status: 0x%x\n",
2853                         status, add_status);
2854                 goto lancer_fw_exit;
2855         }
2856
2857         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2858 lancer_fw_exit:
2859         return status;
2860 }
2861
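     /*
      * BE2/BE3 firmware download from a UFI file. The generation the UFI
      * was built for must match the adapter's; gen3 UFIs carry a table of
      * image headers that is scanned before the flash regions are written
      * via be_flash_data().
      */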
2862 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2863 {
2864         struct flash_file_hdr_g2 *fhdr;
2865         struct flash_file_hdr_g3 *fhdr3;
2866         struct image_hdr *img_hdr_ptr = NULL;
2867         struct be_dma_mem flash_cmd;
2868         const u8 *p;
2869         int status = 0, i = 0, num_imgs = 0;
2870
2871         p = fw->data;
2872         fhdr = (struct flash_file_hdr_g2 *) p;
2873
2874         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2875         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2876                                           &flash_cmd.dma, GFP_KERNEL);
2877         if (!flash_cmd.va) {
2878                 status = -ENOMEM;
2879                 dev_err(&adapter->pdev->dev,
2880                         "Memory allocation failure while flashing\n");
2881                 goto be_fw_exit;
2882         }
2883
2884         if ((adapter->generation == BE_GEN3) &&
2885                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2886                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2887                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2888                 for (i = 0; i < num_imgs; i++) {
2889                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2890                                         (sizeof(struct flash_file_hdr_g3) +
2891                                          i * sizeof(struct image_hdr)));
2892                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2893                                 status = be_flash_data(adapter, fw, &flash_cmd,
2894                                                         num_imgs);
2895                 }
2896         } else if ((adapter->generation == BE_GEN2) &&
2897                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2898                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2899         } else {
2900                 dev_err(&adapter->pdev->dev,
2901                         "UFI and Interface are not compatible for flashing\n");
2902                 status = -EINVAL;
2903         }
2904
2905         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2906                           flash_cmd.dma);
2907         if (status) {
2908                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2909                 goto be_fw_exit;
2910         }
2911
2912         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2913
2914 be_fw_exit:
2915         return status;
2916 }
2917
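     /*
      * Entry point for firmware flashing, typically reached via
      * "ethtool -f <iface> <ufi-file>". Flashing is refused while the
      * interface is down; the image is fetched from userspace with
      * request_firmware() and dispatched per chip family.
      */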
2918 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2919 {
2920         const struct firmware *fw;
2921         int status;
2922
2923         if (!netif_running(adapter->netdev)) {
2924                 dev_err(&adapter->pdev->dev,
2925                         "Firmware load not allowed (interface is down)\n");
2926                 return -ENETDOWN;
2927         }
2928
2929         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2930         if (status)
2931                 goto fw_exit;
2932
2933         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2934
2935         if (lancer_chip(adapter))
2936                 status = lancer_fw_download(adapter, fw);
2937         else
2938                 status = be_fw_download(adapter, fw);
2939
2940 fw_exit:
2941         release_firmware(fw);
2942         return status;
2943 }
2944
2945 static const struct net_device_ops be_netdev_ops = {
2946         .ndo_open               = be_open,
2947         .ndo_stop               = be_close,
2948         .ndo_start_xmit         = be_xmit,
2949         .ndo_set_rx_mode        = be_set_multicast_list,
2950         .ndo_set_mac_address    = be_mac_addr_set,
2951         .ndo_change_mtu         = be_change_mtu,
2952         .ndo_validate_addr      = eth_validate_addr,
2953         .ndo_vlan_rx_register   = be_vlan_register,
2954         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2955         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2956         .ndo_set_vf_mac         = be_set_vf_mac,
2957         .ndo_set_vf_vlan        = be_set_vf_vlan,
2958         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2959         .ndo_get_vf_config      = be_get_vf_config
2960 };
2961
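     /*
      * Publish the device's offload capabilities (checksum, TSO, VLAN
      * offloads, and RX hashing when multiple RX queues are in use),
      * default to flow control enabled in both directions, and register
      * one NAPI context per RX event queue plus one for the TX/MCC queue.
      */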
2962 static void be_netdev_init(struct net_device *netdev)
2963 {
2964         struct be_adapter *adapter = netdev_priv(netdev);
2965         struct be_rx_obj *rxo;
2966         int i;
2967
2968         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2969                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2970                 NETIF_F_HW_VLAN_TX;
2971         if (be_multi_rxq(adapter))
2972                 netdev->hw_features |= NETIF_F_RXHASH;
2973
2974         netdev->features |= netdev->hw_features |
2975                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2976
2977         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2978                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2979
2980         netdev->flags |= IFF_MULTICAST;
2981
2982         /* Default settings for Rx and Tx flow control */
2983         adapter->rx_fc = true;
2984         adapter->tx_fc = true;
2985
2986         netif_set_gso_max_size(netdev, 65535);
2987
2988         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2989
2990         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2991
2992         for_all_rx_queues(adapter, rxo, i)
2993                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2994                                 BE_NAPI_WEIGHT);
2995
2996         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2997                 BE_NAPI_WEIGHT);
2998 }
2999
3000 static void be_unmap_pci_bars(struct be_adapter *adapter)
3001 {
3002         if (adapter->csr)
3003                 iounmap(adapter->csr);
3004         if (adapter->db)
3005                 iounmap(adapter->db);
3006         if (adapter->pcicfg && be_physfn(adapter))
3007                 iounmap(adapter->pcicfg);
3008 }
3009
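     /*
      * Map the PCI BARs this driver uses. The layout varies by chip:
      *   Lancer:   BAR 0 = doorbells (nothing else is mapped)
      *   BE2 (PF): BAR 2 = CSR, BAR 4 = doorbells, BAR 1 = pcicfg
      *   BE3 (PF): BAR 2 = CSR, BAR 4 = doorbells, BAR 0 = pcicfg
      *   BE3 (VF): BAR 0 = doorbells; pcicfg lives at a fixed offset
      *             inside the doorbell BAR
      */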
3010 static int be_map_pci_bars(struct be_adapter *adapter)
3011 {
3012         u8 __iomem *addr;
3013         int pcicfg_reg, db_reg;
3014
3015         if (lancer_chip(adapter)) {
3016                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3017                         pci_resource_len(adapter->pdev, 0));
3018                 if (addr == NULL)
3019                         return -ENOMEM;
3020                 adapter->db = addr;
3021                 return 0;
3022         }
3023
3024         if (be_physfn(adapter)) {
3025                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3026                                 pci_resource_len(adapter->pdev, 2));
3027                 if (addr == NULL)
3028                         return -ENOMEM;
3029                 adapter->csr = addr;
3030         }
3031
3032         if (adapter->generation == BE_GEN2) {
3033                 pcicfg_reg = 1;
3034                 db_reg = 4;
3035         } else {
3036                 pcicfg_reg = 0;
3037                 if (be_physfn(adapter))
3038                         db_reg = 4;
3039                 else
3040                         db_reg = 0;
3041         }
3042         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3043                                 pci_resource_len(adapter->pdev, db_reg));
3044         if (addr == NULL)
3045                 goto pci_map_err;
3046         adapter->db = addr;
3047
3048         if (be_physfn(adapter)) {
3049                 addr = ioremap_nocache(
3050                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3051                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3052                 if (addr == NULL)
3053                         goto pci_map_err;
3054                 adapter->pcicfg = addr;
3055         } else
3056                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3057
3058         return 0;
3059 pci_map_err:
3060         be_unmap_pci_bars(adapter);
3061         return -ENOMEM;
3062 }
3063
3065 static void be_ctrl_cleanup(struct be_adapter *adapter)
3066 {
3067         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3068
3069         be_unmap_pci_bars(adapter);
3070
3071         if (mem->va)
3072                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3073                                   mem->dma);
3074
3075         mem = &adapter->mc_cmd_mem;
3076         if (mem->va)
3077                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3078                                   mem->dma);
3079 }
3080
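     /*
      * Map the BARs and allocate the DMA memory the control path needs:
      * the mailbox (over-allocated by 16 bytes so it can be placed on a
      * 16-byte boundary) and a buffer for multicast MAC configuration
      * commands. Also initializes the mailbox/MCC locks.
      */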
3081 static int be_ctrl_init(struct be_adapter *adapter)
3082 {
3083         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3084         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3085         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3086         int status;
3087
3088         status = be_map_pci_bars(adapter);
3089         if (status)
3090                 goto done;
3091
3092         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3093         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3094                                                 mbox_mem_alloc->size,
3095                                                 &mbox_mem_alloc->dma,
3096                                                 GFP_KERNEL);
3097         if (!mbox_mem_alloc->va) {
3098                 status = -ENOMEM;
3099                 goto unmap_pci_bars;
3100         }
3101
3102         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3103         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3104         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3105         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3106
3107         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3108         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3109                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3110                                             GFP_KERNEL);
3111         if (mc_cmd_mem->va == NULL) {
3112                 status = -ENOMEM;
3113                 goto free_mbox;
3114         }
3115         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3116
3117         mutex_init(&adapter->mbox_lock);
3118         spin_lock_init(&adapter->mcc_lock);
3119         spin_lock_init(&adapter->mcc_cq_lock);
3120
3121         init_completion(&adapter->flash_compl);
3122         pci_save_state(adapter->pdev);
3123         return 0;
3124
3125 free_mbox:
3126         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3127                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3128
3129 unmap_pci_bars:
3130         be_unmap_pci_bars(adapter);
3131
3132 done:
3133         return status;
3134 }
3135
3136 static void be_stats_cleanup(struct be_adapter *adapter)
3137 {
3138         struct be_dma_mem *cmd = &adapter->stats_cmd;
3139
3140         if (cmd->va)
3141                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3142                                   cmd->va, cmd->dma);
3143 }
3144
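     /*
      * Allocate the DMA buffer used for statistics queries. The size
      * depends on the command version: v0 for BE2, v1 for BE3, and the
      * pport-stats command for Lancer.
      */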
3145 static int be_stats_init(struct be_adapter *adapter)
3146 {
3147         struct be_dma_mem *cmd = &adapter->stats_cmd;
3148
3149         if (adapter->generation == BE_GEN2) {
3150                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3151         } else {
3152                 if (lancer_chip(adapter))
3153                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3154                 else
3155                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3156         }
3157         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3158                                      GFP_KERNEL);
3159         if (cmd->va == NULL)
3160                 return -ENOMEM;
3161         memset(cmd->va, 0, cmd->size);
3162         return 0;
3163 }
3164
3165 static void __devexit be_remove(struct pci_dev *pdev)
3166 {
3167         struct be_adapter *adapter = pci_get_drvdata(pdev);
3168
3169         if (!adapter)
3170                 return;
3171
3172         cancel_delayed_work_sync(&adapter->work);
3173
3174         unregister_netdev(adapter->netdev);
3175
3176         be_clear(adapter);
3177
3178         be_stats_cleanup(adapter);
3179
3180         be_ctrl_cleanup(adapter);
3181
3182         kfree(adapter->vf_cfg);
3183         be_sriov_disable(adapter);
3184
3185         be_msix_disable(adapter);
3186
3187         pci_set_drvdata(pdev, NULL);
3188         pci_release_regions(pdev);
3189         pci_disable_device(pdev);
3190
3191         free_netdev(adapter->netdev);
3192 }
3193
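     /*
      * Pull the initial configuration from firmware: FW version, port
      * number, function mode/capabilities, the permanent MAC address
      * (PFs, and VFs on Lancer), the VLAN budget, controller attributes,
      * and the number of TX queues this function may use.
      */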
3194 static int be_get_config(struct be_adapter *adapter)
3195 {
3196         int status;
3197         u8 mac[ETH_ALEN];
3198
3199         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3200         if (status)
3201                 return status;
3202
3203         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3204                         &adapter->function_mode, &adapter->function_caps);
3205         if (status)
3206                 return status;
3207
3208         memset(mac, 0, ETH_ALEN);
3209
3210         /* A default permanent address is given to each VF for Lancer */
3211         if (be_physfn(adapter) || lancer_chip(adapter)) {
3212                 status = be_cmd_mac_addr_query(adapter, mac,
3213                         MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
3214
3215                 if (status)
3216                         return status;
3217
3218                 if (!is_valid_ether_addr(mac))
3219                         return -EADDRNOTAVAIL;
3220
3221                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3222                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3223         }
3224
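             /* 0x400 in function_mode is believed to be the multi-channel
              * (FLEX-10) mode bit; the VLAN budget is shared across
              * channels in that mode, hence the divide by 4.
              */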
3225         if (adapter->function_mode & 0x400)
3226                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3227         else
3228                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3229
3230         status = be_cmd_get_cntl_attributes(adapter);
3231         if (status)
3232                 return status;
3233
3234         be_cmd_check_native_mode(adapter);
3235
3236         if ((num_vfs && adapter->sriov_enabled) ||
3237                 (adapter->function_mode & 0x400) ||
3238                 lancer_chip(adapter) || !be_physfn(adapter)) {
3239                 adapter->num_tx_qs = 1;
3240                 netif_set_real_num_tx_queues(adapter->netdev,
3241                         adapter->num_tx_qs);
3242         } else {
3243                 adapter->num_tx_qs = MAX_TX_QS;
3244         }
3245
3246         return 0;
3247 }
3248
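     /*
      * Derive the adapter generation from the PCI device ID. For the
      * OC_DEVICE_ID3/ID4 parts the SLI_INTF register is also read to
      * validate the interface type and to extract the SLI family.
      */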
3249 static int be_dev_family_check(struct be_adapter *adapter)
3250 {
3251         struct pci_dev *pdev = adapter->pdev;
3252         u32 sli_intf = 0, if_type;
3253
3254         switch (pdev->device) {
3255         case BE_DEVICE_ID1:
3256         case OC_DEVICE_ID1:
3257                 adapter->generation = BE_GEN2;
3258                 break;
3259         case BE_DEVICE_ID2:
3260         case OC_DEVICE_ID2:
3261                 adapter->generation = BE_GEN3;
3262                 break;
3263         case OC_DEVICE_ID3:
3264         case OC_DEVICE_ID4:
3265                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3266                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3267                                                 SLI_INTF_IF_TYPE_SHIFT;
3268
3269                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3270                         if_type != 0x02) {
3271                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3272                         return -EINVAL;
3273                 }
3274                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3275                                          SLI_INTF_FAMILY_SHIFT);
3276                 adapter->generation = BE_GEN3;
3277                 break;
3278         default:
3279                 adapter->generation = 0;
3280         }
3281         return 0;
3282 }
3283
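     /*
      * Poll the SLIPORT status register until firmware reports ready:
      * up to 500 iterations of 20ms, i.e. roughly 10 seconds.
      */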
3284 static int lancer_wait_ready(struct be_adapter *adapter)
3285 {
3286 #define SLIPORT_READY_TIMEOUT 500
3287         u32 sliport_status;
3288         int status = 0, i;
3289
3290         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3291                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3292                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3293                         break;
3294
3295                 msleep(20);
3296         }
3297
3298         if (i == SLIPORT_READY_TIMEOUT)
3299                 status = -ETIMEDOUT;
3300
3301         return status;
3302 }
3303
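     /*
      * If firmware is in an error state that it flags as recoverable
      * (both ERR and RN set), initiate a port reset via the SLIPORT
      * control register and re-poll until the error clears. Any other
      * error state is treated as fatal.
      */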
3304 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3305 {
3306         int status;
3307         u32 sliport_status, err, reset_needed;

3308         status = lancer_wait_ready(adapter);
3309         if (!status) {
3310                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3311                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3312                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3313                 if (err && reset_needed) {
3314                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3315                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3316
3317                         /* check adapter has corrected the error */
3318                         status = lancer_wait_ready(adapter);
3319                         sliport_status = ioread32(adapter->db +
3320                                                         SLIPORT_STATUS_OFFSET);
3321                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3322                                                 SLIPORT_STATUS_RN_MASK);
3323                         if (status || sliport_status)
3324                                 status = -EIO;
3325                 } else if (err || reset_needed) {
3326                         status = -EIO;
3327                 }
3328         }
3329         return status;
3330 }
3331
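     /*
      * Probe sequence: enable the PCI device, allocate the netdev, map
      * BARs and set up the control path, bring firmware to a ready state,
      * reset the function, then create queues and interfaces via
      * be_setup() before registering the netdev. SR-IOV VFs, when
      * requested, get their MAC addresses and TX rates configured last.
      */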
3332 static int __devinit be_probe(struct pci_dev *pdev,
3333                         const struct pci_device_id *pdev_id)
3334 {
3335         int status = 0;
3336         struct be_adapter *adapter;
3337         struct net_device *netdev;
3338
3339         status = pci_enable_device(pdev);
3340         if (status)
3341                 goto do_none;
3342
3343         status = pci_request_regions(pdev, DRV_NAME);
3344         if (status)
3345                 goto disable_dev;
3346         pci_set_master(pdev);
3347
3348         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3349         if (netdev == NULL) {
3350                 status = -ENOMEM;
3351                 goto rel_reg;
3352         }
3353         adapter = netdev_priv(netdev);
3354         adapter->pdev = pdev;
3355         pci_set_drvdata(pdev, adapter);
3356
3357         status = be_dev_family_check(adapter);
3358         if (status)
3359                 goto free_netdev;
3360
3361         adapter->netdev = netdev;
3362         SET_NETDEV_DEV(netdev, &pdev->dev);
3363
3364         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3365         if (!status) {
3366                 netdev->features |= NETIF_F_HIGHDMA;
3367         } else {
3368                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3369                 if (status) {
3370                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3371                         goto free_netdev;
3372                 }
3373         }
3374
3375         be_sriov_enable(adapter);
3376         if (adapter->sriov_enabled) {
3377                 adapter->vf_cfg = kcalloc(num_vfs,
3378                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3379
3380                 if (!adapter->vf_cfg) {
3381                         status = -ENOMEM;
                             goto free_netdev;
                     }
3382         }
3383
3384         status = be_ctrl_init(adapter);
3385         if (status)
3386                 goto free_vf_cfg;
3387
3388         if (lancer_chip(adapter)) {
3389                 status = lancer_test_and_set_rdy_state(adapter);
3390                 if (status) {
3391                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3392                         goto ctrl_clean;
3393                 }
3394         }
3395
3396         /* sync up with fw's ready state */
3397         if (be_physfn(adapter)) {
3398                 status = be_cmd_POST(adapter);
3399                 if (status)
3400                         goto ctrl_clean;
3401         }
3402
3403         /* tell fw we're ready to fire cmds */
3404         status = be_cmd_fw_init(adapter);
3405         if (status)
3406                 goto ctrl_clean;
3407
3408         status = be_cmd_reset_function(adapter);
3409         if (status)
3410                 goto ctrl_clean;
3411
3412         status = be_stats_init(adapter);
3413         if (status)
3414                 goto ctrl_clean;
3415
3416         status = be_get_config(adapter);
3417         if (status)
3418                 goto stats_clean;
3419
3420         /* The INTR bit may be set in the card when probed by a kdump kernel
3421          * after a crash.
3422          */
3423         if (!lancer_chip(adapter))
3424                 be_intr_set(adapter, false);
3425
3426         be_msix_enable(adapter);
3427
3428         INIT_DELAYED_WORK(&adapter->work, be_worker);
3429
3430         status = be_setup(adapter);
3431         if (status)
3432                 goto msix_disable;
3433
3434         be_netdev_init(netdev);
3435         status = register_netdev(netdev);
3436         if (status != 0)
3437                 goto unsetup;
3438         netif_carrier_off(netdev);
3439
3440         if (be_physfn(adapter) && adapter->sriov_enabled) {
3441                 u8 mac_speed;
3442                 bool link_up;
3443                 u16 vf, lnk_speed;
3444
3445                 if (!lancer_chip(adapter)) {
3446                         status = be_vf_eth_addr_config(adapter);
3447                         if (status)
3448                                 goto unreg_netdev;
3449                 }
3450
3451                 for (vf = 0; vf < num_vfs; vf++) {
3452                         status = be_cmd_link_status_query(adapter, &link_up,
3453                                         &mac_speed, &lnk_speed, vf + 1);
3454                         if (!status)
3455                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3456                         else
3457                                 goto unreg_netdev;
3458                 }
3459         }
3460
3461         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3462
3463         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3464         return 0;
3465
3466 unreg_netdev:
3467         unregister_netdev(netdev);
3468 unsetup:
3469         be_clear(adapter);
3470 msix_disable:
3471         be_msix_disable(adapter);
3472 stats_clean:
3473         be_stats_cleanup(adapter);
3474 ctrl_clean:
3475         be_ctrl_cleanup(adapter);
3476 free_vf_cfg:
3477         kfree(adapter->vf_cfg);
3478 free_netdev:
3479         be_sriov_disable(adapter);
3480         free_netdev(netdev);
3481         pci_set_drvdata(pdev, NULL);
3482 rel_reg:
3483         pci_release_regions(pdev);
3484 disable_dev:
3485         pci_disable_device(pdev);
3486 do_none:
3487         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3488         return status;
3489 }
3490
3491 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3492 {
3493         struct be_adapter *adapter = pci_get_drvdata(pdev);
3494         struct net_device *netdev = adapter->netdev;
3495
3496         cancel_delayed_work_sync(&adapter->work);
3497         if (adapter->wol)
3498                 be_setup_wol(adapter, true);
3499
3500         netif_device_detach(netdev);
3501         if (netif_running(netdev)) {
3502                 rtnl_lock();
3503                 be_close(netdev);
3504                 rtnl_unlock();
3505         }
3506         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3507         be_clear(adapter);
3508
3509         be_msix_disable(adapter);
3510         pci_save_state(pdev);
3511         pci_disable_device(pdev);
3512         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3513         return 0;
3514 }
3515
3516 static int be_resume(struct pci_dev *pdev)
3517 {
3518         int status = 0;
3519         struct be_adapter *adapter = pci_get_drvdata(pdev);
3520         struct net_device *netdev = adapter->netdev;
3521
3522         netif_device_detach(netdev);
3523
3524         status = pci_enable_device(pdev);
3525         if (status)
3526                 return status;
3527
3528         pci_set_power_state(pdev, PCI_D0);
3529         pci_restore_state(pdev);
3530
3531         be_msix_enable(adapter);
3532         /* tell fw we're ready to fire cmds */
3533         status = be_cmd_fw_init(adapter);
3534         if (status)
3535                 return status;
3536
3537         be_setup(adapter);
3538         if (netif_running(netdev)) {
3539                 rtnl_lock();
3540                 be_open(netdev);
3541                 rtnl_unlock();
3542         }
3543         netif_device_attach(netdev);
3544
3545         if (adapter->wol)
3546                 be_setup_wol(adapter, false);
3547
3548         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3549         return 0;
3550 }
3551
3552 /*
3553  * An FLR will stop BE from DMAing any data.
3554  */
3555 static void be_shutdown(struct pci_dev *pdev)
3556 {
3557         struct be_adapter *adapter = pci_get_drvdata(pdev);
3558
3559         if (!adapter)
3560                 return;
3561
3562         cancel_delayed_work_sync(&adapter->work);
3563
3564         netif_device_detach(adapter->netdev);
3565
3566         if (adapter->wol)
3567                 be_setup_wol(adapter, true);
3568
3569         be_cmd_reset_function(adapter);
3570
3571         pci_disable_device(pdev);
3572 }
3573
3574 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3575                                 pci_channel_state_t state)
3576 {
3577         struct be_adapter *adapter = pci_get_drvdata(pdev);
3578         struct net_device *netdev = adapter->netdev;
3579
3580         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3581
3582         adapter->eeh_err = true;
3583
3584         netif_device_detach(netdev);
3585
3586         if (netif_running(netdev)) {
3587                 rtnl_lock();
3588                 be_close(netdev);
3589                 rtnl_unlock();
3590         }
3591         be_clear(adapter);
3592
3593         if (state == pci_channel_io_perm_failure)
3594                 return PCI_ERS_RESULT_DISCONNECT;
3595
3596         pci_disable_device(pdev);
3597
3598         return PCI_ERS_RESULT_NEED_RESET;
3599 }
3600
3601 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3602 {
3603         struct be_adapter *adapter = pci_get_drvdata(pdev);
3604         int status;
3605
3606         dev_info(&adapter->pdev->dev, "EEH reset\n");
3607         adapter->eeh_err = false;
3608
3609         status = pci_enable_device(pdev);
3610         if (status)
3611                 return PCI_ERS_RESULT_DISCONNECT;
3612
3613         pci_set_master(pdev);
3614         pci_set_power_state(pdev, PCI_D0);
3615         pci_restore_state(pdev);
3616
3617         /* Check if card is ok and fw is ready */
3618         status = be_cmd_POST(adapter);
3619         if (status)
3620                 return PCI_ERS_RESULT_DISCONNECT;
3621
3622         return PCI_ERS_RESULT_RECOVERED;
3623 }
3624
3625 static void be_eeh_resume(struct pci_dev *pdev)
3626 {
3627         int status = 0;
3628         struct be_adapter *adapter = pci_get_drvdata(pdev);
3629         struct net_device *netdev = adapter->netdev;
3630
3631         dev_info(&adapter->pdev->dev, "EEH resume\n");
3632
3633         pci_save_state(pdev);
3634
3635         /* tell fw we're ready to fire cmds */
3636         status = be_cmd_fw_init(adapter);
3637         if (status)
3638                 goto err;
3639
3640         status = be_setup(adapter);
3641         if (status)
3642                 goto err;
3643
3644         if (netif_running(netdev)) {
3645                 status = be_open(netdev);
3646                 if (status)
3647                         goto err;
3648         }
3649         netif_device_attach(netdev);
3650         return;
3651 err:
3652         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3653 }
3654
3655 static struct pci_error_handlers be_eeh_handlers = {
3656         .error_detected = be_eeh_err_detected,
3657         .slot_reset = be_eeh_reset,
3658         .resume = be_eeh_resume,
3659 };
3660
3661 static struct pci_driver be_driver = {
3662         .name = DRV_NAME,
3663         .id_table = be_dev_ids,
3664         .probe = be_probe,
3665         .remove = be_remove,
3666         .suspend = be_suspend,
3667         .resume = be_resume,
3668         .shutdown = be_shutdown,
3669         .err_handler = &be_eeh_handlers
3670 };
3671
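     /*
      * Module init: validate parameters before registering the PCI
      * driver. For example (module name assumed to be be2net):
      *
      *   modprobe be2net rx_frag_size=4096 num_vfs=2
      */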
3672 static int __init be_init_module(void)
3673 {
3674         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3675             rx_frag_size != 2048) {
3676                 printk(KERN_WARNING DRV_NAME
3677                         ": Module param rx_frag_size must be 2048/4096/8192."
3678                         " Using 2048\n");
3679                 rx_frag_size = 2048;
3680         }
3681
3682         return pci_register_driver(&be_driver);
3683 }
3684 module_init(be_init_module);
3685
3686 static void __exit be_exit_module(void)
3687 {
3688         pci_unregister_driver(&be_driver);
3689 }
3690 module_exit(be_exit_module);