be2net: Kill set but unused variable 'req' in lancer_fw_download()
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

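/* Enable or disable host interrupt delivery by flipping the HOSTINTR
 * bit in the PCICFG BAR interrupt-control register. The register is
 * read back first so the write is skipped when the bit already holds
 * the requested value, and the write is also skipped once an EEH
 * error has been flagged on the adapter.
 */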
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

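/* Doorbell helpers: each queue is kicked by writing its ring id plus a
 * count (posted entries or popped completions) into the corresponding
 * doorbell register. The wmb() before the RQ/TXQ doorbells ensures the
 * queue entries themselves are visible in memory before the hardware
 * is told that they exist.
 */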
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

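/* Copy the v0-layout (BE2) HW stats from the command response into the
 * generation-agnostic be_drv_stats, so the rest of the driver and
 * ethtool never have to care which layout the adapter returned.
 */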
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

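/* Lancer reports 64-bit counters as separate hi/lo 32-bit halves.
 * make_64bit_val(hi, lo) combines them; presumably
 * ((u64)hi << 32) | lo, matching how the halves are named below.
 */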
static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                                pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                                pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                make_64bit_val(pport_stats->rx_control_frames_hi,
                                pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                                pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                                pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                        pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                                pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                                pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                                pport_stats->rx_drops_too_many_frags_lo);
}

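/* Pick the parsing routine that matches the layout of the stats this
 * adapter returns: the Lancer pport format for Lancer, v1 for other
 * GEN3 (BE3) adapters, and v0 for everything older (BE2).
 */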
void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
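/* Worked example of the adaptive algorithm below: at 440,000 rx
 * frags/sec, eqd = (440000 / 110000) << 3 = 32, which is then clamped
 * to the EQ's [min_eqd, max_eqd] range; any result under 10 disables
 * the delay entirely.
 */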
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

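/* be_calc_rate converts a byte count over a jiffies interval into
 * Mbits/sec. e.g. 250,000,000 bytes over 2*HZ ticks:
 * 250e6 / 2 = 125e6 bytes/sec, << 3 = 1e9 bits/sec,
 * / 1e6 = 1000 Mbits/sec.
 */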
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
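/* On BE2/BE3 (but not Lancer) the ring requires an even number of
 * WRBs per packet. e.g. a linear skb with two page frags needs
 * 1 (header WRB) + 1 (linear head) + 2 (frags) = 4 WRBs: already
 * even, so no dummy WRB is added.
 */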
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

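/* Build the WRBs for one skb: a header WRB is reserved first, then one
 * fragment WRB is filled per DMA-mapped piece (linear head, then each
 * page frag), plus an optional dummy WRB to keep the count even. On a
 * mapping failure the queue head is rewound and everything mapped so
 * far is unmapped again.
 */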
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

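/* Each "big page" (a compound page of big_page_size) is carved into
 * rx_frag_size fragments that are posted individually; the page is
 * DMA-unmapped only when its last fragment (marked last_page_user) is
 * consumed here.
 */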
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

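/* Extract the fields of an RX completion from the v1 (BE3-native)
 * amap layout into the generation-agnostic be_rx_compl_info; the v0
 * variant below does the same for the older layout.
 */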
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}

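/* Peek the valid bit of the completion at the CQ tail without
 * consuming it; only after the rmb() (paired with the device's DMA
 * write of the completion) is the rest of the entry parsed. The valid
 * bit is cleared afterwards so a wrapped ring never presents a stale
 * entry as valid.
 */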
1332 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1333 {
1334         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1335         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1336         struct be_adapter *adapter = rxo->adapter;
1337
1338         /* For checking the valid bit it is Ok to use either definition as the
1339          * valid bit is at the same position in both v0 and v1 Rx compl */
1340         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1341                 return NULL;
1342
1343         rmb();
1344         be_dws_le_to_cpu(compl, sizeof(*compl));
1345
1346         if (adapter->be3_native)
1347                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1348         else
1349                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1350
1351         if (rxcp->vlanf) {
1352                 /* vlanf could be wrongly set in some cards.
1353                  * ignore if vtm is not set */
1354                 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1355                         rxcp->vlanf = 0;
1356
1357                 if (!lancer_chip(adapter))
1358                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1359
1360                 if (((adapter->pvid & VLAN_VID_MASK) ==
1361                      (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1362                     !adapter->vlan_tag[rxcp->vlan_tag])
1363                         rxcp->vlanf = 0;
1364         }
1365
1366         /* As the compl has been parsed, reset it; we wont touch it again */
1367         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1368
1369         queue_tail_inc(&rxo->cq);
1370         return rxcp;
1371 }
1372
1373 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1374 {
1375         u32 order = get_order(size);
1376
1377         if (order > 0)
1378                 gfp |= __GFP_COMP;
1379         return  alloc_pages(gfp, order);
1380 }
1381
1382 /*
1383  * Allocate a page, split it to fragments of size rx_frag_size and post as
1384  * receive buffers to BE
1385  */
1386 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1387 {
1388         struct be_adapter *adapter = rxo->adapter;
1389         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1390         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1391         struct be_queue_info *rxq = &rxo->q;
1392         struct page *pagep = NULL;
1393         struct be_eth_rx_d *rxd;
1394         u64 page_dmaaddr = 0, frag_dmaaddr;
1395         u32 posted, page_offset = 0;
1396
1397         page_info = &page_info_tbl[rxq->head];
1398         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1399                 if (!pagep) {
1400                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1401                         if (unlikely(!pagep)) {
1402                                 rxo->stats.rx_post_fail++;
1403                                 break;
1404                         }
1405                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1406                                                     0, adapter->big_page_size,
1407                                                     DMA_FROM_DEVICE);
1408                         page_info->page_offset = 0;
1409                 } else {
1410                         get_page(pagep);
1411                         page_info->page_offset = page_offset + rx_frag_size;
1412                 }
1413                 page_offset = page_info->page_offset;
1414                 page_info->page = pagep;
1415                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1416                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1417
1418                 rxd = queue_head_node(rxq);
1419                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1420                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1421
1422                 /* Any space left in the current big page for another frag? */
1423                 if ((page_offset + rx_frag_size + rx_frag_size) >
1424                                         adapter->big_page_size) {
1425                         pagep = NULL;
1426                         page_info->last_page_user = true;
1427                 }
1428
1429                 prev_page_info = page_info;
1430                 queue_head_inc(rxq);
1431                 page_info = &page_info_tbl[rxq->head];
1432         }
1433         if (pagep)
1434                 prev_page_info->last_page_user = true;
1435
1436         if (posted) {
1437                 atomic_add(posted, &rxq->used);
1438                 be_rxq_notify(adapter, rxq->id, posted);
1439         } else if (atomic_read(&rxq->used) == 0) {
1440                 /* Let be_worker replenish when memory is available */
1441                 rxo->rx_post_starved = true;
1442         }
1443 }
1444
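     /* TX-side twin of be_rx_compl_get(): return the next valid TX
      * completion (or NULL), byte-swapped, with its valid bit cleared and
      * the CQ tail advanced.
      */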
1445 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1446 {
1447         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1448
1449         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1450                 return NULL;
1451
1452         rmb();
1453         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1454
1455         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1456
1457         queue_tail_inc(tx_cq);
1458         return txcp;
1459 }
1460
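     /* Free one transmitted skb: walk the TX ring from the tail up to
      * last_index (the wrb_index reported in the completion), unmapping
      * each fragment.  Returns the number of wrbs reclaimed, including
      * the header wrb.
      */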
1461 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1462 {
1463         struct be_queue_info *txq = &adapter->tx_obj.q;
1464         struct be_eth_wrb *wrb;
1465         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1466         struct sk_buff *sent_skb;
1467         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1468         bool unmap_skb_hdr = true;
1469
1470         sent_skb = sent_skbs[txq->tail];
1471         BUG_ON(!sent_skb);
1472         sent_skbs[txq->tail] = NULL;
1473
1474         /* skip header wrb */
1475         queue_tail_inc(txq);
1476
1477         do {
1478                 cur_index = txq->tail;
1479                 wrb = queue_tail_node(txq);
1480                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1481                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1482                 unmap_skb_hdr = false;
1483
1484                 num_wrbs++;
1485                 queue_tail_inc(txq);
1486         } while (cur_index != last_index);
1487
1488         kfree_skb(sent_skb);
1489         return num_wrbs;
1490 }
1491
1492 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1493 {
1494         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1495
1496         if (!eqe->evt)
1497                 return NULL;
1498
1499         rmb();
1500         eqe->evt = le32_to_cpu(eqe->evt);
1501         queue_tail_inc(&eq_obj->q);
1502         return eqe;
1503 }
1504
1505 static int event_handle(struct be_adapter *adapter,
1506                         struct be_eq_obj *eq_obj)
1507 {
1508         struct be_eq_entry *eqe;
1509         u16 num = 0;
1510
1511         while ((eqe = event_get(eq_obj)) != NULL) {
1512                 eqe->evt = 0;
1513                 num++;
1514         }
1515
1516         /* Deal with any spurious interrupts that come
1517          * without events
1518          */
1519         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1520         if (num)
1521                 napi_schedule(&eq_obj->napi);
1522
1523         return num;
1524 }
1525
1526 /* Just read and notify events without processing them.
1527  * Used when destroying event queues */
1528 static void be_eq_clean(struct be_adapter *adapter,
1529                         struct be_eq_obj *eq_obj)
1530 {
1531         struct be_eq_entry *eqe;
1532         u16 num = 0;
1533
1534         while ((eqe = event_get(eq_obj)) != NULL) {
1535                 eqe->evt = 0;
1536                 num++;
1537         }
1538
1539         if (num)
1540                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1541 }
1542
1543 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1544 {
1545         struct be_rx_page_info *page_info;
1546         struct be_queue_info *rxq = &rxo->q;
1547         struct be_queue_info *rx_cq = &rxo->cq;
1548         struct be_rx_compl_info *rxcp;
1549         u16 tail;
1550
1551         /* First cleanup pending rx completions */
1552         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1553                 be_rx_compl_discard(adapter, rxo, rxcp);
1554                 be_cq_notify(adapter, rx_cq->id, false, 1);
1555         }
1556
1557         /* Then free posted rx buffers that were not used */
1558         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1559         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1560                 page_info = get_rx_page_info(adapter, rxo, tail);
1561                 put_page(page_info->page);
1562                 memset(page_info, 0, sizeof(*page_info));
1563         }
1564         BUG_ON(atomic_read(&rxq->used));
1565 }
1566
1567 static void be_tx_compl_clean(struct be_adapter *adapter)
1568 {
1569         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1570         struct be_queue_info *txq = &adapter->tx_obj.q;
1571         struct be_eth_tx_compl *txcp;
1572         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1573         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1574         struct sk_buff *sent_skb;
1575         bool dummy_wrb;
1576
1577         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1578         do {
1579                 while ((txcp = be_tx_compl_get(tx_cq))) {
1580                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1581                                         wrb_index, txcp);
1582                         num_wrbs += be_tx_compl_process(adapter, end_idx);
1583                         cmpl++;
1584                 }
1585                 if (cmpl) {
1586                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1587                         atomic_sub(num_wrbs, &txq->used);
1588                         cmpl = 0;
1589                         num_wrbs = 0;
1590                 }
1591
1592                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1593                         break;
1594
1595                 mdelay(1);
1596         } while (true);
1597
1598         if (atomic_read(&txq->used))
1599                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1600                         atomic_read(&txq->used));
1601
1602         /* free posted tx for which compls will never arrive */
1603         while (atomic_read(&txq->used)) {
1604                 sent_skb = sent_skbs[txq->tail];
1605                 end_idx = txq->tail;
1606                 index_adv(&end_idx,
1607                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1608                         txq->len);
1609                 num_wrbs = be_tx_compl_process(adapter, end_idx);
1610                 atomic_sub(num_wrbs, &txq->used);
1611         }
1612 }
1613
1614 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1615 {
1616         struct be_queue_info *q;
1617
1618         q = &adapter->mcc_obj.q;
1619         if (q->created)
1620                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1621         be_queue_free(adapter, q);
1622
1623         q = &adapter->mcc_obj.cq;
1624         if (q->created)
1625                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1626         be_queue_free(adapter, q);
1627 }
1628
1629 /* Must be called only after TX qs are created as MCC shares TX EQ */
1630 static int be_mcc_queues_create(struct be_adapter *adapter)
1631 {
1632         struct be_queue_info *q, *cq;
1633
1634         /* Alloc MCC compl queue */
1635         cq = &adapter->mcc_obj.cq;
1636         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1637                         sizeof(struct be_mcc_compl)))
1638                 goto err;
1639
1640         /* Ask BE to create MCC compl queue; share TX's eq */
1641         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1642                 goto mcc_cq_free;
1643
1644         /* Alloc MCC queue */
1645         q = &adapter->mcc_obj.q;
1646         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1647                 goto mcc_cq_destroy;
1648
1649         /* Ask BE to create MCC queue */
1650         if (be_cmd_mccq_create(adapter, q, cq))
1651                 goto mcc_q_free;
1652
1653         return 0;
1654
1655 mcc_q_free:
1656         be_queue_free(adapter, q);
1657 mcc_cq_destroy:
1658         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1659 mcc_cq_free:
1660         be_queue_free(adapter, cq);
1661 err:
1662         return -1;
1663 }
1664
1665 static void be_tx_queues_destroy(struct be_adapter *adapter)
1666 {
1667         struct be_queue_info *q;
1668
1669         q = &adapter->tx_obj.q;
1670         if (q->created)
1671                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1672         be_queue_free(adapter, q);
1673
1674         q = &adapter->tx_obj.cq;
1675         if (q->created)
1676                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1677         be_queue_free(adapter, q);
1678
1679         /* Clear any residual events */
1680         be_eq_clean(adapter, &adapter->tx_eq);
1681
1682         q = &adapter->tx_eq.q;
1683         if (q->created)
1684                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1685         be_queue_free(adapter, q);
1686 }
1687
1688 static int be_tx_queues_create(struct be_adapter *adapter)
1689 {
1690         struct be_queue_info *eq, *q, *cq;
1691
1692         adapter->tx_eq.max_eqd = 0;
1693         adapter->tx_eq.min_eqd = 0;
1694         adapter->tx_eq.cur_eqd = 96;
1695         adapter->tx_eq.enable_aic = false;
1696         /* Alloc Tx Event queue */
1697         eq = &adapter->tx_eq.q;
1698         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1699                 return -1;
1700
1701         /* Ask BE to create Tx Event queue */
1702         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1703                 goto tx_eq_free;
1704
1705         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1706
1708         /* Alloc TX eth compl queue */
1709         cq = &adapter->tx_obj.cq;
1710         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1711                         sizeof(struct be_eth_tx_compl)))
1712                 goto tx_eq_destroy;
1713
1714         /* Ask BE to create Tx eth compl queue */
1715         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1716                 goto tx_cq_free;
1717
1718         /* Alloc TX eth queue */
1719         q = &adapter->tx_obj.q;
1720         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1721                 goto tx_cq_destroy;
1722
1723         /* Ask BE to create Tx eth queue */
1724         if (be_cmd_txq_create(adapter, q, cq))
1725                 goto tx_q_free;
1726         return 0;
1727
1728 tx_q_free:
1729         be_queue_free(adapter, q);
1730 tx_cq_destroy:
1731         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1732 tx_cq_free:
1733         be_queue_free(adapter, cq);
1734 tx_eq_destroy:
1735         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1736 tx_eq_free:
1737         be_queue_free(adapter, eq);
1738         return -1;
1739 }
1740
1741 static void be_rx_queues_destroy(struct be_adapter *adapter)
1742 {
1743         struct be_queue_info *q;
1744         struct be_rx_obj *rxo;
1745         int i;
1746
1747         for_all_rx_queues(adapter, rxo, i) {
1748                 q = &rxo->q;
1749                 if (q->created) {
1750                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1751                         /* After the rxq is invalidated, wait for a grace time
1752                          * of 1ms for all dma to end and the flush compl to
1753                          * arrive
1754                          */
1755                         mdelay(1);
1756                         be_rx_q_clean(adapter, rxo);
1757                 }
1758                 be_queue_free(adapter, q);
1759
1760                 q = &rxo->cq;
1761                 if (q->created)
1762                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1763                 be_queue_free(adapter, q);
1764
1765                 /* Clear any residual events */
1766                 q = &rxo->rx_eq.q;
1767                 if (q->created) {
1768                         be_eq_clean(adapter, &rxo->rx_eq);
1769                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1770                 }
1771                 be_queue_free(adapter, q);
1772         }
1773 }
1774
1775 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1776 {
1777         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1778                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1779                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1780         } else {
1781                 dev_warn(&adapter->pdev->dev,
1782                         "No support for multiple RX queues\n");
1783                 return 1;
1784         }
1785 }
1786
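     /* Create an EQ, a CQ and an RX ring for each rx queue.  Queue 0 is
      * the default (non-RSS) queue; the rss_ids of the remaining queues
      * are then programmed into the RSS indirection table.
      */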
1787 static int be_rx_queues_create(struct be_adapter *adapter)
1788 {
1789         struct be_queue_info *eq, *q, *cq;
1790         struct be_rx_obj *rxo;
1791         int rc, i;
1792
1793         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1794                                 msix_enabled(adapter) ?
1795                                         adapter->num_msix_vec - 1 : 1);
1796         if (adapter->num_rx_qs != MAX_RX_QS)
1797                 dev_warn(&adapter->pdev->dev,
1798                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1799
1800         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1801         for_all_rx_queues(adapter, rxo, i) {
1802                 rxo->adapter = adapter;
1803                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1804                 rxo->rx_eq.enable_aic = true;
1805
1806                 /* EQ */
1807                 eq = &rxo->rx_eq.q;
1808                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1809                                         sizeof(struct be_eq_entry));
1810                 if (rc)
1811                         goto err;
1812
1813                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1814                 if (rc)
1815                         goto err;
1816
1817                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1818
1819                 /* CQ */
1820                 cq = &rxo->cq;
1821                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1822                                 sizeof(struct be_eth_rx_compl));
1823                 if (rc)
1824                         goto err;
1825
1826                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1827                 if (rc)
1828                         goto err;
1829                 /* Rx Q */
1830                 q = &rxo->q;
1831                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1832                                 sizeof(struct be_eth_rx_d));
1833                 if (rc)
1834                         goto err;
1835
1836                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1837                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1838                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1839                 if (rc)
1840                         goto err;
1841         }
1842
1843         if (be_multi_rxq(adapter)) {
1844                 u8 rsstable[MAX_RSS_QS];
1845
1846                 for_all_rss_queues(adapter, rxo, i)
1847                         rsstable[i] = rxo->rss_id;
1848
1849                 rc = be_cmd_rss_config(adapter, rsstable,
1850                         adapter->num_rx_qs - 1);
1851                 if (rc)
1852                         goto err;
1853         }
1854
1855         return 0;
1856 err:
1857         be_rx_queues_destroy(adapter);
1858         return -1;
1859 }
1860
1861 static bool event_peek(struct be_eq_obj *eq_obj)
1862 {
1863         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1864         if (!eqe->evt)
1865                 return false;
1866         else
1867                 return true;
1868 }
1869
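     /* Legacy INTx handler.  Lancer has no CEV_ISR register, so the EQs
      * are peeked directly for pending events; on BE2/BE3 the ISR
      * register identifies which EQs raised the interrupt.
      */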
1870 static irqreturn_t be_intx(int irq, void *dev)
1871 {
1872         struct be_adapter *adapter = dev;
1873         struct be_rx_obj *rxo;
1874         int isr, i, tx = 0, rx = 0;
1875
1876         if (lancer_chip(adapter)) {
1877                 if (event_peek(&adapter->tx_eq))
1878                         tx = event_handle(adapter, &adapter->tx_eq);
1879                 for_all_rx_queues(adapter, rxo, i) {
1880                         if (event_peek(&rxo->rx_eq))
1881                                 rx |= event_handle(adapter, &rxo->rx_eq);
1882                 }
1883
1884                 if (!(tx || rx))
1885                         return IRQ_NONE;
1886
1887         } else {
1888                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1889                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1890                 if (!isr)
1891                         return IRQ_NONE;
1892
1893                 if ((1 << adapter->tx_eq.eq_idx & isr))
1894                         event_handle(adapter, &adapter->tx_eq);
1895
1896                 for_all_rx_queues(adapter, rxo, i) {
1897                         if ((1 << rxo->rx_eq.eq_idx & isr))
1898                                 event_handle(adapter, &rxo->rx_eq);
1899                 }
1900         }
1901
1902         return IRQ_HANDLED;
1903 }
1904
1905 static irqreturn_t be_msix_rx(int irq, void *dev)
1906 {
1907         struct be_rx_obj *rxo = dev;
1908         struct be_adapter *adapter = rxo->adapter;
1909
1910         event_handle(adapter, &rxo->rx_eq);
1911
1912         return IRQ_HANDLED;
1913 }
1914
1915 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1916 {
1917         struct be_adapter *adapter = dev;
1918
1919         event_handle(adapter, &adapter->tx_eq);
1920
1921         return IRQ_HANDLED;
1922 }
1923
1924 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1925 {
1926         return rxcp->tcpf && !rxcp->err;
1927 }
1928
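     /* NAPI poll handler for an rx queue: consume up to budget
      * completions (flush completions are discarded), replenish the rx
      * ring if it has drained below the watermark, and re-arm the CQ
      * only when all pending work has been consumed.
      */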
1929 static int be_poll_rx(struct napi_struct *napi, int budget)
1930 {
1931         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1932         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1933         struct be_adapter *adapter = rxo->adapter;
1934         struct be_queue_info *rx_cq = &rxo->cq;
1935         struct be_rx_compl_info *rxcp;
1936         u32 work_done;
1937
1938         rxo->stats.rx_polls++;
1939         for (work_done = 0; work_done < budget; work_done++) {
1940                 rxcp = be_rx_compl_get(rxo);
1941                 if (!rxcp)
1942                         break;
1943
1944                 /* Ignore flush completions */
1945                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1946                         if (do_gro(rxcp))
1947                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1948                         else
1949                                 be_rx_compl_process(adapter, rxo, rxcp);
1950                 } else if (rxcp->pkt_size == 0) {
1951                         be_rx_compl_discard(adapter, rxo, rxcp);
1952                 }
1953
1954                 be_rx_stats_update(rxo, rxcp);
1955         }
1956
1957         /* Refill the queue */
1958         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1959                 be_post_rx_frags(rxo, GFP_ATOMIC);
1960
1961         /* All consumed */
1962         if (work_done < budget) {
1963                 napi_complete(napi);
1964                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1965         } else {
1966                 /* More to be consumed; continue with interrupts disabled */
1967                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1968         }
1969         return work_done;
1970 }
1971
1972 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1973  * For TX/MCC we don't honour the budget; consume everything.
1974  */
1975 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1976 {
1977         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1978         struct be_adapter *adapter =
1979                 container_of(tx_eq, struct be_adapter, tx_eq);
1980         struct be_queue_info *txq = &adapter->tx_obj.q;
1981         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1982         struct be_eth_tx_compl *txcp;
1983         int tx_compl = 0, mcc_compl, status = 0;
1984         u16 end_idx, num_wrbs = 0;
1985
1986         while ((txcp = be_tx_compl_get(tx_cq))) {
1987                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1988                                 wrb_index, txcp);
1989                 num_wrbs += be_tx_compl_process(adapter, end_idx);
1990                 tx_compl++;
1991         }
1992
1993         mcc_compl = be_process_mcc(adapter, &status);
1994
1995         napi_complete(napi);
1996
1997         if (mcc_compl) {
1998                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1999                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2000         }
2001
2002         if (tx_compl) {
2003                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
2004
2005                 atomic_sub(num_wrbs, &txq->used);
2006
2007                 /* As Tx wrbs have been freed up, wake up netdev queue if
2008                  * it was stopped due to lack of tx wrbs.
2009                  */
2010                 if (netif_queue_stopped(adapter->netdev) &&
2011                         atomic_read(&txq->used) < txq->len / 2) {
2012                         netif_wake_queue(adapter->netdev);
2013                 }
2014
2015                 tx_stats(adapter)->be_tx_events++;
2016                 tx_stats(adapter)->be_tx_compl += tx_compl;
2017         }
2018
2019         return 1;
2020 }
2021
2022 void be_detect_dump_ue(struct be_adapter *adapter)
2023 {
2024         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2025         u32 i;
2026
2027         pci_read_config_dword(adapter->pdev,
2028                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2029         pci_read_config_dword(adapter->pdev,
2030                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2031         pci_read_config_dword(adapter->pdev,
2032                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2033         pci_read_config_dword(adapter->pdev,
2034                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2035
2036         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2037         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2038
2039         if (ue_status_lo || ue_status_hi) {
2040                 adapter->ue_detected = true;
2041                 adapter->eeh_err = true;
2042                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2043         }
2044
2045         if (ue_status_lo) {
2046                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2047                         if (ue_status_lo & 1)
2048                                 dev_err(&adapter->pdev->dev,
2049                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2050                 }
2051         }
2052         if (ue_status_hi) {
2053                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2054                         if (ue_status_hi & 1)
2055                                 dev_err(&adapter->pdev->dev,
2056                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2057                 }
2058         }
2060 }
2061
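     /* Once-a-second housekeeping: check for unrecoverable errors, kick
      * off a stats request, update tx/rx rates and rx EQ delays, and
      * replenish any rx queue that starved waiting for memory.  While the
      * interface is down, only pending MCC completions are reaped.
      */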
2062 static void be_worker(struct work_struct *work)
2063 {
2064         struct be_adapter *adapter =
2065                 container_of(work, struct be_adapter, work.work);
2066         struct be_rx_obj *rxo;
2067         int i;
2068
2069         if (!adapter->ue_detected && !lancer_chip(adapter))
2070                 be_detect_dump_ue(adapter);
2071
2072         /* when interrupts are not yet enabled, just reap any pending
2073          * mcc completions */
2074         if (!netif_running(adapter->netdev)) {
2075                 int mcc_compl, status = 0;
2076
2077                 mcc_compl = be_process_mcc(adapter, &status);
2078
2079                 if (mcc_compl) {
2080                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2081                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2082                 }
2083
2084                 goto reschedule;
2085         }
2086
2087         if (!adapter->stats_cmd_sent) {
2088                 if (lancer_chip(adapter))
2089                         lancer_cmd_get_pport_stats(adapter,
2090                                                 &adapter->stats_cmd);
2091                 else
2092                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2093         }
2094         be_tx_rate_update(adapter);
2095
2096         for_all_rx_queues(adapter, rxo, i) {
2097                 be_rx_rate_update(rxo);
2098                 be_rx_eqd_update(adapter, rxo);
2099
2100                 if (rxo->rx_post_starved) {
2101                         rxo->rx_post_starved = false;
2102                         be_post_rx_frags(rxo, GFP_KERNEL);
2103                 }
2104         }
2105
2106 reschedule:
2107         adapter->work_counter++;
2108         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2109 }
2110
2111 static void be_msix_disable(struct be_adapter *adapter)
2112 {
2113         if (msix_enabled(adapter)) {
2114                 pci_disable_msix(adapter->pdev);
2115                 adapter->num_msix_vec = 0;
2116         }
2117 }
2118
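     /* Negotiate MSI-X vectors: ask for one per desired rx queue plus one
      * for tx/mcc.  A positive return from pci_enable_msix() is the number
      * of vectors actually available; retry with that count as long as it
      * covers the one-rx-plus-one-tx minimum.
      */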
2119 static void be_msix_enable(struct be_adapter *adapter)
2120 {
2121 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2122         int i, status, num_vec;
2123
2124         num_vec = be_num_rxqs_want(adapter) + 1;
2125
2126         for (i = 0; i < num_vec; i++)
2127                 adapter->msix_entries[i].entry = i;
2128
2129         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2130         if (status == 0) {
2131                 goto done;
2132         } else if (status >= BE_MIN_MSIX_VECTORS) {
2133                 num_vec = status;
2134                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2135                                 num_vec) == 0)
2136                         goto done;
2137         }
2138         return;
2139 done:
2140         adapter->num_msix_vec = num_vec;
2141         return;
2142 }
2143
2144 static void be_sriov_enable(struct be_adapter *adapter)
2145 {
2146         be_check_sriov_fn_type(adapter);
2147 #ifdef CONFIG_PCI_IOV
2148         if (be_physfn(adapter) && num_vfs) {
2149                 int status, pos;
2150                 u16 nvfs;
2151
2152                 pos = pci_find_ext_capability(adapter->pdev,
2153                                                 PCI_EXT_CAP_ID_SRIOV);
2154                 pci_read_config_word(adapter->pdev,
2155                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2156
2157                 if (num_vfs > nvfs) {
2158                         dev_info(&adapter->pdev->dev,
2159                                         "Device supports only %d VFs, not %d\n",
2160                                         nvfs, num_vfs);
2161                         num_vfs = nvfs;
2162                 }
2163
2164                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2165                 adapter->sriov_enabled = !status;
2166         }
2167 #endif
2168 }
2169
2170 static void be_sriov_disable(struct be_adapter *adapter)
2171 {
2172 #ifdef CONFIG_PCI_IOV
2173         if (adapter->sriov_enabled) {
2174                 pci_disable_sriov(adapter->pdev);
2175                 adapter->sriov_enabled = false;
2176         }
2177 #endif
2178 }
2179
2180 static inline int be_msix_vec_get(struct be_adapter *adapter,
2181                                         struct be_eq_obj *eq_obj)
2182 {
2183         return adapter->msix_entries[eq_obj->eq_idx].vector;
2184 }
2185
2186 static int be_request_irq(struct be_adapter *adapter,
2187                 struct be_eq_obj *eq_obj,
2188                 void *handler, char *desc, void *context)
2189 {
2190         struct net_device *netdev = adapter->netdev;
2191         int vec;
2192
2193         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2194         vec = be_msix_vec_get(adapter, eq_obj);
2195         return request_irq(vec, handler, 0, eq_obj->desc, context);
2196 }
2197
2198 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2199                         void *context)
2200 {
2201         int vec = be_msix_vec_get(adapter, eq_obj);
2202         free_irq(vec, context);
2203 }
2204
2205 static int be_msix_register(struct be_adapter *adapter)
2206 {
2207         struct be_rx_obj *rxo;
2208         int status, i;
2209         char qname[10];
2210
2211         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2212                                 adapter);
2213         if (status)
2214                 goto err;
2215
2216         for_all_rx_queues(adapter, rxo, i) {
2217                 sprintf(qname, "rxq%d", i);
2218                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2219                                 qname, rxo);
2220                 if (status)
2221                         goto err_msix;
2222         }
2223
2224         return 0;
2225
2226 err_msix:
2227         be_free_irq(adapter, &adapter->tx_eq, adapter);
2228
2229         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2230                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2231
2232 err:
2233         dev_warn(&adapter->pdev->dev,
2234                 "MSIX Request IRQ failed - err %d\n", status);
2235         be_msix_disable(adapter);
2236         return status;
2237 }
2238
2239 static int be_irq_register(struct be_adapter *adapter)
2240 {
2241         struct net_device *netdev = adapter->netdev;
2242         int status;
2243
2244         if (msix_enabled(adapter)) {
2245                 status = be_msix_register(adapter);
2246                 if (status == 0)
2247                         goto done;
2248                 /* INTx is not supported for VF */
2249                 if (!be_physfn(adapter))
2250                         return status;
2251         }
2252
2253         /* INTx */
2254         netdev->irq = adapter->pdev->irq;
2255         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2256                         adapter);
2257         if (status) {
2258                 dev_err(&adapter->pdev->dev,
2259                         "INTx request IRQ failed - err %d\n", status);
2260                 return status;
2261         }
2262 done:
2263         adapter->isr_registered = true;
2264         return 0;
2265 }
2266
2267 static void be_irq_unregister(struct be_adapter *adapter)
2268 {
2269         struct net_device *netdev = adapter->netdev;
2270         struct be_rx_obj *rxo;
2271         int i;
2272
2273         if (!adapter->isr_registered)
2274                 return;
2275
2276         /* INTx */
2277         if (!msix_enabled(adapter)) {
2278                 free_irq(netdev->irq, adapter);
2279                 goto done;
2280         }
2281
2282         /* MSIx */
2283         be_free_irq(adapter, &adapter->tx_eq, adapter);
2284
2285         for_all_rx_queues(adapter, rxo, i)
2286                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2287
2288 done:
2289         adapter->isr_registered = false;
2290 }
2291
2292 static int be_close(struct net_device *netdev)
2293 {
2294         struct be_adapter *adapter = netdev_priv(netdev);
2295         struct be_rx_obj *rxo;
2296         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2297         int vec, i;
2298
2299         be_async_mcc_disable(adapter);
2300
2301         netif_carrier_off(netdev);
2302         adapter->link_up = false;
2303
2304         if (!lancer_chip(adapter))
2305                 be_intr_set(adapter, false);
2306
2307         for_all_rx_queues(adapter, rxo, i)
2308                 napi_disable(&rxo->rx_eq.napi);
2309
2310         napi_disable(&tx_eq->napi);
2311
2312         if (lancer_chip(adapter)) {
2313                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2314                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2315                 for_all_rx_queues(adapter, rxo, i)
2316                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2317         }
2318
2319         if (msix_enabled(adapter)) {
2320                 vec = be_msix_vec_get(adapter, tx_eq);
2321                 synchronize_irq(vec);
2322
2323                 for_all_rx_queues(adapter, rxo, i) {
2324                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2325                         synchronize_irq(vec);
2326                 }
2327         } else {
2328                 synchronize_irq(netdev->irq);
2329         }
2330         be_irq_unregister(adapter);
2331
2332         /* Wait for all pending tx completions to arrive so that
2333          * all tx skbs are freed.
2334          */
2335         be_tx_compl_clean(adapter);
2336
2337         return 0;
2338 }
2339
2340 static int be_open(struct net_device *netdev)
2341 {
2342         struct be_adapter *adapter = netdev_priv(netdev);
2343         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2344         struct be_rx_obj *rxo;
2345         bool link_up;
2346         int status, i;
2347         u8 mac_speed;
2348         u16 link_speed;
2349
2350         for_all_rx_queues(adapter, rxo, i) {
2351                 be_post_rx_frags(rxo, GFP_KERNEL);
2352                 napi_enable(&rxo->rx_eq.napi);
2353         }
2354         napi_enable(&tx_eq->napi);
2355
2356         be_irq_register(adapter);
2357
2358         if (!lancer_chip(adapter))
2359                 be_intr_set(adapter, true);
2360
2361         /* The evt queues are created in unarmed state; arm them */
2362         for_all_rx_queues(adapter, rxo, i) {
2363                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2364                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2365         }
2366         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2367
2368         /* Now that interrupts are on we can process async mcc */
2369         be_async_mcc_enable(adapter);
2370
2371         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2372                         &link_speed, 0);
2373         if (status)
2374                 goto err;
2375         be_link_status_update(adapter, link_up);
2376
2377         if (be_physfn(adapter)) {
2378                 status = be_vid_config(adapter, false, 0);
2379                 if (status)
2380                         goto err;
2381
2382                 status = be_cmd_set_flow_control(adapter,
2383                                 adapter->tx_fc, adapter->rx_fc);
2384                 if (status)
2385                         goto err;
2386         }
2387
2388         return 0;
2389 err:
2390         be_close(adapter->netdev);
2391         return -EIO;
2392 }
2393
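     /* Program (enable=true) or clear the magic-packet WoL filter in the
      * ASIC and set the PCI wake state accordingly; a zeroed MAC is used
      * to clear the filter.
      */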
2394 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2395 {
2396         struct be_dma_mem cmd;
2397         int status = 0;
2398         u8 mac[ETH_ALEN];
2399
2400         memset(mac, 0, ETH_ALEN);
2401
2402         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2403         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2404                                     GFP_KERNEL);
2405         if (cmd.va == NULL)
2406                 return -1;
2407         memset(cmd.va, 0, cmd.size);
2408
2409         if (enable) {
2410                 status = pci_write_config_dword(adapter->pdev,
2411                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2412                 if (status) {
2413                         dev_err(&adapter->pdev->dev,
2414                                 "Could not enable Wake-on-LAN\n");
2415                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2416                                           cmd.dma);
2417                         return status;
2418                 }
2419                 status = be_cmd_enable_magic_wol(adapter,
2420                                 adapter->netdev->dev_addr, &cmd);
2421                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2422                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2423         } else {
2424                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2425                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2426                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2427         }
2428
2429         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2430         return status;
2431 }
2432
2433 /*
2434  * Generate a seed MAC address from the PF MAC address using jhash.
2435  * MAC addresses for the VFs are assigned incrementally, starting from
2436  * the seed. These addresses are programmed into the ASIC by the PF and
2437  * each VF driver queries its own MAC address during probe.
2438  */
2439 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2440 {
2441         u32 vf = 0;
2442         int status = 0;
2443         u8 mac[ETH_ALEN];
2444
2445         be_vf_eth_addr_generate(adapter, mac);
2446
2447         for (vf = 0; vf < num_vfs; vf++) {
2448                 status = be_cmd_pmac_add(adapter, mac,
2449                                         adapter->vf_cfg[vf].vf_if_handle,
2450                                         &adapter->vf_cfg[vf].vf_pmac_id,
2451                                         vf + 1);
2452                 if (status)
2453                         dev_err(&adapter->pdev->dev,
2454                                 "Mac address add failed for VF %d\n", vf);
2455                 else
2456                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2457
2458                 mac[5] += 1;
2459         }
2460         return status;
2461 }
2462
2463 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2464 {
2465         u32 vf;
2466
2467         for (vf = 0; vf < num_vfs; vf++) {
2468                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2469                         be_cmd_pmac_del(adapter,
2470                                         adapter->vf_cfg[vf].vf_if_handle,
2471                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2472         }
2473 }
2474
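     /* Create the control-path objects in order: the interface (plus one
      * interface per VF when SR-IOV is enabled), then the tx, rx and mcc
      * queue sets.  On failure, everything created so far is torn down in
      * reverse order.
      */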
2475 static int be_setup(struct be_adapter *adapter)
2476 {
2477         struct net_device *netdev = adapter->netdev;
2478         u32 cap_flags, en_flags, vf = 0;
2479         int status;
2480         u8 mac[ETH_ALEN];
2481
2482         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2483                                 BE_IF_FLAGS_BROADCAST |
2484                                 BE_IF_FLAGS_MULTICAST;
2485
2486         if (be_physfn(adapter)) {
2487                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2488                                 BE_IF_FLAGS_PROMISCUOUS |
2489                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2490                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2491
2492                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2493                         cap_flags |= BE_IF_FLAGS_RSS;
2494                         en_flags |= BE_IF_FLAGS_RSS;
2495                 }
2496         }
2497
2498         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2499                         netdev->dev_addr, false/* pmac_invalid */,
2500                         &adapter->if_handle, &adapter->pmac_id, 0);
2501         if (status != 0)
2502                 goto do_none;
2503
2504         if (be_physfn(adapter)) {
2505                 if (adapter->sriov_enabled) {
2506                         while (vf < num_vfs) {
2507                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2508                                                         BE_IF_FLAGS_BROADCAST;
2509                                 status = be_cmd_if_create(adapter, cap_flags,
2510                                         en_flags, mac, true,
2511                                         &adapter->vf_cfg[vf].vf_if_handle,
2512                                         NULL, vf+1);
2513                                 if (status) {
2514                                         dev_err(&adapter->pdev->dev,
2515                                         "Interface Create failed for VF %d\n",
2516                                         vf);
2517                                         goto if_destroy;
2518                                 }
2519                                 adapter->vf_cfg[vf].vf_pmac_id =
2520                                                         BE_INVALID_PMAC_ID;
2521                                 vf++;
2522                         }
2523                 }
2524         } else {
2525                 status = be_cmd_mac_addr_query(adapter, mac,
2526                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2527                 if (!status) {
2528                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2529                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2530                 }
2531         }
2532
2533         status = be_tx_queues_create(adapter);
2534         if (status != 0)
2535                 goto if_destroy;
2536
2537         status = be_rx_queues_create(adapter);
2538         if (status != 0)
2539                 goto tx_qs_destroy;
2540
2541         status = be_mcc_queues_create(adapter);
2542         if (status != 0)
2543                 goto rx_qs_destroy;
2544
2545         adapter->link_speed = -1;
2546
2547         return 0;
2548
2549 rx_qs_destroy:
2550         be_rx_queues_destroy(adapter);
2551 tx_qs_destroy:
2552         be_tx_queues_destroy(adapter);
2553 if_destroy:
2554         if (be_physfn(adapter) && adapter->sriov_enabled)
2555                 for (vf = 0; vf < num_vfs; vf++)
2556                         if (adapter->vf_cfg[vf].vf_if_handle)
2557                                 be_cmd_if_destroy(adapter,
2558                                         adapter->vf_cfg[vf].vf_if_handle,
2559                                         vf + 1);
2560         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2561 do_none:
2562         return status;
2563 }
2564
2565 static int be_clear(struct be_adapter *adapter)
2566 {
2567         int vf;
2568
2569         if (be_physfn(adapter) && adapter->sriov_enabled)
2570                 be_vf_eth_addr_rem(adapter);
2571
2572         be_mcc_queues_destroy(adapter);
2573         be_rx_queues_destroy(adapter);
2574         be_tx_queues_destroy(adapter);
2575         adapter->eq_next_idx = 0;
2576
2577         if (be_physfn(adapter) && adapter->sriov_enabled)
2578                 for (vf = 0; vf < num_vfs; vf++)
2579                         if (adapter->vf_cfg[vf].vf_if_handle)
2580                                 be_cmd_if_destroy(adapter,
2581                                         adapter->vf_cfg[vf].vf_if_handle,
2582                                         vf + 1);
2583
2584         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2585
2586         /* tell fw we're done with firing cmds */
2587         be_cmd_fw_clean(adapter);
2588         return 0;
2589 }
2590
2592 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
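     /* Decide whether the redboot section needs flashing: read the CRC of
      * the currently-flashed redboot image and compare it with the CRC
      * bytes carried at the end of the redboot image in the UFI file.
      */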
2593 static bool be_flash_redboot(struct be_adapter *adapter,
2594                         const u8 *p, u32 img_start, int image_size,
2595                         int hdr_size)
2596 {
2597         u32 crc_offset;
2598         u8 flashed_crc[4];
2599         int status;
2600
2601         crc_offset = hdr_size + img_start + image_size - 4;
2602
2603         p += crc_offset;
2604
2605         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2606                         (image_size - 4));
2607         if (status) {
2608                 dev_err(&adapter->pdev->dev,
2609                 "could not get crc from flash, not flashing redboot\n");
2610                 return false;
2611         }
2612
2613         /*update redboot only if crc does not match*/
2614         /* update redboot only if crc does not match */
2615                 return false;
2616         else
2617                 return true;
2618 }
2619
2620 static int be_flash_data(struct be_adapter *adapter,
2621                         const struct firmware *fw,
2622                         struct be_dma_mem *flash_cmd, int num_of_images)
2624 {
2625         int status = 0, i, filehdr_size = 0;
2626         u32 total_bytes = 0, flash_op;
2627         int num_bytes;
2628         const u8 *p = fw->data;
2629         struct be_cmd_write_flashrom *req = flash_cmd->va;
2630         const struct flash_comp *pflashcomp;
2631         int num_comp;
2632
2633         static const struct flash_comp gen3_flash_types[9] = {
2634                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2635                         FLASH_IMAGE_MAX_SIZE_g3},
2636                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2637                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2638                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2639                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2640                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2641                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2642                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2643                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2644                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2645                         FLASH_IMAGE_MAX_SIZE_g3},
2646                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2647                         FLASH_IMAGE_MAX_SIZE_g3},
2648                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2649                         FLASH_IMAGE_MAX_SIZE_g3},
2650                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2651                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2652         };
2653         static const struct flash_comp gen2_flash_types[8] = {
2654                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2655                         FLASH_IMAGE_MAX_SIZE_g2},
2656                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2657                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2658                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2659                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2660                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2661                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2662                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2663                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2664                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2665                         FLASH_IMAGE_MAX_SIZE_g2},
2666                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2667                         FLASH_IMAGE_MAX_SIZE_g2},
2668                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2669                          FLASH_IMAGE_MAX_SIZE_g2}
2670         };
2671
2672         if (adapter->generation == BE_GEN3) {
2673                 pflashcomp = gen3_flash_types;
2674                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2675                 num_comp = ARRAY_SIZE(gen3_flash_types);
2676         } else {
2677                 pflashcomp = gen2_flash_types;
2678                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2679                 num_comp = ARRAY_SIZE(gen2_flash_types);
2680         }
2681         for (i = 0; i < num_comp; i++) {
2682                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2683                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2684                         continue;
2685                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2686                         (!be_flash_redboot(adapter, fw->data,
2687                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2688                         (num_of_images * sizeof(struct image_hdr)))))
2689                         continue;
2690                 p = fw->data;
2691                 p += filehdr_size + pflashcomp[i].offset
2692                         + (num_of_images * sizeof(struct image_hdr));
2693                 if (p + pflashcomp[i].size > fw->data + fw->size)
2694                         return -1;
2695                 total_bytes = pflashcomp[i].size;
2696                 while (total_bytes) {
2697                         if (total_bytes > 32*1024)
2698                                 num_bytes = 32*1024;
2699                         else
2700                                 num_bytes = total_bytes;
2701                         total_bytes -= num_bytes;
2702
2703                         if (!total_bytes)
2704                                 flash_op = FLASHROM_OPER_FLASH;
2705                         else
2706                                 flash_op = FLASHROM_OPER_SAVE;
2707                         memcpy(req->params.data_buf, p, num_bytes);
2708                         p += num_bytes;
2709                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2710                                 pflashcomp[i].optype, flash_op, num_bytes);
2711                         if (status) {
2712                                 dev_err(&adapter->pdev->dev,
2713                                         "cmd to write to flash rom failed.\n");
2714                                 return -1;
2715                         }
2716                 }
2717         }
2718         return 0;
2719 }
2720
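     /* The UFI generation is keyed off the first character of the build
      * string in the flash file header: '3' => BE_GEN3, '2' => BE_GEN2.
      */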
2721 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2722 {
2723         if (fhdr == NULL)
2724                 return 0;
2725         if (fhdr->build[0] == '3')
2726                 return BE_GEN3;
2727         else if (fhdr->build[0] == '2')
2728                 return BE_GEN2;
2729         else
2730                 return 0;
2731 }
2732
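     /* Lancer firmware download: stream the image to the "/prg" flash
      * object in 32KB chunks via lancer_cmd_write_object(); a final
      * zero-length write at the end-of-image offset commits the download.
      */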
2733 static int lancer_fw_download(struct be_adapter *adapter,
2734                                 const struct firmware *fw)
2735 {
2736 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2737 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2738         struct be_dma_mem flash_cmd;
2739         const u8 *data_ptr = NULL;
2740         u8 *dest_image_ptr = NULL;
2741         size_t image_size = 0;
2742         u32 chunk_size = 0;
2743         u32 data_written = 0;
2744         u32 offset = 0;
2745         int status = 0;
2746         u8 add_status = 0;
2747
2748         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2749                 dev_err(&adapter->pdev->dev,
2750                         "FW Image not properly aligned. "
2751                         "Length must be 4-byte aligned.\n");
2752                 status = -EINVAL;
2753                 goto lancer_fw_exit;
2754         }
2755
2756         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2757                                 + LANCER_FW_DOWNLOAD_CHUNK;
2758         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2759                                                 &flash_cmd.dma, GFP_KERNEL);
2760         if (!flash_cmd.va) {
2761                 status = -ENOMEM;
2762                 dev_err(&adapter->pdev->dev,
2763                         "Memory allocation failure while flashing\n");
2764                 goto lancer_fw_exit;
2765         }
2766
2767         dest_image_ptr = flash_cmd.va +
2768                                 sizeof(struct lancer_cmd_req_write_object);
2769         image_size = fw->size;
2770         data_ptr = fw->data;
2771
2772         while (image_size) {
2773                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2774
2775                 /* Copy the image chunk content. */
2776                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2777
2778                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2779                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2780                                 &data_written, &add_status);
2781
2782                 if (status)
2783                         break;
2784
2785                 offset += data_written;
2786                 data_ptr += data_written;
2787                 image_size -= data_written;
2788         }
2789
2790         if (!status) {
2791                 /* Commit the FW written */
2792                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2793                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2794                                         &data_written, &add_status);
2795         }
2796
2797         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2798                                 flash_cmd.dma);
2799         if (status) {
2800                 dev_err(&adapter->pdev->dev,
2801                         "Firmware load error. "
2802                         "Status code: 0x%x Additional Status: 0x%x\n",
2803                         status, add_status);
2804                 goto lancer_fw_exit;
2805         }
2806
2807         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2808 lancer_fw_exit:
2809         return status;
2810 }
2811
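     /* BE2/BE3 firmware download: the UFI generation must match the
      * adapter generation.  Gen3 UFIs carry several image sets; only the
      * set with imageid 1 is flashed.  Gen2 UFIs carry a single set.
      */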
2812 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2813 {
2814         struct flash_file_hdr_g2 *fhdr;
2815         struct flash_file_hdr_g3 *fhdr3;
2816         struct image_hdr *img_hdr_ptr = NULL;
2817         struct be_dma_mem flash_cmd;
2818         const u8 *p;
2819         int status = 0, i = 0, num_imgs = 0;
2820
2821         p = fw->data;
2822         fhdr = (struct flash_file_hdr_g2 *) p;
2823
2824         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2825         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2826                                           &flash_cmd.dma, GFP_KERNEL);
2827         if (!flash_cmd.va) {
2828                 status = -ENOMEM;
2829                 dev_err(&adapter->pdev->dev,
2830                         "Memory allocation failure while flashing\n");
2831                 goto be_fw_exit;
2832         }
2833
2834         if ((adapter->generation == BE_GEN3) &&
2835                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2836                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2837                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2838                 for (i = 0; i < num_imgs; i++) {
2839                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2840                                         (sizeof(struct flash_file_hdr_g3) +
2841                                          i * sizeof(struct image_hdr)));
2842                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2843                                 status = be_flash_data(adapter, fw, &flash_cmd,
2844                                                         num_imgs);
2845                 }
2846         } else if ((adapter->generation == BE_GEN2) &&
2847                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2848                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2849         } else {
2850                 dev_err(&adapter->pdev->dev,
2851                         "UFI and Interface are not compatible for flashing\n");
2852                 status = -1;
2853         }
2854
2855         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2856                           flash_cmd.dma);
2857         if (status) {
2858                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2859                 goto be_fw_exit;
2860         }
2861
2862         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2863
2864 be_fw_exit:
2865         return status;
2866 }
2867
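/* Common firmware-load entry point (typically reached via ethtool's
 * flash hook): fetch the image with request_firmware() and dispatch
 * to the Lancer or BE2/BE3 download path.
 */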
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
        const struct firmware *fw;
        int status;

        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -1;
        }

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        if (lancer_chip(adapter))
                status = lancer_fw_download(adapter, fw);
        else
                status = be_fw_download(adapter, fw);

fw_exit:
        release_firmware(fw);
        return status;
}

static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_multicast_list,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config
};

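/* One-time netdev setup: advertise offload features, default the flow
 * control settings, and register NAPI contexts for the Rx queues and
 * the combined Tx/MCC event queue.
 */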
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (lancer_chip(adapter))
                netdev->vlan_features |= NETIF_F_TSO6;

        netdev->flags |= IFF_MULTICAST;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                                BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}

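/* Map the PCI BARs the driver uses.  Lancer needs only its doorbell
 * BAR; on BE2/BE3 the CSR, doorbell and pcicfg BAR numbers depend on
 * the chip generation and on whether this is the physical function
 * (VFs reach pcicfg at a fixed offset inside the doorbell BAR).
 */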
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
                db_reg = 4;
        } else {
                pcicfg_reg = 0;
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(
                                pci_resource_start(adapter->pdev, pcicfg_reg),
                                pci_resource_len(adapter->pdev, pcicfg_reg));
                if (addr == NULL)
                        goto pci_map_err;
                adapter->pcicfg = addr;
        } else
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

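/* Set up control-path resources: BAR mappings, the bootstrap mailbox
 * (which must be 16-byte aligned, hence the over-allocation and
 * PTR_ALIGN below), the multicast command buffer and the locks that
 * serialize mailbox/MCC access.
 */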
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
                                            mc_cmd_mem->size, &mc_cmd_mem->dma,
                                            GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

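/* Allocate the DMA buffer used for firmware stats queries; the command
 * layout, and therefore its size, differs across BE2, BE3 and Lancer.
 */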
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (adapter->generation == BE_GEN2) {
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        } else {
                if (lancer_chip(adapter))
                        cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
                else
                        cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        }
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                     GFP_KERNEL);
        if (cmd->va == NULL)
                return -ENOMEM;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

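/* Device teardown: stop the periodic worker, unregister the netdev and
 * release rings, stats and control-path resources in the reverse order
 * of be_probe().
 */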
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        kfree(adapter->vf_cfg);
        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

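/* Query static configuration from firmware: FW version, port and
 * function capabilities, the permanent MAC address and the VLAN
 * table share available to this function.
 */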
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        /* A default permanent address is given to each VF for Lancer */
        if (be_physfn(adapter) || lancer_chip(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

        /* Bit 0x400 of function_mode indicates multi-channel (FLEX10)
         * operation; the VLAN table is then shared, leaving this
         * function a quarter of the entries.
         */
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        be_cmd_check_native_mode(adapter);
        return 0;
}

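/* Derive the adapter generation from the PCI device ID; newer
 * (SLI-based) parts are additionally validated via the SLI_INTF
 * register.
 */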
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

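/* Poll the SLIPORT status register until firmware reports ready, for
 * at most ~10s (SLIPORT_READY_TIMEOUT iterations of 20ms).
 */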
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(20);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

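/* Wait for Lancer firmware to become ready; if it reports an error
 * state that is marked as recoverable by reset, trigger the reset
 * through the SLIPORT control register and re-check readiness.
 */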
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if the adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        return status;
}

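/* PCI probe: enable the device, negotiate a DMA mask, optionally
 * enable SR-IOV, sync up with firmware POST, allocate rings and
 * register the net device.
 */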
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg) {
                        status = -ENOMEM;
                        goto free_netdev;
                }
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_vf_cfg;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                u8 mac_speed;
                bool link_up;
                u16 vf, lnk_speed;

                if (!lancer_chip(adapter)) {
                        status = be_vf_eth_addr_config(adapter);
                        if (status)
                                goto unreg_netdev;
                }

                for (vf = 0; vf < num_vfs; vf++) {
                        status = be_cmd_link_status_query(adapter, &link_up,
                                        &mac_speed, &lnk_speed, vf + 1);
                        if (!status)
                                adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
                        else
                                goto unreg_netdev;
                }
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_vf_cfg:
        kfree(adapter->vf_cfg);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

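/* PM suspend: quiesce the interface, optionally arm wake-on-LAN and
 * drop the device into the requested low-power state.
 */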
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

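/* PM resume: re-enable the device, redo firmware initialization and
 * rebuild the state torn down in be_suspend().
 */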
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

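/* PCI error recovery (EEH) callbacks: detach and quiesce the device on
 * error detection, re-enable and re-POST it on slot reset, and rebuild
 * the data path on resume.
 */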
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

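/* Validate module parameters before registering the PCI driver;
 * rx_frag_size must be one of the fragment sizes the Rx rings support.
 */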
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);