/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

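/* Free the DMA-coherent memory backing a descriptor ring, if any */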
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

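/* Allocate and zero a DMA-coherent ring of 'len' entries of 'entry_size'
 * bytes each; returns 0 on success, -1 if the allocation fails.
 */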
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

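/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register; a no-op when the bit already matches 'enable'
 * or when an EEH error has been detected.
 */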
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

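/* Ring an RX/TX doorbell: the low bits carry the ring id and 'posted'
 * tells the hw how many new descriptors were added. The wmb() ensures
 * the descriptors are visible in memory before the doorbell write.
 */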
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

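/* Ring the event-queue doorbell: optionally re-arm the EQ, clear the
 * interrupt, and acknowledge 'num_popped' processed event entries.
 */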
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

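/* ndo_set_mac_address handler: deletes the currently programmed pmac
 * entry and adds the new one; VFs only update the netdev copy since
 * their MAC is programmed in hw by the PF.
 */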
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

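/* Fold the adapter's hw-maintained stats (per rx queue, per port) into
 * the netdev stats structure exposed to the stack.
 */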
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

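/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec */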
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

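/* Populate the header WRB that precedes the data WRBs of a tx request:
 * LSO/checksum offload flags, vlan tag insertion, and the total wrb count
 * and payload length, all encoded via the AMAP bit-field helpers.
 */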
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

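/* DMA-map the skb head and its page frags into consecutive WRBs on the
 * tx ring (plus an optional dummy WRB to keep the count even). Returns
 * the number of bytes mapped, or 0 after unwinding on a mapping error.
 */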
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

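/* Main transmit entry point: builds the WRBs, stops the queue if it may
 * not fit another max-sized request, then rings the tx doorbell.
 */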
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

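/* Sync the hw rx filter with netdev flags: promiscuous, all-multi (or an
 * overflowing mc list), and the exact multicast list, in that order.
 */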
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}

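/* Return the page_info for a received fragment; unmap the backing page
 * when this frag is its last user and account for the consumed rx entry.
 */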
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
        if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
                rxo->last_frag_index = rxq_idx;

                for (i = 0; i < num_rcvd; i++) {
                        page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                        put_page(page_info->page);
                        memset(page_info, 0, sizeof(*page_info));
                        index_inc(&rxq_idx, rxq->len);
                }
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

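/* Return the next valid RX completion, or NULL. The rmb() ensures the
 * valid bit is read before the rest of the (hw-written) entry.
 */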
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

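/* Reclaim the WRBs of the skb that completed at 'last_index': unmap each
 * fragment, advance the tail past the whole request and free the skb.
 */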
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

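/* Drain the EQ, re-arm it while acking the drained entries, and kick
 * napi if any events were found.
 */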
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

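/* Discard all pending rx completions, then release the posted-but-unused
 * rx buffers; the queue must end up empty.
 */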
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First clean up pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

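/* Create the tx EQ, tx completion queue and tx queue in order, unwinding
 * everything created so far if any step fails.
 */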
1482 static int be_tx_queues_create(struct be_adapter *adapter)
1483 {
1484         struct be_queue_info *eq, *q, *cq;
1485
1486         adapter->tx_eq.max_eqd = 0;
1487         adapter->tx_eq.min_eqd = 0;
1488         adapter->tx_eq.cur_eqd = 96;
1489         adapter->tx_eq.enable_aic = false;
1490         /* Alloc Tx Event queue */
1491         eq = &adapter->tx_eq.q;
1492         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1493                 return -1;
1494
1495         /* Ask BE to create Tx Event queue */
1496         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1497                 goto tx_eq_free;
1498
1499         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1500
1501
1502         /* Alloc TX eth compl queue */
1503         cq = &adapter->tx_obj.cq;
1504         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1505                         sizeof(struct be_eth_tx_compl)))
1506                 goto tx_eq_destroy;
1507
1508         /* Ask BE to create Tx eth compl queue */
1509         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1510                 goto tx_cq_free;
1511
1512         /* Alloc TX eth queue */
1513         q = &adapter->tx_obj.q;
1514         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1515                 goto tx_cq_destroy;
1516
1517         /* Ask BE to create Tx eth queue */
1518         if (be_cmd_txq_create(adapter, q, cq))
1519                 goto tx_q_free;
1520         return 0;
1521
1522 tx_q_free:
1523         be_queue_free(adapter, q);
1524 tx_cq_destroy:
1525         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1526 tx_cq_free:
1527         be_queue_free(adapter, cq);
1528 tx_eq_destroy:
1529         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1530 tx_eq_free:
1531         be_queue_free(adapter, eq);
1532         return -1;
1533 }
1534
1535 static void be_rx_queues_destroy(struct be_adapter *adapter)
1536 {
1537         struct be_queue_info *q;
1538         struct be_rx_obj *rxo;
1539         int i;
1540
1541         for_all_rx_queues(adapter, rxo, i) {
1542                 q = &rxo->q;
1543                 if (q->created) {
1544                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1545                         /* After the rxq is invalidated, wait for a grace time
1546                          * of 1ms for all DMA to end and the flush compl to
1547                          * arrive
1548                          */
1549                         mdelay(1);
1550                         be_rx_q_clean(adapter, rxo);
1551                 }
1552                 be_queue_free(adapter, q);
1553
1554                 q = &rxo->cq;
1555                 if (q->created)
1556                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1557                 be_queue_free(adapter, q);
1558
1559                 /* Clear any residual events */
1560                 q = &rxo->rx_eq.q;
1561                 if (q->created) {
1562                         be_eq_clean(adapter, &rxo->rx_eq);
1563                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1564                 }
1565                 be_queue_free(adapter, q);
1566         }
1567 }
1568
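/* Create an event queue, completion queue and descriptor ring for each
 * RX queue. Queue 0 is the default non-RSS queue; the remaining queues
 * report an rss_id that is loaded into the indirection table via
 * be_cmd_rss_config() when multiple RX queues are in use.
 */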
1569 static int be_rx_queues_create(struct be_adapter *adapter)
1570 {
1571         struct be_queue_info *eq, *q, *cq;
1572         struct be_rx_obj *rxo;
1573         int rc, i;
1574
1575         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1576         for_all_rx_queues(adapter, rxo, i) {
1577                 rxo->adapter = adapter;
1578                 /* Init last_frag_index so that the frag index in the first
1579                  * completion will never match */
1580                 rxo->last_frag_index = 0xffff;
1581                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1582                 rxo->rx_eq.enable_aic = true;
1583
1584                 /* EQ */
1585                 eq = &rxo->rx_eq.q;
1586                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1587                                         sizeof(struct be_eq_entry));
1588                 if (rc)
1589                         goto err;
1590
1591                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1592                 if (rc)
1593                         goto err;
1594
1595                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1596
1597                 /* CQ */
1598                 cq = &rxo->cq;
1599                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1600                                 sizeof(struct be_eth_rx_compl));
1601                 if (rc)
1602                         goto err;
1603
1604                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1605                 if (rc)
1606                         goto err;
1607                 /* Rx Q */
1608                 q = &rxo->q;
1609                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1610                                 sizeof(struct be_eth_rx_d));
1611                 if (rc)
1612                         goto err;
1613
1614                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1615                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1616                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1617                 if (rc)
1618                         goto err;
1619         }
1620
1621         if (be_multi_rxq(adapter)) {
1622                 u8 rsstable[MAX_RSS_QS];
1623
1624                 for_all_rss_queues(adapter, rxo, i)
1625                         rsstable[i] = rxo->rss_id;
1626
1627                 rc = be_cmd_rss_config(adapter, rsstable,
1628                         adapter->num_rx_qs - 1);
1629                 if (rc)
1630                         goto err;
1631         }
1632
1633         return 0;
1634 err:
1635         be_rx_queues_destroy(adapter);
1636         return -1;
1637 }
1638
1639 static bool event_peek(struct be_eq_obj *eq_obj)
1640 {
1641         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1642         if (!eqe->evt)
1643                 return false;
1644         else
1645                 return true;
1646 }
1647
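/* INTx (legacy) interrupt handler. On Lancer, pending work is detected
 * by peeking at each EQ's tail entry; on BE2/BE3 the CEV_ISR register
 * is read and the per-EQ vector bits are tested instead.
 */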
1648 static irqreturn_t be_intx(int irq, void *dev)
1649 {
1650         struct be_adapter *adapter = dev;
1651         struct be_rx_obj *rxo;
1652         int isr, i, tx = 0, rx = 0;
1653
1654         if (lancer_chip(adapter)) {
1655                 if (event_peek(&adapter->tx_eq))
1656                         tx = event_handle(adapter, &adapter->tx_eq);
1657                 for_all_rx_queues(adapter, rxo, i) {
1658                         if (event_peek(&rxo->rx_eq))
1659                                 rx |= event_handle(adapter, &rxo->rx_eq);
1660                 }
1661
1662                 if (!(tx || rx))
1663                         return IRQ_NONE;
1664
1665         } else {
1666                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1667                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1668                 if (!isr)
1669                         return IRQ_NONE;
1670
1671                 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1672                         event_handle(adapter, &adapter->tx_eq);
1673
1674                 for_all_rx_queues(adapter, rxo, i) {
1675                         if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1676                                 event_handle(adapter, &rxo->rx_eq);
1677                 }
1678         }
1679
1680         return IRQ_HANDLED;
1681 }
1682
1683 static irqreturn_t be_msix_rx(int irq, void *dev)
1684 {
1685         struct be_rx_obj *rxo = dev;
1686         struct be_adapter *adapter = rxo->adapter;
1687
1688         event_handle(adapter, &rxo->rx_eq);
1689
1690         return IRQ_HANDLED;
1691 }
1692
1693 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1694 {
1695         struct be_adapter *adapter = dev;
1696
1697         event_handle(adapter, &adapter->tx_eq);
1698
1699         return IRQ_HANDLED;
1700 }
1701
1702 static inline bool do_gro(struct be_rx_obj *rxo,
1703                         struct be_eth_rx_compl *rxcp, u8 err)
1704 {
1705         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1706
1707         if (err)
1708                 rxo->stats.rxcp_err++;
1709
1710         return tcp_frame && !err;
1711 }
1712
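/* NAPI poll handler for an RX queue: process up to 'budget'
 * completions, refill the descriptor ring if it has drained below the
 * watermark, and re-arm the CQ only when all pending work was consumed.
 */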
1713 static int be_poll_rx(struct napi_struct *napi, int budget)
1714 {
1715         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1716         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1717         struct be_adapter *adapter = rxo->adapter;
1718         struct be_queue_info *rx_cq = &rxo->cq;
1719         struct be_eth_rx_compl *rxcp;
1720         u32 work_done;
1721         u16 frag_index, num_rcvd;
1722         u8 err;
1723
1724         rxo->stats.rx_polls++;
1725         for (work_done = 0; work_done < budget; work_done++) {
1726                 rxcp = be_rx_compl_get(rxo);
1727                 if (!rxcp)
1728                         break;
1729
1730                 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1731                 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1732                                                                 rxcp);
1733                 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1734                                                                 rxcp);
1735
1736                 /* Skip out-of-buffer compl (lancer) or flush compl (BE) */
1737                 if (likely(frag_index != rxo->last_frag_index &&
1738                                 num_rcvd != 0)) {
1739                         rxo->last_frag_index = frag_index;
1740
1741                         if (do_gro(rxo, rxcp, err))
1742                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1743                         else
1744                                 be_rx_compl_process(adapter, rxo, rxcp);
1745                 }
1746
1747                 be_rx_compl_reset(rxcp);
1748         }
1749
1750         /* Refill the queue */
1751         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1752                 be_post_rx_frags(rxo);
1753
1754         /* All consumed */
1755         if (work_done < budget) {
1756                 napi_complete(napi);
1757                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1758         } else {
1759                 /* More to be consumed; continue with interrupts disabled */
1760                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1761         }
1762         return work_done;
1763 }
1764
1765 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1766  * For TX/MCC we don't honour budget; consume everything
1767  */
1768 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1769 {
1770         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1771         struct be_adapter *adapter =
1772                 container_of(tx_eq, struct be_adapter, tx_eq);
1773         struct be_queue_info *txq = &adapter->tx_obj.q;
1774         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1775         struct be_eth_tx_compl *txcp;
1776         int tx_compl = 0, mcc_compl, status = 0;
1777         u16 end_idx;
1778
1779         while ((txcp = be_tx_compl_get(tx_cq))) {
1780                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1781                                 wrb_index, txcp);
1782                 be_tx_compl_process(adapter, end_idx);
1783                 tx_compl++;
1784         }
1785
1786         mcc_compl = be_process_mcc(adapter, &status);
1787
1788         napi_complete(napi);
1789
1790         if (mcc_compl) {
1791                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1792                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1793         }
1794
1795         if (tx_compl) {
1796                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1797
1798                 /* As Tx wrbs have been freed up, wake up netdev queue if
1799                  * it was stopped due to lack of tx wrbs.
1800                  */
1801                 if (netif_queue_stopped(adapter->netdev) &&
1802                         atomic_read(&txq->used) < txq->len / 2) {
1803                         netif_wake_queue(adapter->netdev);
1804                 }
1805
1806                 tx_stats(adapter)->be_tx_events++;
1807                 tx_stats(adapter)->be_tx_compl += tx_compl;
1808         }
1809
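        /* TX/MCC work is always drained completely above and
         * napi_complete() has already been called, so a fixed count of 1
         * is returned rather than a budget-relative count.
         */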
1810         return 1;
1811 }
1812
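/* Read the UE (unrecoverable error) status registers from PCI config
 * space, mask off the bits firmware asks to be ignored, and log a
 * description for every error bit that remains set.
 */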
1813 void be_detect_dump_ue(struct be_adapter *adapter)
1814 {
1815         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1816         u32 i;
1817
1818         pci_read_config_dword(adapter->pdev,
1819                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1820         pci_read_config_dword(adapter->pdev,
1821                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1822         pci_read_config_dword(adapter->pdev,
1823                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1824         pci_read_config_dword(adapter->pdev,
1825                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1826
1827         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1828         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1829
1830         if (ue_status_lo || ue_status_hi) {
1831                 adapter->ue_detected = true;
1832                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1833         }
1834
1835         if (ue_status_lo) {
1836                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1837                         if (ue_status_lo & 1)
1838                                 dev_err(&adapter->pdev->dev,
1839                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1840                 }
1841         }
1842         if (ue_status_hi) {
1843                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1844                         if (ue_status_hi & 1)
1845                                 dev_err(&adapter->pdev->dev,
1846                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1847                 }
1848         }
1849
1850 }
1851
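/* Periodic housekeeping, rescheduled every second: fire a stats
 * request, update TX/RX rate and EQ-delay estimates, replenish RX
 * rings that starved for buffers, and check for unrecoverable errors.
 */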
1852 static void be_worker(struct work_struct *work)
1853 {
1854         struct be_adapter *adapter =
1855                 container_of(work, struct be_adapter, work.work);
1856         struct be_rx_obj *rxo;
1857         int i;
1858
1859         /* When interrupts are not yet enabled, just reap any pending
1860          * MCC completions */
1861         if (!netif_running(adapter->netdev)) {
1862                 int mcc_compl, status = 0;
1863
1864                 mcc_compl = be_process_mcc(adapter, &status);
1865
1866                 if (mcc_compl) {
1867                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1868                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1869                 }
1870                 goto reschedule;
1871         }
1872
1873         if (!adapter->stats_ioctl_sent)
1874                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1875
1876         be_tx_rate_update(adapter);
1877
1878         for_all_rx_queues(adapter, rxo, i) {
1879                 be_rx_rate_update(rxo);
1880                 be_rx_eqd_update(adapter, rxo);
1881
1882                 if (rxo->rx_post_starved) {
1883                         rxo->rx_post_starved = false;
1884                         be_post_rx_frags(rxo);
1885                 }
1886         }
1887         if (!adapter->ue_detected && !lancer_chip(adapter))
1888                 be_detect_dump_ue(adapter);
1889
1890 reschedule:
1891         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1892 }
1893
1894 static void be_msix_disable(struct be_adapter *adapter)
1895 {
1896         if (adapter->msix_enabled) {
1897                 pci_disable_msix(adapter->pdev);
1898                 adapter->msix_enabled = false;
1899         }
1900 }
1901
1902 static int be_num_rxqs_get(struct be_adapter *adapter)
1903 {
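        /* The 0x400 check below appears to test a multi-channel function
         * mode in which RSS queues are unavailable (the same bit gates
         * the VLAN count in be_get_config()); this reading is inferred
         * from usage, not from a named constant.
         */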
1904         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1905                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1906                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1907         } else {
1908                 dev_warn(&adapter->pdev->dev,
1909                         "No support for multiple RX queues\n");
1910                 return 1;
1911         }
1912 }
1913
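/* Request one MSI-x vector per RX queue plus one shared TX/MCC vector.
 * If the full set is unavailable, pci_enable_msix() reports how many
 * vectors could be had; retry with that count and shrink the RX queue
 * count to fit.
 */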
1914 static void be_msix_enable(struct be_adapter *adapter)
1915 {
1916 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1917         int i, status;
1918
1919         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1920
1921         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1922                 adapter->msix_entries[i].entry = i;
1923
1924         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1925                         adapter->num_rx_qs + 1);
1926         if (status == 0) {
1927                 goto done;
1928         } else if (status >= BE_MIN_MSIX_VECTORS) {
1929                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1930                                 status) == 0) {
1931                         adapter->num_rx_qs = status - 1;
1932                         dev_warn(&adapter->pdev->dev,
1933                                 "Could allocate only %d MSI-x vectors. "
1934                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1935                         goto done;
1936                 }
1937         }
1938         return;
1939 done:
1940         adapter->msix_enabled = true;
1941 }
1942
1943 static void be_sriov_enable(struct be_adapter *adapter)
1944 {
1945         be_check_sriov_fn_type(adapter);
1946 #ifdef CONFIG_PCI_IOV
1947         if (be_physfn(adapter) && num_vfs) {
1948                 int status;
1949
1950                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1951                 adapter->sriov_enabled = status ? false : true;
1952         }
1953 #endif
1954 }
1955
1956 static void be_sriov_disable(struct be_adapter *adapter)
1957 {
1958 #ifdef CONFIG_PCI_IOV
1959         if (adapter->sriov_enabled) {
1960                 pci_disable_sriov(adapter->pdev);
1961                 adapter->sriov_enabled = false;
1962         }
1963 #endif
1964 }
1965
1966 static inline int be_msix_vec_get(struct be_adapter *adapter,
1967                                         struct be_eq_obj *eq_obj)
1968 {
1969         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1970 }
1971
1972 static int be_request_irq(struct be_adapter *adapter,
1973                 struct be_eq_obj *eq_obj,
1974                 void *handler, char *desc, void *context)
1975 {
1976         struct net_device *netdev = adapter->netdev;
1977         int vec;
1978
1979         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1980         vec = be_msix_vec_get(adapter, eq_obj);
1981         return request_irq(vec, handler, 0, eq_obj->desc, context);
1982 }
1983
1984 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1985                         void *context)
1986 {
1987         int vec = be_msix_vec_get(adapter, eq_obj);
1988         free_irq(vec, context);
1989 }
1990
1991 static int be_msix_register(struct be_adapter *adapter)
1992 {
1993         struct be_rx_obj *rxo;
1994         int status, i;
1995         char qname[10];
1996
1997         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1998                                 adapter);
1999         if (status)
2000                 goto err;
2001
2002         for_all_rx_queues(adapter, rxo, i) {
2003                 sprintf(qname, "rxq%d", i);
2004                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2005                                 qname, rxo);
2006                 if (status)
2007                         goto err_msix;
2008         }
2009
2010         return 0;
2011
2012 err_msix:
2013         be_free_irq(adapter, &adapter->tx_eq, adapter);
2014
2015         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2016                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2017
2018 err:
2019         dev_warn(&adapter->pdev->dev,
2020                 "MSIX Request IRQ failed - err %d\n", status);
2021         pci_disable_msix(adapter->pdev);
2022         adapter->msix_enabled = false;
2023         return status;
2024 }
2025
2026 static int be_irq_register(struct be_adapter *adapter)
2027 {
2028         struct net_device *netdev = adapter->netdev;
2029         int status;
2030
2031         if (adapter->msix_enabled) {
2032                 status = be_msix_register(adapter);
2033                 if (status == 0)
2034                         goto done;
2035                 /* INTx is not supported for VF */
2036                 if (!be_physfn(adapter))
2037                         return status;
2038         }
2039
2040         /* INTx */
2041         netdev->irq = adapter->pdev->irq;
2042         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2043                         adapter);
2044         if (status) {
2045                 dev_err(&adapter->pdev->dev,
2046                         "INTx request IRQ failed - err %d\n", status);
2047                 return status;
2048         }
2049 done:
2050         adapter->isr_registered = true;
2051         return 0;
2052 }
2053
2054 static void be_irq_unregister(struct be_adapter *adapter)
2055 {
2056         struct net_device *netdev = adapter->netdev;
2057         struct be_rx_obj *rxo;
2058         int i;
2059
2060         if (!adapter->isr_registered)
2061                 return;
2062
2063         /* INTx */
2064         if (!adapter->msix_enabled) {
2065                 free_irq(netdev->irq, adapter);
2066                 goto done;
2067         }
2068
2069         /* MSIx */
2070         be_free_irq(adapter, &adapter->tx_eq, adapter);
2071
2072         for_all_rx_queues(adapter, rxo, i)
2073                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2074
2075 done:
2076         adapter->isr_registered = false;
2077 }
2078
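/* ndo_stop: quiesce async MCC processing, stop the TX queue, mask
 * interrupts, synchronize and free the IRQs, disable NAPI and finally
 * wait out pending TX completions so every in-flight skb is freed.
 */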
2079 static int be_close(struct net_device *netdev)
2080 {
2081         struct be_adapter *adapter = netdev_priv(netdev);
2082         struct be_rx_obj *rxo;
2083         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2084         int vec, i;
2085
2086         be_async_mcc_disable(adapter);
2087
2088         netif_stop_queue(netdev);
2089         netif_carrier_off(netdev);
2090         adapter->link_up = false;
2091
2092         if (!lancer_chip(adapter))
2093                 be_intr_set(adapter, false);
2094
2095         if (adapter->msix_enabled) {
2096                 vec = be_msix_vec_get(adapter, tx_eq);
2097                 synchronize_irq(vec);
2098
2099                 for_all_rx_queues(adapter, rxo, i) {
2100                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2101                         synchronize_irq(vec);
2102                 }
2103         } else {
2104                 synchronize_irq(netdev->irq);
2105         }
2106         be_irq_unregister(adapter);
2107
2108         for_all_rx_queues(adapter, rxo, i)
2109                 napi_disable(&rxo->rx_eq.napi);
2110
2111         napi_disable(&tx_eq->napi);
2112
2113         /* Wait for all pending tx completions to arrive so that
2114          * all tx skbs are freed.
2115          */
2116         be_tx_compl_clean(adapter);
2117
2118         return 0;
2119 }
2120
2121 static int be_open(struct net_device *netdev)
2122 {
2123         struct be_adapter *adapter = netdev_priv(netdev);
2124         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2125         struct be_rx_obj *rxo;
2126         bool link_up;
2127         int status, i;
2128         u8 mac_speed;
2129         u16 link_speed;
2130
2131         for_all_rx_queues(adapter, rxo, i) {
2132                 be_post_rx_frags(rxo);
2133                 napi_enable(&rxo->rx_eq.napi);
2134         }
2135         napi_enable(&tx_eq->napi);
2136
2137         be_irq_register(adapter);
2138
2139         if (!lancer_chip(adapter))
2140                 be_intr_set(adapter, true);
2141
2142         /* The evt queues are created in unarmed state; arm them */
2143         for_all_rx_queues(adapter, rxo, i) {
2144                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2145                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2146         }
2147         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2148
2149         /* Now that interrupts are on we can process async mcc */
2150         be_async_mcc_enable(adapter);
2151
2152         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2153                         &link_speed);
2154         if (status)
2155                 goto err;
2156         be_link_status_update(adapter, link_up);
2157
2158         if (be_physfn(adapter)) {
2159                 status = be_vid_config(adapter, false, 0);
2160                 if (status)
2161                         goto err;
2162
2163                 status = be_cmd_set_flow_control(adapter,
2164                                 adapter->tx_fc, adapter->rx_fc);
2165                 if (status)
2166                         goto err;
2167         }
2168
2169         return 0;
2170 err:
2171         be_close(adapter->netdev);
2172         return -EIO;
2173 }
2174
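/* Configure magic-packet wake-up. Enabling programs the PM control
 * register and installs the interface MAC as the wake-up filter;
 * disabling installs an all-zero MAC, which presumably clears the
 * filter in firmware.
 */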
2175 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2176 {
2177         struct be_dma_mem cmd;
2178         int status = 0;
2179         u8 mac[ETH_ALEN];
2180
2181         memset(mac, 0, ETH_ALEN);
2182
2183         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2184         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2185         if (cmd.va == NULL)
2186                 return -1;
2187         memset(cmd.va, 0, cmd.size);
2188
2189         if (enable) {
2190                 status = pci_write_config_dword(adapter->pdev,
2191                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2192                 if (status) {
2193                         dev_err(&adapter->pdev->dev,
2194                                 "Could not enable Wake-on-LAN\n");
2195                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2196                                         cmd.dma);
2197                         return status;
2198                 }
2199                 status = be_cmd_enable_magic_wol(adapter,
2200                                 adapter->netdev->dev_addr, &cmd);
2201                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2202                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2203         } else {
2204                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2205                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2206                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2207         }
2208
2209         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2210         return status;
2211 }
2212
2213 /*
2214  * Generate a seed MAC address from the PF MAC Address using jhash.
2215  * MAC addresses for VFs are assigned incrementally starting from the seed.
2216  * These addresses are programmed in the ASIC by the PF and the VF driver
2217  * queries for the MAC address during its probe.
2218  */
2219 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2220 {
2221         u32 vf = 0;
2222         int status = 0;
2223         u8 mac[ETH_ALEN];
2224
2225         be_vf_eth_addr_generate(adapter, mac);
2226
2227         for (vf = 0; vf < num_vfs; vf++) {
2228                 status = be_cmd_pmac_add(adapter, mac,
2229                                         adapter->vf_cfg[vf].vf_if_handle,
2230                                         &adapter->vf_cfg[vf].vf_pmac_id);
2231                 if (status)
2232                         dev_err(&adapter->pdev->dev,
2233                                 "Mac address add failed for VF %d\n", vf);
2234                 else
2235                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2236
2237                 mac[5] += 1;
2238         }
2239         return status;
2240 }
2241
2242 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2243 {
2244         u32 vf;
2245
2246         for (vf = 0; vf < num_vfs; vf++) {
2247                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248                         be_cmd_pmac_del(adapter,
2249                                         adapter->vf_cfg[vf].vf_if_handle,
2250                                         adapter->vf_cfg[vf].vf_pmac_id);
2251         }
2252 }
2253
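/* Function bring-up: create the interface (plus one interface per VF
 * when running as the PF), then the TX, RX and MCC queue sets, and
 * finally program the VF MAC addresses. Failures unwind in reverse
 * through the goto ladder at the bottom.
 */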
2254 static int be_setup(struct be_adapter *adapter)
2255 {
2256         struct net_device *netdev = adapter->netdev;
2257         u32 cap_flags, en_flags, vf = 0;
2258         int status;
2259         u8 mac[ETH_ALEN];
2260
2261         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2262
2263         if (be_physfn(adapter)) {
2264                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2265                                 BE_IF_FLAGS_PROMISCUOUS |
2266                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2267                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2268
2269                 if (be_multi_rxq(adapter)) {
2270                         cap_flags |= BE_IF_FLAGS_RSS;
2271                         en_flags |= BE_IF_FLAGS_RSS;
2272                 }
2273         }
2274
2275         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2276                         netdev->dev_addr, false/* pmac_invalid */,
2277                         &adapter->if_handle, &adapter->pmac_id, 0);
2278         if (status != 0)
2279                 goto do_none;
2280
2281         if (be_physfn(adapter)) {
2282                 while (vf < num_vfs) {
2283                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2284                                         | BE_IF_FLAGS_BROADCAST;
2285                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2286                                         mac, true,
2287                                         &adapter->vf_cfg[vf].vf_if_handle,
2288                                         NULL, vf+1);
2289                         if (status) {
2290                                 dev_err(&adapter->pdev->dev,
2291                                 "Interface Create failed for VF %d\n", vf);
2292                                 goto if_destroy;
2293                         }
2294                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2295                         vf++;
2296                 }
2297         } else if (!be_physfn(adapter)) {
2298                 status = be_cmd_mac_addr_query(adapter, mac,
2299                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2300                 if (!status) {
2301                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2302                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2303                 }
2304         }
2305
2306         status = be_tx_queues_create(adapter);
2307         if (status != 0)
2308                 goto if_destroy;
2309
2310         status = be_rx_queues_create(adapter);
2311         if (status != 0)
2312                 goto tx_qs_destroy;
2313
2314         status = be_mcc_queues_create(adapter);
2315         if (status != 0)
2316                 goto rx_qs_destroy;
2317
2318         if (be_physfn(adapter)) {
2319                 status = be_vf_eth_addr_config(adapter);
2320                 if (status)
2321                         goto mcc_q_destroy;
2322         }
2323
2324         adapter->link_speed = -1;
2325
2326         return 0;
2327
2328 mcc_q_destroy:
2329         if (be_physfn(adapter))
2330                 be_vf_eth_addr_rem(adapter);
2331         be_mcc_queues_destroy(adapter);
2332 rx_qs_destroy:
2333         be_rx_queues_destroy(adapter);
2334 tx_qs_destroy:
2335         be_tx_queues_destroy(adapter);
2336 if_destroy:
2337         for (vf = 0; vf < num_vfs; vf++)
2338                 if (adapter->vf_cfg[vf].vf_if_handle)
2339                         be_cmd_if_destroy(adapter,
2340                                         adapter->vf_cfg[vf].vf_if_handle);
2341         be_cmd_if_destroy(adapter, adapter->if_handle);
2342 do_none:
2343         return status;
2344 }
2345
2346 static int be_clear(struct be_adapter *adapter)
2347 {
2348         if (be_physfn(adapter))
2349                 be_vf_eth_addr_rem(adapter);
2350
2351         be_mcc_queues_destroy(adapter);
2352         be_rx_queues_destroy(adapter);
2353         be_tx_queues_destroy(adapter);
2354
2355         be_cmd_if_destroy(adapter, adapter->if_handle);
2356
2357         /* tell fw we're done with firing cmds */
2358         be_cmd_fw_clean(adapter);
2359         return 0;
2360 }
2361
2362
2363 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2364 static bool be_flash_redboot(struct be_adapter *adapter,
2365                         const u8 *p, u32 img_start, int image_size,
2366                         int hdr_size)
2367 {
2368         u32 crc_offset;
2369         u8 flashed_crc[4];
2370         int status;
2371
2372         crc_offset = hdr_size + img_start + image_size - 4;
2373
2374         p += crc_offset;
2375
2376         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2377                         (image_size - 4));
2378         if (status) {
2379                 dev_err(&adapter->pdev->dev,
2380                 "could not get crc from flash, not flashing redboot\n");
2381                 return false;
2382         }
2383
2384         /* Update redboot only if the CRC does not match */
2385         if (!memcmp(flashed_crc, p, 4))
2386                 return false;
2387         else
2388                 return true;
2389 }
2390
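/* Walk the per-generation flash layout table and write each firmware
 * component in 32KB chunks: intermediate chunks are sent with
 * FLASHROM_OPER_SAVE and the final chunk with FLASHROM_OPER_FLASH,
 * which commits the component to flash.
 */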
2391 static int be_flash_data(struct be_adapter *adapter,
2392                         const struct firmware *fw,
2393                         struct be_dma_mem *flash_cmd, int num_of_images)
2394
2395 {
2396         int status = 0, i, filehdr_size = 0;
2397         u32 total_bytes = 0, flash_op;
2398         int num_bytes;
2399         const u8 *p = fw->data;
2400         struct be_cmd_write_flashrom *req = flash_cmd->va;
2401         const struct flash_comp *pflashcomp;
2402         int num_comp;
2403
2404         static const struct flash_comp gen3_flash_types[9] = {
2405                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2406                         FLASH_IMAGE_MAX_SIZE_g3},
2407                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2408                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2409                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2410                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2412                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2414                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2415                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2416                         FLASH_IMAGE_MAX_SIZE_g3},
2417                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2418                         FLASH_IMAGE_MAX_SIZE_g3},
2419                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2420                         FLASH_IMAGE_MAX_SIZE_g3},
2421                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2422                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2423         };
2424         static const struct flash_comp gen2_flash_types[8] = {
2425                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2426                         FLASH_IMAGE_MAX_SIZE_g2},
2427                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2428                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2429                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2430                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2432                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2434                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2435                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2436                         FLASH_IMAGE_MAX_SIZE_g2},
2437                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2438                         FLASH_IMAGE_MAX_SIZE_g2},
2439                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2440                          FLASH_IMAGE_MAX_SIZE_g2}
2441         };
2442
2443         if (adapter->generation == BE_GEN3) {
2444                 pflashcomp = gen3_flash_types;
2445                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2446                 num_comp = ARRAY_SIZE(gen3_flash_types);
2447         } else {
2448                 pflashcomp = gen2_flash_types;
2449                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2450                 num_comp = ARRAY_SIZE(gen2_flash_types);
2451         }
2452         for (i = 0; i < num_comp; i++) {
2453                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2454                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2455                         continue;
2456                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2457                         (!be_flash_redboot(adapter, fw->data,
2458                          pflashcomp[i].offset, pflashcomp[i].size,
2459                          filehdr_size)))
2460                         continue;
2461                 p = fw->data;
2462                 p += filehdr_size + pflashcomp[i].offset
2463                         + (num_of_images * sizeof(struct image_hdr));
2464                 if (p + pflashcomp[i].size > fw->data + fw->size)
2465                         return -1;
2466                 total_bytes = pflashcomp[i].size;
2467                 while (total_bytes) {
2468                         if (total_bytes > 32*1024)
2469                                 num_bytes = 32*1024;
2470                         else
2471                                 num_bytes = total_bytes;
2472                         total_bytes -= num_bytes;
2473
2474                         if (!total_bytes)
2475                                 flash_op = FLASHROM_OPER_FLASH;
2476                         else
2477                                 flash_op = FLASHROM_OPER_SAVE;
2478                         memcpy(req->params.data_buf, p, num_bytes);
2479                         p += num_bytes;
2480                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2481                                 pflashcomp[i].optype, flash_op, num_bytes);
2482                         if (status) {
2483                                 dev_err(&adapter->pdev->dev,
2484                                         "cmd to write to flash rom failed.\n");
2485                                 return -1;
2486                         }
2487                         yield();
2488                 }
2489         }
2490         return 0;
2491 }
2492
2493 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2494 {
2495         if (fhdr == NULL)
2496                 return 0;
2497         if (fhdr->build[0] == '3')
2498                 return BE_GEN3;
2499         else if (fhdr->build[0] == '2')
2500                 return BE_GEN2;
2501         else
2502                 return 0;
2503 }
2504
2505 int be_load_fw(struct be_adapter *adapter, u8 *func)
2506 {
2507         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2508         const struct firmware *fw;
2509         struct flash_file_hdr_g2 *fhdr;
2510         struct flash_file_hdr_g3 *fhdr3;
2511         struct image_hdr *img_hdr_ptr = NULL;
2512         struct be_dma_mem flash_cmd;
2513         int status, i = 0, num_imgs = 0;
2514         const u8 *p;
2515
2516         if (!netif_running(adapter->netdev)) {
2517                 dev_err(&adapter->pdev->dev,
2518                         "Firmware load not allowed (interface is down)\n");
2519                 return -EPERM;
2520         }
2521
2522         strcpy(fw_file, func);
2523
2524         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2525         if (status)
2526                 goto fw_exit;
2527
2528         p = fw->data;
2529         fhdr = (struct flash_file_hdr_g2 *) p;
2530         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531
2532         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2533         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2534                                         &flash_cmd.dma);
2535         if (!flash_cmd.va) {
2536                 status = -ENOMEM;
2537                 dev_err(&adapter->pdev->dev,
2538                         "Memory allocation failure while flashing\n");
2539                 goto fw_exit;
2540         }
2541
2542         if ((adapter->generation == BE_GEN3) &&
2543                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2544                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2545                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2546                 for (i = 0; i < num_imgs; i++) {
2547                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2548                                         (sizeof(struct flash_file_hdr_g3) +
2549                                          i * sizeof(struct image_hdr)));
2550                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2551                                 status = be_flash_data(adapter, fw, &flash_cmd,
2552                                                         num_imgs);
2553                 }
2554         } else if ((adapter->generation == BE_GEN2) &&
2555                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2556                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2557         } else {
2558                 dev_err(&adapter->pdev->dev,
2559                         "UFI and Interface are not compatible for flashing\n");
2560                 status = -1;
2561         }
2562
2563         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2564                                 flash_cmd.dma);
2565         if (status) {
2566                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2567                 goto fw_exit;
2568         }
2569
2570         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2571
2572 fw_exit:
2573         release_firmware(fw);
2574         return status;
2575 }
2576
2577 static struct net_device_ops be_netdev_ops = {
2578         .ndo_open               = be_open,
2579         .ndo_stop               = be_close,
2580         .ndo_start_xmit         = be_xmit,
2581         .ndo_set_rx_mode        = be_set_multicast_list,
2582         .ndo_set_mac_address    = be_mac_addr_set,
2583         .ndo_change_mtu         = be_change_mtu,
2584         .ndo_validate_addr      = eth_validate_addr,
2585         .ndo_vlan_rx_register   = be_vlan_register,
2586         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2587         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2588         .ndo_set_vf_mac         = be_set_vf_mac,
2589         .ndo_set_vf_vlan        = be_set_vf_vlan,
2590         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2591         .ndo_get_vf_config      = be_get_vf_config
2592 };
2593
2594 static void be_netdev_init(struct net_device *netdev)
2595 {
2596         struct be_adapter *adapter = netdev_priv(netdev);
2597         struct be_rx_obj *rxo;
2598         int i;
2599
2600         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2601                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2603                 NETIF_F_GRO | NETIF_F_TSO6;
2604
2605         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607
2608         if (lancer_chip(adapter))
2609                 netdev->vlan_features |= NETIF_F_TSO6;
2610
2611         netdev->flags |= IFF_MULTICAST;
2612
2613         adapter->rx_csum = true;
2614
2615         /* Default settings for Rx and Tx flow control */
2616         adapter->rx_fc = true;
2617         adapter->tx_fc = true;
2618
2619         netif_set_gso_max_size(netdev, 65535);
2620
2621         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2622
2623         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2624
2625         for_all_rx_queues(adapter, rxo, i)
2626                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2627                                 BE_NAPI_WEIGHT);
2628
2629         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2630                 BE_NAPI_WEIGHT);
2631
2632         netif_stop_queue(netdev);
2633 }
2634
2635 static void be_unmap_pci_bars(struct be_adapter *adapter)
2636 {
2637         if (adapter->csr)
2638                 iounmap(adapter->csr);
2639         if (adapter->db)
2640                 iounmap(adapter->db);
2641         if (adapter->pcicfg && be_physfn(adapter))
2642                 iounmap(adapter->pcicfg);
2643 }
2644
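/* Map the PCI BARs this function needs. Lancer exposes only a doorbell
 * BAR (BAR 0); BE2/BE3 additionally map the CSR BAR on the PF and a
 * PCI-config shadow region whose BAR index differs between chip
 * generations and between PF and VF.
 */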
2645 static int be_map_pci_bars(struct be_adapter *adapter)
2646 {
2647         u8 __iomem *addr;
2648         int pcicfg_reg, db_reg;
2649
2650         if (lancer_chip(adapter)) {
2651                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652                         pci_resource_len(adapter->pdev, 0));
2653                 if (addr == NULL)
2654                         return -ENOMEM;
2655                 adapter->db = addr;
2656                 return 0;
2657         }
2658
2659         if (be_physfn(adapter)) {
2660                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2661                                 pci_resource_len(adapter->pdev, 2));
2662                 if (addr == NULL)
2663                         return -ENOMEM;
2664                 adapter->csr = addr;
2665         }
2666
2667         if (adapter->generation == BE_GEN2) {
2668                 pcicfg_reg = 1;
2669                 db_reg = 4;
2670         } else {
2671                 pcicfg_reg = 0;
2672                 if (be_physfn(adapter))
2673                         db_reg = 4;
2674                 else
2675                         db_reg = 0;
2676         }
2677         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2678                                 pci_resource_len(adapter->pdev, db_reg));
2679         if (addr == NULL)
2680                 goto pci_map_err;
2681         adapter->db = addr;
2682
2683         if (be_physfn(adapter)) {
2684                 addr = ioremap_nocache(
2685                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2686                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2687                 if (addr == NULL)
2688                         goto pci_map_err;
2689                 adapter->pcicfg = addr;
2690         } else
2691                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2692
2693         return 0;
2694 pci_map_err:
2695         be_unmap_pci_bars(adapter);
2696         return -ENOMEM;
2697 }
2698
2699
2700 static void be_ctrl_cleanup(struct be_adapter *adapter)
2701 {
2702         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2703
2704         be_unmap_pci_bars(adapter);
2705
2706         if (mem->va)
2707                 pci_free_consistent(adapter->pdev, mem->size,
2708                         mem->va, mem->dma);
2709
2710         mem = &adapter->mc_cmd_mem;
2711         if (mem->va)
2712                 pci_free_consistent(adapter->pdev, mem->size,
2713                         mem->va, mem->dma);
2714 }
2715
2716 static int be_ctrl_init(struct be_adapter *adapter)
2717 {
2718         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2719         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2720         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2721         int status;
2722
2723         status = be_map_pci_bars(adapter);
2724         if (status)
2725                 goto done;
2726
2727         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2728         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2729                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2730         if (!mbox_mem_alloc->va) {
2731                 status = -ENOMEM;
2732                 goto unmap_pci_bars;
2733         }
2734
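        /* The mailbox must be 16-byte aligned, hence the 16 extra bytes
         * allocated above; the working va/dma pointers are rounded up to
         * the next 16-byte boundary with PTR_ALIGN().
         */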
2735         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2736         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2737         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2738         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2739
2740         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2741         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2742                         &mc_cmd_mem->dma);
2743         if (mc_cmd_mem->va == NULL) {
2744                 status = -ENOMEM;
2745                 goto free_mbox;
2746         }
2747         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2748
2749         mutex_init(&adapter->mbox_lock);
2750         spin_lock_init(&adapter->mcc_lock);
2751         spin_lock_init(&adapter->mcc_cq_lock);
2752
2753         init_completion(&adapter->flash_compl);
2754         pci_save_state(adapter->pdev);
2755         return 0;
2756
2757 free_mbox:
2758         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2759                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2760
2761 unmap_pci_bars:
2762         be_unmap_pci_bars(adapter);
2763
2764 done:
2765         return status;
2766 }
2767
2768 static void be_stats_cleanup(struct be_adapter *adapter)
2769 {
2770         struct be_dma_mem *cmd = &adapter->stats_cmd;
2771
2772         if (cmd->va)
2773                 pci_free_consistent(adapter->pdev, cmd->size,
2774                         cmd->va, cmd->dma);
2775 }
2776
2777 static int be_stats_init(struct be_adapter *adapter)
2778 {
2779         struct be_dma_mem *cmd = &adapter->stats_cmd;
2780
2781         cmd->size = sizeof(struct be_cmd_req_get_stats);
2782         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2783         if (cmd->va == NULL)
2784                 return -1;
2785         memset(cmd->va, 0, cmd->size);
2786         return 0;
2787 }
2788
2789 static void __devexit be_remove(struct pci_dev *pdev)
2790 {
2791         struct be_adapter *adapter = pci_get_drvdata(pdev);
2792
2793         if (!adapter)
2794                 return;
2795
2796         cancel_delayed_work_sync(&adapter->work);
2797
2798         unregister_netdev(adapter->netdev);
2799
2800         be_clear(adapter);
2801
2802         be_stats_cleanup(adapter);
2803
2804         be_ctrl_cleanup(adapter);
2805
2806         be_sriov_disable(adapter);
2807
2808         be_msix_disable(adapter);
2809
2810         pci_set_drvdata(pdev, NULL);
2811         pci_release_regions(pdev);
2812         pci_disable_device(pdev);
2813
2814         free_netdev(adapter->netdev);
2815 }
2816
2817 static int be_get_config(struct be_adapter *adapter)
2818 {
2819         int status;
2820         u8 mac[ETH_ALEN];
2821
2822         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2823         if (status)
2824                 return status;
2825
2826         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2827                         &adapter->function_mode, &adapter->function_caps);
2828         if (status)
2829                 return status;
2830
2831         memset(mac, 0, ETH_ALEN);
2832
2833         if (be_physfn(adapter)) {
2834                 status = be_cmd_mac_addr_query(adapter, mac,
2835                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2836
2837                 if (status)
2838                         return status;
2839
2840                 if (!is_valid_ether_addr(mac))
2841                         return -EADDRNOTAVAIL;
2842
2843                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2844                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2845         }
2846
2847         if (adapter->function_mode & 0x400)
2848                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2849         else
2850                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2851
2852         return 0;
2853 }
2854
2855 static int be_dev_family_check(struct be_adapter *adapter)
2856 {
2857         struct pci_dev *pdev = adapter->pdev;
2858         u32 sli_intf = 0, if_type;
2859
2860         switch (pdev->device) {
2861         case BE_DEVICE_ID1:
2862         case OC_DEVICE_ID1:
2863                 adapter->generation = BE_GEN2;
2864                 break;
2865         case BE_DEVICE_ID2:
2866         case OC_DEVICE_ID2:
2867                 adapter->generation = BE_GEN3;
2868                 break;
2869         case OC_DEVICE_ID3:
2870                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872                                                 SLI_INTF_IF_TYPE_SHIFT;
2873
2874                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875                         if_type != 0x02) {
2876                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877                         return -EINVAL;
2878                 }
2879                 if (num_vfs > 0) {
2880                         dev_err(&pdev->dev, "VFs not supported\n");
2881                         return -EINVAL;
2882                 }
2883                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884                                          SLI_INTF_FAMILY_SHIFT);
2885                 adapter->generation = BE_GEN3;
2886                 break;
2887         default:
2888                 adapter->generation = 0;
2889         }
2890         return 0;
2891 }
2892
2893 static int __devinit be_probe(struct pci_dev *pdev,
2894                         const struct pci_device_id *pdev_id)
2895 {
2896         int status = 0;
2897         struct be_adapter *adapter;
2898         struct net_device *netdev;
2899
2900         status = pci_enable_device(pdev);
2901         if (status)
2902                 goto do_none;
2903
2904         status = pci_request_regions(pdev, DRV_NAME);
2905         if (status)
2906                 goto disable_dev;
2907         pci_set_master(pdev);
2908
2909         netdev = alloc_etherdev(sizeof(struct be_adapter));
2910         if (netdev == NULL) {
2911                 status = -ENOMEM;
2912                 goto rel_reg;
2913         }
2914         adapter = netdev_priv(netdev);
2915         adapter->pdev = pdev;
2916         pci_set_drvdata(pdev, adapter);
2917
2918         status = be_dev_family_check(adapter);
2919         if (status)
2920                 goto free_netdev;
2921
2922         adapter->netdev = netdev;
2923         SET_NETDEV_DEV(netdev, &pdev->dev);
2924
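        /* Prefer a 64-bit DMA mask (and advertise NETIF_F_HIGHDMA when it
         * is granted); otherwise fall back to 32-bit addressing.
         */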
2925         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2926         if (!status) {
2927                 netdev->features |= NETIF_F_HIGHDMA;
2928         } else {
2929                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2930                 if (status) {
2931                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2932                         goto free_netdev;
2933                 }
2934         }
2935
2936         be_sriov_enable(adapter);
2937
2938         status = be_ctrl_init(adapter);
2939         if (status)
2940                 goto free_netdev;
2941
2942         /* sync up with fw's ready state */
2943         if (be_physfn(adapter)) {
2944                 status = be_cmd_POST(adapter);
2945                 if (status)
2946                         goto ctrl_clean;
2947         }
2948
2949         /* tell fw we're ready to fire cmds */
2950         status = be_cmd_fw_init(adapter);
2951         if (status)
2952                 goto ctrl_clean;
2953
2954         if (be_physfn(adapter)) {
2955                 status = be_cmd_reset_function(adapter);
2956                 if (status)
2957                         goto ctrl_clean;
2958         }
2959
2960         status = be_stats_init(adapter);
2961         if (status)
2962                 goto ctrl_clean;
2963
2964         status = be_get_config(adapter);
2965         if (status)
2966                 goto stats_clean;
2967
2968         be_msix_enable(adapter);
2969
2970         INIT_DELAYED_WORK(&adapter->work, be_worker);
2971
2972         status = be_setup(adapter);
2973         if (status)
2974                 goto msix_disable;
2975
2976         be_netdev_init(netdev);
2977         status = register_netdev(netdev);
2978         if (status != 0)
2979                 goto unsetup;
2980         netif_carrier_off(netdev);
2981
2982         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2983         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2984         return 0;
2985
2986 unsetup:
2987         be_clear(adapter);
2988 msix_disable:
2989         be_msix_disable(adapter);
2990 stats_clean:
2991         be_stats_cleanup(adapter);
2992 ctrl_clean:
2993         be_ctrl_cleanup(adapter);
2994 free_netdev:
2995         be_sriov_disable(adapter);
2996         free_netdev(netdev);
2997         pci_set_drvdata(pdev, NULL);
2998 rel_reg:
2999         pci_release_regions(pdev);
3000 disable_dev:
3001         pci_disable_device(pdev);
3002 do_none:
3003         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3004         return status;
3005 }
3006
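     /*
      * Standard PCI suspend path: arm wake-on-LAN if the user enabled it
      * (adapter->wol is normally set through ethtool), detach and close the
      * netdev, tear down the rings, then hand the device to the PCI core
      * in a low-power state.
      */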
3007 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3008 {
3009         struct be_adapter *adapter = pci_get_drvdata(pdev);
3010         struct net_device *netdev = adapter->netdev;
3011
3012         if (adapter->wol)
3013                 be_setup_wol(adapter, true);
3014
3015         netif_device_detach(netdev);
3016         if (netif_running(netdev)) {
3017                 rtnl_lock();
3018                 be_close(netdev);
3019                 rtnl_unlock();
3020         }
3021         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3022         be_clear(adapter);
3023
3024         pci_save_state(pdev);
3025         pci_disable_device(pdev);
3026         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3027         return 0;
3028 }
3029
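     /*
      * Mirrors be_suspend() in reverse: power the device back up, restore
      * its config space, re-sync with firmware, rebuild the rings via
      * be_setup(), reopen the interface if it was running and disarm
      * wake-on-LAN.
      */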
3030 static int be_resume(struct pci_dev *pdev)
3031 {
3032         int status = 0;
3033         struct be_adapter *adapter = pci_get_drvdata(pdev);
3034         struct net_device *netdev = adapter->netdev;
3035
3036         netif_device_detach(netdev);
3037
3038         status = pci_enable_device(pdev);
3039         if (status)
3040                 return status;
3041
3042         pci_set_power_state(pdev, PCI_D0);
3043         pci_restore_state(pdev);
3044
3045         /* tell fw we're ready to fire cmds */
3046         status = be_cmd_fw_init(adapter);
3047         if (status)
3048                 return status;
3049
3050         status = be_setup(adapter);
             if (status)
                     return status;
3051         if (netif_running(netdev)) {
3052                 rtnl_lock();
3053                 be_open(netdev);
3054                 rtnl_unlock();
3055         }
3056         netif_device_attach(netdev);
3057
3058         if (adapter->wol)
3059                 be_setup_wol(adapter, false);
3060         return 0;
3061 }
3062
3063 /*
3064  * An FLR (function-level reset) will stop BE from DMAing any data.
3065  */
3066 static void be_shutdown(struct pci_dev *pdev)
3067 {
3068         struct be_adapter *adapter = pci_get_drvdata(pdev);
3069         struct net_device *netdev = adapter->netdev;
3070
3071         netif_device_detach(netdev);
3072
             /* stop the periodic worker before the device is reset and disabled */
             cancel_delayed_work_sync(&adapter->work);

3073         be_cmd_reset_function(adapter);
3074
3075         if (adapter->wol)
3076                 be_setup_wol(adapter, true);
3077
3078         pci_disable_device(pdev);
3079 }
3080
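     /*
      * EEH/AER recovery runs in three stages: ->error_detected() quiesces
      * the driver, ->slot_reset() re-initializes the device once the
      * platform has reset the slot, and ->resume() restarts traffic.
      * PCI_ERS_RESULT_NEED_RESET asks the PCI core to go ahead with the
      * reset; PCI_ERS_RESULT_DISCONNECT gives the device up for dead.
      */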
3081 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3082                                 pci_channel_state_t state)
3083 {
3084         struct be_adapter *adapter = pci_get_drvdata(pdev);
3085         struct net_device *netdev = adapter->netdev;
3086
3087         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3088
3089         adapter->eeh_err = true;
3090
3091         netif_device_detach(netdev);
3092
3093         if (netif_running(netdev)) {
3094                 rtnl_lock();
3095                 be_close(netdev);
3096                 rtnl_unlock();
3097         }
3098         be_clear(adapter);
3099
3100         if (state == pci_channel_io_perm_failure)
3101                 return PCI_ERS_RESULT_DISCONNECT;
3102
3103         pci_disable_device(pdev);
3104
3105         return PCI_ERS_RESULT_NEED_RESET;
3106 }
3107
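     /*
      * Called after the platform has reset the slot: re-enable the device,
      * restore its config space, and run POST to verify that the firmware
      * came back healthy before declaring the slot recovered.
      */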
3108 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3109 {
3110         struct be_adapter *adapter = pci_get_drvdata(pdev);
3111         int status;
3112
3113         dev_info(&adapter->pdev->dev, "EEH reset\n");
3114         adapter->eeh_err = false;
3115
3116         status = pci_enable_device(pdev);
3117         if (status)
3118                 return PCI_ERS_RESULT_DISCONNECT;
3119
3120         pci_set_master(pdev);
3121         pci_set_power_state(pdev, PCI_D0);
3122         pci_restore_state(pdev);
3123
3124         /* Check if card is ok and fw is ready */
3125         status = be_cmd_POST(adapter);
3126         if (status)
3127                 return PCI_ERS_RESULT_DISCONNECT;
3128
3129         return PCI_ERS_RESULT_RECOVERED;
3130 }
3131
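     /*
      * Final recovery stage: re-init the firmware command path, rebuild
      * the rings and reattach the netdev.  ->resume() has no way to
      * report failure, so errors can only be logged.
      */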
3132 static void be_eeh_resume(struct pci_dev *pdev)
3133 {
3134         int status = 0;
3135         struct be_adapter *adapter = pci_get_drvdata(pdev);
3136         struct net_device *netdev = adapter->netdev;
3137
3138         dev_info(&adapter->pdev->dev, "EEH resume\n");
3139
3140         pci_save_state(pdev);
3141
3142         /* tell fw we're ready to fire cmds */
3143         status = be_cmd_fw_init(adapter);
3144         if (status)
3145                 goto err;
3146
3147         status = be_setup(adapter);
3148         if (status)
3149                 goto err;
3150
3151         if (netif_running(netdev)) {
3152                 status = be_open(netdev);
3153                 if (status)
3154                         goto err;
3155         }
3156         netif_device_attach(netdev);
3157         return;
3158 err:
3159         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3160 }
3161
3162 static struct pci_error_handlers be_eeh_handlers = {
3163         .error_detected = be_eeh_err_detected,
3164         .slot_reset = be_eeh_reset,
3165         .resume = be_eeh_resume,
3166 };
3167
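     /*
      * Hook table handed to the PCI core.  be_remove() (defined earlier in
      * this file) is the inverse of be_probe(); the suspend/resume/shutdown
      * and EEH callbacks above cover power management and error recovery.
      */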
3168 static struct pci_driver be_driver = {
3169         .name = DRV_NAME,
3170         .id_table = be_dev_ids,
3171         .probe = be_probe,
3172         .remove = be_remove,
3173         .suspend = be_suspend,
3174         .resume = be_resume,
3175         .shutdown = be_shutdown,
3176         .err_handler = &be_eeh_handlers
3177 };
3178
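     /*
      * Module init validates the two load-time parameters before
      * registering the driver: rx_frag_size must be 2048, 4096 or 8192,
      * and num_vfs is capped at 32; bad values are overridden with a
      * warning instead of failing the load.  Example invocation
      * (hypothetical values; the module name comes from DRV_NAME):
      *
      *   modprobe be2net rx_frag_size=4096 num_vfs=4
      */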
3179 static int __init be_init_module(void)
3180 {
3181         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3182             rx_frag_size != 2048) {
3183                 printk(KERN_WARNING DRV_NAME
3184                         " : Module param rx_frag_size must be 2048/4096/8192."
3185                         " Using 2048\n");
3186                 rx_frag_size = 2048;
3187         }
3188
3189         if (num_vfs > 32) {
3190                 printk(KERN_WARNING DRV_NAME
3191                         " : Module param num_vfs must not be greater than 32."
3192                         " Using 32\n");
3193                 num_vfs = 32;
3194         }
3195
3196         return pci_register_driver(&be_driver);
3197 }
3198 module_init(be_init_module);
3199
3200 static void __exit be_exit_module(void)
3201 {
3202         pci_unregister_driver(&be_driver);
3203 }
3204 module_exit(be_exit_module);