be2net: use device model DMA API
[pandora-kernel.git] drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

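/* A minimal usage sketch (parameter values below are illustrative only):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size selects the rx buffer fragment size and num_vfs the number
 * of SR-IOV virtual functions the PF should enable at probe time.
 */
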
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

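/* Queue rings are allocated/freed with the generic device-model DMA API
 * (dma_alloc_coherent()/dma_free_coherent() against &pdev->dev) rather
 * than the legacy pci_alloc_consistent() wrappers.
 */
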
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

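/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit in
 * the MEMBAR interrupt-control register. The read-modify-write preserves
 * the other bits; nothing is written once an EEH error has been flagged.
 */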
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

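/* The notify helpers below ring doorbell registers: each builds a 32-bit
 * doorbell word (ring id plus a posted/popped count) and writes it at the
 * ring's doorbell offset. The wmb() before the rx/tx doorbells orders the
 * descriptor writes in memory ahead of the doorbell write, so the adapter
 * never fetches stale entries.
 */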
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

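/* Adaptive interrupt coalescing: the rx EQ delay (eqd) is recomputed about
 * once a second from the observed rx frags/sec. E.g. at ~1.4M frags/sec,
 * eqd = (1400000 / 110000) << 3 = 96, which is then clamped to
 * [min_eqd, max_eqd] and forced to 0 below 10 so a nearly idle queue
 * stays low-latency.
 */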
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once every two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);

        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

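/* One WRB is needed for the linear head (if any), one per page frag and
 * one for the header WRB. On BEx (unlike Lancer) an odd total gets a
 * dummy WRB appended to keep the count even: e.g. a head plus one frag
 * is 1 + 1 + 1 = 3 WRBs, padded to 4 with a dummy.
 */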
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

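/* Undo the DMA mapping of one tx WRB: the WRB is converted back to CPU
 * endianness to recover the address/length, then unmapped with the call
 * matching how it was mapped (dma_unmap_single() for the skb head,
 * dma_unmap_page() for a page frag). Zero-length dummy WRBs are skipped.
 */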
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

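/* Fill tx WRBs for an skb: the header WRB is reserved first, the linear
 * part is mapped with dma_map_single() and each frag with dma_map_page(),
 * one WRB per mapping. On a mapping failure the queue head is rewound to
 * map_head and every mapping made so far is undone.
 */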
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once every two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}

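/* A "big page" is mapped for DMA once and carved into rx_frag_size
 * fragments shared by consecutive rx descriptors; the page is unmapped
 * only when the fragment flagged last_page_user is consumed.
 */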
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
        if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

                rxo->last_frag_index = rxq_idx;

                for (i = 0; i < num_rcvd; i++) {
                        page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                        put_page(page_info->page);
                        memset(page_info, 0, sizeof(*page_info));
                        index_inc(&rxq_idx, rxq->len);
                }
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

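/* Completion entries are DMA'd into the ring by the adapter. An entry is
 * ready only once its "valid" dword is non-zero; the rmb() that follows
 * the check keeps the rest of the entry from being read before it.
 */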
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

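/* Reclaim the WRBs of one completed skb: walk from txq->tail up to the
 * wrb_index reported in the completion, unmapping each fragment (the
 * head mapping only for the first data WRB), then credit the entries
 * back to the queue and free the skb.
 */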
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;

        adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

1534 static void be_rx_queues_destroy(struct be_adapter *adapter)
1535 {
1536         struct be_queue_info *q;
1537         struct be_rx_obj *rxo;
1538         int i;
1539
1540         for_all_rx_queues(adapter, rxo, i) {
1541                 q = &rxo->q;
1542                 if (q->created) {
1543                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1544                         /* After the rxq is invalidated, wait for a grace time
1545                          * of 1ms for all DMA to end and the flush compl to
1546                          * arrive
1547                          */
1548                         mdelay(1);
1549                         be_rx_q_clean(adapter, rxo);
1550                 }
1551                 be_queue_free(adapter, q);
1552
1553                 q = &rxo->cq;
1554                 if (q->created)
1555                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1556                 be_queue_free(adapter, q);
1557
1558                 /* Clear any residual events */
1559                 q = &rxo->rx_eq.q;
1560                 if (q->created) {
1561                         be_eq_clean(adapter, &rxo->rx_eq);
1562                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1563                 }
1564                 be_queue_free(adapter, q);
1565         }
1566 }
1567
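/* For every RX queue, create an EQ, a CQ and the RX ring itself, handing an
 * MSI-X vector index to each EQ. When more than one RX queue exists, queue 0
 * stays the default (non-RSS) queue and the rss_ids of the remaining queues
 * are programmed into the RSS indirection table.
 */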
1568 static int be_rx_queues_create(struct be_adapter *adapter)
1569 {
1570         struct be_queue_info *eq, *q, *cq;
1571         struct be_rx_obj *rxo;
1572         int rc, i;
1573
1574         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1575         for_all_rx_queues(adapter, rxo, i) {
1576                 rxo->adapter = adapter;
1577                 /* Init last_frag_index so that the frag index in the first
1578                  * completion will never match */
1579                 rxo->last_frag_index = 0xffff;
1580                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1581                 rxo->rx_eq.enable_aic = true;
1582
1583                 /* EQ */
1584                 eq = &rxo->rx_eq.q;
1585                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1586                                         sizeof(struct be_eq_entry));
1587                 if (rc)
1588                         goto err;
1589
1590                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1591                 if (rc)
1592                         goto err;
1593
1594                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1595
1596                 /* CQ */
1597                 cq = &rxo->cq;
1598                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1599                                 sizeof(struct be_eth_rx_compl));
1600                 if (rc)
1601                         goto err;
1602
1603                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1604                 if (rc)
1605                         goto err;
1606                 /* Rx Q */
1607                 q = &rxo->q;
1608                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1609                                 sizeof(struct be_eth_rx_d));
1610                 if (rc)
1611                         goto err;
1612
1613                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1614                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1615                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1616                 if (rc)
1617                         goto err;
1618         }
1619
1620         if (be_multi_rxq(adapter)) {
1621                 u8 rsstable[MAX_RSS_QS];
1622
1623                 for_all_rss_queues(adapter, rxo, i)
1624                         rsstable[i] = rxo->rss_id;
1625
1626                 rc = be_cmd_rss_config(adapter, rsstable,
1627                         adapter->num_rx_qs - 1);
1628                 if (rc)
1629                         goto err;
1630         }
1631
1632         return 0;
1633 err:
1634         be_rx_queues_destroy(adapter);
1635         return -1;
1636 }
1637
1638 static bool event_peek(struct be_eq_obj *eq_obj)
1639 {
1640         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1641         return eqe->evt != 0;
1645 }
1646
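/* Legacy INTx handler. Lancer has no CEV_ISR register, so pending events are
 * detected by peeking at the EQ tail entry instead; on BE2/BE3 the ISR
 * register says which event queues fired.
 */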
1647 static irqreturn_t be_intx(int irq, void *dev)
1648 {
1649         struct be_adapter *adapter = dev;
1650         struct be_rx_obj *rxo;
1651         int isr, i, tx = 0, rx = 0;
1652
1653         if (lancer_chip(adapter)) {
1654                 if (event_peek(&adapter->tx_eq))
1655                         tx = event_handle(adapter, &adapter->tx_eq);
1656                 for_all_rx_queues(adapter, rxo, i) {
1657                         if (event_peek(&rxo->rx_eq))
1658                                 rx |= event_handle(adapter, &rxo->rx_eq);
1659                 }
1660
1661                 if (!(tx || rx))
1662                         return IRQ_NONE;
1663
1664         } else {
1665                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1666                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1667                 if (!isr)
1668                         return IRQ_NONE;
1669
1670                 if (isr & (1 << adapter->tx_eq.msix_vec_idx))
1671                         event_handle(adapter, &adapter->tx_eq);
1672
1673                 for_all_rx_queues(adapter, rxo, i) {
1674                         if (isr & (1 << rxo->rx_eq.msix_vec_idx))
1675                                 event_handle(adapter, &rxo->rx_eq);
1676                 }
1677         }
1678
1679         return IRQ_HANDLED;
1680 }
1681
1682 static irqreturn_t be_msix_rx(int irq, void *dev)
1683 {
1684         struct be_rx_obj *rxo = dev;
1685         struct be_adapter *adapter = rxo->adapter;
1686
1687         event_handle(adapter, &rxo->rx_eq);
1688
1689         return IRQ_HANDLED;
1690 }
1691
1692 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1693 {
1694         struct be_adapter *adapter = dev;
1695
1696         event_handle(adapter, &adapter->tx_eq);
1697
1698         return IRQ_HANDLED;
1699 }
1700
1701 static inline bool do_gro(struct be_rx_obj *rxo,
1702                         struct be_eth_rx_compl *rxcp, u8 err)
1703 {
1704         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1705
1706         if (err)
1707                 rxo->stats.rxcp_err++;
1708
1709         return tcp_frame && !err;
1710 }
1711
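/* NAPI poll for RX: process up to 'budget' completions, skip flush and
 * out-of-buffer completions, refill the RX ring when it falls below the
 * watermark and re-arm the CQ only once all work is done.
 */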
1712 static int be_poll_rx(struct napi_struct *napi, int budget)
1713 {
1714         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1715         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1716         struct be_adapter *adapter = rxo->adapter;
1717         struct be_queue_info *rx_cq = &rxo->cq;
1718         struct be_eth_rx_compl *rxcp;
1719         u32 work_done;
1720         u16 frag_index, num_rcvd;
1721         u8 err;
1722
1723         rxo->stats.rx_polls++;
1724         for (work_done = 0; work_done < budget; work_done++) {
1725                 rxcp = be_rx_compl_get(rxo);
1726                 if (!rxcp)
1727                         break;
1728
1729                 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1730                 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1731                                                                 rxcp);
1732                 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1733                                                                 rxcp);
1734
1735                 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1736                 if (likely(frag_index != rxo->last_frag_index &&
1737                                 num_rcvd != 0)) {
1738                         rxo->last_frag_index = frag_index;
1739
1740                         if (do_gro(rxo, rxcp, err))
1741                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1742                         else
1743                                 be_rx_compl_process(adapter, rxo, rxcp);
1744                 }
1745
1746                 be_rx_compl_reset(rxcp);
1747         }
1748
1749         /* Refill the queue */
1750         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1751                 be_post_rx_frags(rxo);
1752
1753         /* All consumed */
1754         if (work_done < budget) {
1755                 napi_complete(napi);
1756                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1757         } else {
1758                 /* More to be consumed; continue with interrupts disabled */
1759                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1760         }
1761         return work_done;
1762 }
1763
1764 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1765  * For TX/MCC we don't honour the budget; consume everything.
1766  */
1767 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1768 {
1769         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1770         struct be_adapter *adapter =
1771                 container_of(tx_eq, struct be_adapter, tx_eq);
1772         struct be_queue_info *txq = &adapter->tx_obj.q;
1773         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1774         struct be_eth_tx_compl *txcp;
1775         int tx_compl = 0, mcc_compl, status = 0;
1776         u16 end_idx;
1777
1778         while ((txcp = be_tx_compl_get(tx_cq))) {
1779                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1780                                 wrb_index, txcp);
1781                 be_tx_compl_process(adapter, end_idx);
1782                 tx_compl++;
1783         }
1784
1785         mcc_compl = be_process_mcc(adapter, &status);
1786
1787         napi_complete(napi);
1788
1789         if (mcc_compl) {
1790                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1791                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1792         }
1793
1794         if (tx_compl) {
1795                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1796
1797                 /* As Tx wrbs have been freed up, wake up netdev queue if
1798                  * it was stopped due to lack of tx wrbs.
1799                  */
1800                 if (netif_queue_stopped(adapter->netdev) &&
1801                         atomic_read(&txq->used) < txq->len / 2) {
1802                         netif_wake_queue(adapter->netdev);
1803                 }
1804
1805                 tx_stats(adapter)->be_tx_events++;
1806                 tx_stats(adapter)->be_tx_compl += tx_compl;
1807         }
1808
1809         return 1;
1810 }
1811
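/* Read the UE (unrecoverable error) status registers from PCI config space,
 * apply their masks and log a line for every unmasked error bit using the
 * descriptor tables at the top of this file.
 */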
1812 void be_detect_dump_ue(struct be_adapter *adapter)
1813 {
1814         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1815         u32 i;
1816
1817         pci_read_config_dword(adapter->pdev,
1818                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1819         pci_read_config_dword(adapter->pdev,
1820                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1821         pci_read_config_dword(adapter->pdev,
1822                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1823         pci_read_config_dword(adapter->pdev,
1824                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1825
1826         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1827         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1828
1829         if (ue_status_lo || ue_status_hi) {
1830                 adapter->ue_detected = true;
1831                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1832         }
1833
1834         if (ue_status_lo) {
1835                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1836                         if (ue_status_lo & 1)
1837                                 dev_err(&adapter->pdev->dev,
1838                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1839                 }
1840         }
1841         if (ue_status_hi) {
1842                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1843                         if (ue_status_hi & 1)
1844                                 dev_err(&adapter->pdev->dev,
1845                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1846                 }
1847         }
1848
1849 }
1850
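/* Periodic (1 second) housekeeping: fire the stats command, update TX/RX
 * rate estimates and RX EQ delays, replenish any starved RX rings and check
 * for unrecoverable errors. Before the interface is up it only reaps MCC
 * completions.
 */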
1851 static void be_worker(struct work_struct *work)
1852 {
1853         struct be_adapter *adapter =
1854                 container_of(work, struct be_adapter, work.work);
1855         struct be_rx_obj *rxo;
1856         int i;
1857
1858         /* When interrupts are not yet enabled, just reap any pending
1859          * MCC completions */
1860         if (!netif_running(adapter->netdev)) {
1861                 int mcc_compl, status = 0;
1862
1863                 mcc_compl = be_process_mcc(adapter, &status);
1864
1865                 if (mcc_compl) {
1866                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1867                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1868                 }
1869                 goto reschedule;
1870         }
1871
1872         if (!adapter->stats_ioctl_sent)
1873                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1874
1875         be_tx_rate_update(adapter);
1876
1877         for_all_rx_queues(adapter, rxo, i) {
1878                 be_rx_rate_update(rxo);
1879                 be_rx_eqd_update(adapter, rxo);
1880
1881                 if (rxo->rx_post_starved) {
1882                         rxo->rx_post_starved = false;
1883                         be_post_rx_frags(rxo);
1884                 }
1885         }
1886         if (!adapter->ue_detected && !lancer_chip(adapter))
1887                 be_detect_dump_ue(adapter);
1888
1889 reschedule:
1890         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1891 }
1892
1893 static void be_msix_disable(struct be_adapter *adapter)
1894 {
1895         if (adapter->msix_enabled) {
1896                 pci_disable_msix(adapter->pdev);
1897                 adapter->msix_enabled = false;
1898         }
1899 }
1900
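/* Multiple RX queues are used only when the function is RSS-capable, SR-IOV
 * is off and the adapter is not in multi-channel mode (the 0x400
 * function_mode bit, named FLEX10_MODE in later versions of this driver).
 */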
1901 static int be_num_rxqs_get(struct be_adapter *adapter)
1902 {
1903         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1904                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1905                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1906         } else {
1907                 dev_warn(&adapter->pdev->dev,
1908                         "No support for multiple RX queues\n");
1909                 return 1;
1910         }
1911 }
1912
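/* MSI-X entry layout, a sketch of what the queue-create paths assign later
 * via msix_vec_next_idx:
 *
 *   entry 0                  ->  shared TX/MCC event queue
 *   entries 1 .. num_rx_qs   ->  one per RX event queue
 *
 * If fewer vectors are granted, retry with the count pci_enable_msix()
 * reported as available and shrink num_rx_qs accordingly.
 */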
1913 static void be_msix_enable(struct be_adapter *adapter)
1914 {
1915 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1916         int i, status;
1917
1918         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1919
1920         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1921                 adapter->msix_entries[i].entry = i;
1922
1923         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1924                         adapter->num_rx_qs + 1);
1925         if (status == 0) {
1926                 goto done;
1927         } else if (status >= BE_MIN_MSIX_VECTORS) {
1928                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1929                                 status) == 0) {
1930                         adapter->num_rx_qs = status - 1;
1931                         dev_warn(&adapter->pdev->dev,
1932                                 "Could alloc only %d MSIx vectors. "
1933                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1934                         goto done;
1935                 }
1936         }
1937         return;
1938 done:
1939         adapter->msix_enabled = true;
1940 }
1941
1942 static void be_sriov_enable(struct be_adapter *adapter)
1943 {
1944         be_check_sriov_fn_type(adapter);
1945 #ifdef CONFIG_PCI_IOV
1946         if (be_physfn(adapter) && num_vfs) {
1947                 int status;
1948
1949                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1950                 adapter->sriov_enabled = status ? false : true;
1951         }
1952 #endif
1953 }
1954
1955 static void be_sriov_disable(struct be_adapter *adapter)
1956 {
1957 #ifdef CONFIG_PCI_IOV
1958         if (adapter->sriov_enabled) {
1959                 pci_disable_sriov(adapter->pdev);
1960                 adapter->sriov_enabled = false;
1961         }
1962 #endif
1963 }
1964
1965 static inline int be_msix_vec_get(struct be_adapter *adapter,
1966                                         struct be_eq_obj *eq_obj)
1967 {
1968         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1969 }
1970
1971 static int be_request_irq(struct be_adapter *adapter,
1972                 struct be_eq_obj *eq_obj,
1973                 void *handler, char *desc, void *context)
1974 {
1975         struct net_device *netdev = adapter->netdev;
1976         int vec;
1977
1978         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1979         vec = be_msix_vec_get(adapter, eq_obj);
1980         return request_irq(vec, handler, 0, eq_obj->desc, context);
1981 }
1982
1983 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1984                         void *context)
1985 {
1986         int vec = be_msix_vec_get(adapter, eq_obj);
1987         free_irq(vec, context);
1988 }
1989
1990 static int be_msix_register(struct be_adapter *adapter)
1991 {
1992         struct be_rx_obj *rxo;
1993         int status, i;
1994         char qname[10];
1995
1996         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1997                                 adapter);
1998         if (status)
1999                 goto err;
2000
2001         for_all_rx_queues(adapter, rxo, i) {
2002                 sprintf(qname, "rxq%d", i);
2003                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2004                                 qname, rxo);
2005                 if (status)
2006                         goto err_msix;
2007         }
2008
2009         return 0;
2010
2011 err_msix:
2012         be_free_irq(adapter, &adapter->tx_eq, adapter);
2013
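        /* Unwind the per-RXQ vectors registered before the failure,
         * walking i and rxo back in lock-step.
         */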
2014         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2015                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2016
2017 err:
2018         dev_warn(&adapter->pdev->dev,
2019                 "MSIX Request IRQ failed - err %d\n", status);
2020         pci_disable_msix(adapter->pdev);
2021         adapter->msix_enabled = false;
2022         return status;
2023 }
2024
2025 static int be_irq_register(struct be_adapter *adapter)
2026 {
2027         struct net_device *netdev = adapter->netdev;
2028         int status;
2029
2030         if (adapter->msix_enabled) {
2031                 status = be_msix_register(adapter);
2032                 if (status == 0)
2033                         goto done;
2034                 /* INTx is not supported for VF */
2035                 if (!be_physfn(adapter))
2036                         return status;
2037         }
2038
2039         /* INTx */
2040         netdev->irq = adapter->pdev->irq;
2041         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2042                         adapter);
2043         if (status) {
2044                 dev_err(&adapter->pdev->dev,
2045                         "INTx request IRQ failed - err %d\n", status);
2046                 return status;
2047         }
2048 done:
2049         adapter->isr_registered = true;
2050         return 0;
2051 }
2052
2053 static void be_irq_unregister(struct be_adapter *adapter)
2054 {
2055         struct net_device *netdev = adapter->netdev;
2056         struct be_rx_obj *rxo;
2057         int i;
2058
2059         if (!adapter->isr_registered)
2060                 return;
2061
2062         /* INTx */
2063         if (!adapter->msix_enabled) {
2064                 free_irq(netdev->irq, adapter);
2065                 goto done;
2066         }
2067
2068         /* MSIx */
2069         be_free_irq(adapter, &adapter->tx_eq, adapter);
2070
2071         for_all_rx_queues(adapter, rxo, i)
2072                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2073
2074 done:
2075         adapter->isr_registered = false;
2076 }
2077
2078 static int be_close(struct net_device *netdev)
2079 {
2080         struct be_adapter *adapter = netdev_priv(netdev);
2081         struct be_rx_obj *rxo;
2082         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2083         int vec, i;
2084
2085         be_async_mcc_disable(adapter);
2086
2087         netif_stop_queue(netdev);
2088         netif_carrier_off(netdev);
2089         adapter->link_up = false;
2090
2091         if (!lancer_chip(adapter))
2092                 be_intr_set(adapter, false);
2093
2094         if (adapter->msix_enabled) {
2095                 vec = be_msix_vec_get(adapter, tx_eq);
2096                 synchronize_irq(vec);
2097
2098                 for_all_rx_queues(adapter, rxo, i) {
2099                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2100                         synchronize_irq(vec);
2101                 }
2102         } else {
2103                 synchronize_irq(netdev->irq);
2104         }
2105         be_irq_unregister(adapter);
2106
2107         for_all_rx_queues(adapter, rxo, i)
2108                 napi_disable(&rxo->rx_eq.napi);
2109
2110         napi_disable(&tx_eq->napi);
2111
2112         /* Wait for all pending tx completions to arrive so that
2113          * all tx skbs are freed.
2114          */
2115         be_tx_compl_clean(adapter);
2116
2117         return 0;
2118 }
2119
2120 static int be_open(struct net_device *netdev)
2121 {
2122         struct be_adapter *adapter = netdev_priv(netdev);
2123         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2124         struct be_rx_obj *rxo;
2125         bool link_up;
2126         int status, i;
2127         u8 mac_speed;
2128         u16 link_speed;
2129
2130         for_all_rx_queues(adapter, rxo, i) {
2131                 be_post_rx_frags(rxo);
2132                 napi_enable(&rxo->rx_eq.napi);
2133         }
2134         napi_enable(&tx_eq->napi);
2135
2136         be_irq_register(adapter);
2137
2138         if (!lancer_chip(adapter))
2139                 be_intr_set(adapter, true);
2140
2141         /* The evt queues are created in unarmed state; arm them */
2142         for_all_rx_queues(adapter, rxo, i) {
2143                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2144                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2145         }
2146         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2147
2148         /* Now that interrupts are on we can process async mcc */
2149         be_async_mcc_enable(adapter);
2150
2151         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2152                         &link_speed);
2153         if (status)
2154                 goto err;
2155         be_link_status_update(adapter, link_up);
2156
2157         if (be_physfn(adapter)) {
2158                 status = be_vid_config(adapter, false, 0);
2159                 if (status)
2160                         goto err;
2161
2162                 status = be_cmd_set_flow_control(adapter,
2163                                 adapter->tx_fc, adapter->rx_fc);
2164                 if (status)
2165                         goto err;
2166         }
2167
2168         return 0;
2169 err:
2170         be_close(adapter->netdev);
2171         return -EIO;
2172 }
2173
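/* Enable or disable magic-packet Wake-on-LAN: program the magic WoL MAC via
 * a mailbox command (a zeroed MAC disarms it), flip the PCI PM config bits
 * and set the D3hot/D3cold wake flags to match.
 */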
2174 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2175 {
2176         struct be_dma_mem cmd;
2177         int status = 0;
2178         u8 mac[ETH_ALEN];
2179
2180         memset(mac, 0, ETH_ALEN);
2181
2182         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2183         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2184                                     GFP_KERNEL);
2185         if (cmd.va == NULL)
2186                 return -1;
2187         memset(cmd.va, 0, cmd.size);
2188
2189         if (enable) {
2190                 status = pci_write_config_dword(adapter->pdev,
2191                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2192                 if (status) {
2193                         dev_err(&adapter->pdev->dev,
2194                                 "Could not enable Wake-on-LAN\n");
2195                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2196                                           cmd.dma);
2197                         return status;
2198                 }
2199                 status = be_cmd_enable_magic_wol(adapter,
2200                                 adapter->netdev->dev_addr, &cmd);
2201                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2202                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2203         } else {
2204                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2205                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2206                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2207         }
2208
2209         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2210         return status;
2211 }
2212
2213 /*
2214  * Generate a seed MAC address from the PF MAC address using jhash.
2215  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2216  * These addresses are programmed in the ASIC by the PF and the VF driver
2217  * queries for the MAC address during its probe.
2218  */
2219 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2220 {
2221         u32 vf = 0;
2222         int status = 0;
2223         u8 mac[ETH_ALEN];
2224
2225         be_vf_eth_addr_generate(adapter, mac);
2226
2227         for (vf = 0; vf < num_vfs; vf++) {
2228                 status = be_cmd_pmac_add(adapter, mac,
2229                                         adapter->vf_cfg[vf].vf_if_handle,
2230                                         &adapter->vf_cfg[vf].vf_pmac_id);
2231                 if (status)
2232                         dev_err(&adapter->pdev->dev,
2233                                 "Mac address add failed for VF %d\n", vf);
2234                 else
2235                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2236
2237                 mac[5] += 1;
2238         }
2239         return status;
2240 }
2241
2242 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2243 {
2244         u32 vf;
2245
2246         for (vf = 0; vf < num_vfs; vf++) {
2247                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248                         be_cmd_pmac_del(adapter,
2249                                         adapter->vf_cfg[vf].vf_if_handle,
2250                                         adapter->vf_cfg[vf].vf_pmac_id);
2251         }
2252 }
2253
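/* Bring the function to a usable state: create the PF interface (plus one
 * interface per VF when SR-IOV is on), then the TX, RX and MCC queue sets,
 * and finally program the VF MAC addresses. Unwinds in reverse on error.
 */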
2254 static int be_setup(struct be_adapter *adapter)
2255 {
2256         struct net_device *netdev = adapter->netdev;
2257         u32 cap_flags, en_flags, vf = 0;
2258         int status;
2259         u8 mac[ETH_ALEN];
2260
2261         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2262
2263         if (be_physfn(adapter)) {
2264                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2265                                 BE_IF_FLAGS_PROMISCUOUS |
2266                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2267                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2268
2269                 if (be_multi_rxq(adapter)) {
2270                         cap_flags |= BE_IF_FLAGS_RSS;
2271                         en_flags |= BE_IF_FLAGS_RSS;
2272                 }
2273         }
2274
2275         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2276                         netdev->dev_addr, false/* pmac_invalid */,
2277                         &adapter->if_handle, &adapter->pmac_id, 0);
2278         if (status != 0)
2279                 goto do_none;
2280
2281         if (be_physfn(adapter)) {
2282                 while (vf < num_vfs) {
2283                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2284                                         | BE_IF_FLAGS_BROADCAST;
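                        /* 'mac' is passed uninitialized but ignored because
                         * pmac_invalid is true; the VF MACs are programmed
                         * later by be_vf_eth_addr_config().
                         */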
2285                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2286                                         mac, true,
2287                                         &adapter->vf_cfg[vf].vf_if_handle,
2288                                         NULL, vf+1);
2289                         if (status) {
2290                                 dev_err(&adapter->pdev->dev,
2291                                 "Interface Create failed for VF %d\n", vf);
2292                                 goto if_destroy;
2293                         }
2294                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2295                         vf++;
2296                 }
2297         } else {
2298                 status = be_cmd_mac_addr_query(adapter, mac,
2299                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2300                 if (!status) {
2301                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2302                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2303                 }
2304         }
2305
2306         status = be_tx_queues_create(adapter);
2307         if (status != 0)
2308                 goto if_destroy;
2309
2310         status = be_rx_queues_create(adapter);
2311         if (status != 0)
2312                 goto tx_qs_destroy;
2313
2314         status = be_mcc_queues_create(adapter);
2315         if (status != 0)
2316                 goto rx_qs_destroy;
2317
2318         if (be_physfn(adapter)) {
2319                 status = be_vf_eth_addr_config(adapter);
2320                 if (status)
2321                         goto mcc_q_destroy;
2322         }
2323
2324         adapter->link_speed = -1;
2325
2326         return 0;
2327
2328 mcc_q_destroy:
2329         if (be_physfn(adapter))
2330                 be_vf_eth_addr_rem(adapter);
2331         be_mcc_queues_destroy(adapter);
2332 rx_qs_destroy:
2333         be_rx_queues_destroy(adapter);
2334 tx_qs_destroy:
2335         be_tx_queues_destroy(adapter);
2336 if_destroy:
2337         for (vf = 0; vf < num_vfs; vf++)
2338                 if (adapter->vf_cfg[vf].vf_if_handle)
2339                         be_cmd_if_destroy(adapter,
2340                                         adapter->vf_cfg[vf].vf_if_handle);
2341         be_cmd_if_destroy(adapter, adapter->if_handle);
2342 do_none:
2343         return status;
2344 }
2345
2346 static int be_clear(struct be_adapter *adapter)
2347 {
2348         if (be_physfn(adapter))
2349                 be_vf_eth_addr_rem(adapter);
2350
2351         be_mcc_queues_destroy(adapter);
2352         be_rx_queues_destroy(adapter);
2353         be_tx_queues_destroy(adapter);
2354
2355         be_cmd_if_destroy(adapter, adapter->if_handle);
2356
2357         /* tell fw we're done with firing cmds */
2358         be_cmd_fw_clean(adapter);
2359         return 0;
2360 }
2361
2363 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2364 static bool be_flash_redboot(struct be_adapter *adapter,
2365                         const u8 *p, u32 img_start, int image_size,
2366                         int hdr_size)
2367 {
2368         u32 crc_offset;
2369         u8 flashed_crc[4];
2370         int status;
2371
2372         crc_offset = hdr_size + img_start + image_size - 4;
2373
2374         p += crc_offset;
2375
2376         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2377                         (image_size - 4));
2378         if (status) {
2379                 dev_err(&adapter->pdev->dev,
2380                 "could not get crc from flash, not flashing redboot\n");
2381                 return false;
2382         }
2383
2384         /* update redboot only if the flashed crc does not match */
2385         return memcmp(flashed_crc, p, 4) != 0;
2389 }
2390
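/* Walk the per-generation flash component table and write each image from
 * the UFI file into flash in 32KB chunks: intermediate chunks use
 * FLASHROM_OPER_SAVE and the final chunk uses FLASHROM_OPER_FLASH to commit
 * the image.
 */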
2391 static int be_flash_data(struct be_adapter *adapter,
2392                         const struct firmware *fw,
2393                         struct be_dma_mem *flash_cmd, int num_of_images)
2395 {
2396         int status = 0, i, filehdr_size = 0;
2397         u32 total_bytes = 0, flash_op;
2398         int num_bytes;
2399         const u8 *p = fw->data;
2400         struct be_cmd_write_flashrom *req = flash_cmd->va;
2401         const struct flash_comp *pflashcomp;
2402         int num_comp;
2403
2404         static const struct flash_comp gen3_flash_types[9] = {
2405                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2406                         FLASH_IMAGE_MAX_SIZE_g3},
2407                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2408                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2409                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2410                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2412                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2414                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2415                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2416                         FLASH_IMAGE_MAX_SIZE_g3},
2417                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2418                         FLASH_IMAGE_MAX_SIZE_g3},
2419                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2420                         FLASH_IMAGE_MAX_SIZE_g3},
2421                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2422                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2423         };
2424         static const struct flash_comp gen2_flash_types[8] = {
2425                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2426                         FLASH_IMAGE_MAX_SIZE_g2},
2427                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2428                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2429                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2430                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2432                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2434                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2435                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2436                         FLASH_IMAGE_MAX_SIZE_g2},
2437                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2438                         FLASH_IMAGE_MAX_SIZE_g2},
2439                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2440                          FLASH_IMAGE_MAX_SIZE_g2}
2441         };
2442
2443         if (adapter->generation == BE_GEN3) {
2444                 pflashcomp = gen3_flash_types;
2445                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2446                 num_comp = ARRAY_SIZE(gen3_flash_types);
2447         } else {
2448                 pflashcomp = gen2_flash_types;
2449                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2450                 num_comp = ARRAY_SIZE(gen2_flash_types);
2451         }
2452         for (i = 0; i < num_comp; i++) {
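                /* NC-SI firmware is flashed only when the FW already on the
                 * adapter is at version 3.102.148.0 or newer.
                 */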
2453                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2454                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2455                         continue;
2456                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2457                         (!be_flash_redboot(adapter, fw->data,
2458                          pflashcomp[i].offset, pflashcomp[i].size,
2459                          filehdr_size)))
2460                         continue;
2461                 p = fw->data;
2462                 p += filehdr_size + pflashcomp[i].offset
2463                         + (num_of_images * sizeof(struct image_hdr));
2464                 if (p + pflashcomp[i].size > fw->data + fw->size)
2465                         return -1;
2466                 total_bytes = pflashcomp[i].size;
2467                 while (total_bytes) {
2468                         if (total_bytes > 32*1024)
2469                                 num_bytes = 32*1024;
2470                         else
2471                                 num_bytes = total_bytes;
2472                         total_bytes -= num_bytes;
2473
2474                         if (!total_bytes)
2475                                 flash_op = FLASHROM_OPER_FLASH;
2476                         else
2477                                 flash_op = FLASHROM_OPER_SAVE;
2478                         memcpy(req->params.data_buf, p, num_bytes);
2479                         p += num_bytes;
2480                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2481                                 pflashcomp[i].optype, flash_op, num_bytes);
2482                         if (status) {
2483                                 dev_err(&adapter->pdev->dev,
2484                                         "cmd to write to flash rom failed.\n");
2485                                 return -1;
2486                         }
2487                         yield();
2488                 }
2489         }
2490         return 0;
2491 }
2492
2493 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2494 {
2495         if (fhdr == NULL)
2496                 return 0;
2497         if (fhdr->build[0] == '3')
2498                 return BE_GEN3;
2499         else if (fhdr->build[0] == '2')
2500                 return BE_GEN2;
2501         else
2502                 return 0;
2503 }
2504
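/* Fetch a UFI file via request_firmware() and flash it. The generation
 * recorded in the UFI header must match the adapter's generation; otherwise
 * the flash is refused.
 */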
2505 int be_load_fw(struct be_adapter *adapter, u8 *func)
2506 {
2507         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2508         const struct firmware *fw;
2509         struct flash_file_hdr_g2 *fhdr;
2510         struct flash_file_hdr_g3 *fhdr3;
2511         struct image_hdr *img_hdr_ptr = NULL;
2512         struct be_dma_mem flash_cmd;
2513         int status, i = 0, num_imgs = 0;
2514         const u8 *p;
2515
2516         if (!netif_running(adapter->netdev)) {
2517                 dev_err(&adapter->pdev->dev,
2518                         "Firmware load not allowed (interface is down)\n");
2519                 return -EPERM;
2520         }
2521
2522         strcpy(fw_file, func);
2523
2524         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2525         if (status)
2526                 goto fw_exit;
2527
2528         p = fw->data;
2529         fhdr = (struct flash_file_hdr_g2 *) p;
2530         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531
2532         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2533         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2534                                           &flash_cmd.dma, GFP_KERNEL);
2535         if (!flash_cmd.va) {
2536                 status = -ENOMEM;
2537                 dev_err(&adapter->pdev->dev,
2538                         "Memory allocation failure while flashing\n");
2539                 goto fw_exit;
2540         }
2541
2542         if ((adapter->generation == BE_GEN3) &&
2543                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2544                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2545                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2546                 for (i = 0; i < num_imgs; i++) {
2547                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2548                                         (sizeof(struct flash_file_hdr_g3) +
2549                                          i * sizeof(struct image_hdr)));
2550                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2551                                 status = be_flash_data(adapter, fw, &flash_cmd,
2552                                                         num_imgs);
2553                 }
2554         } else if ((adapter->generation == BE_GEN2) &&
2555                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2556                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2557         } else {
2558                 dev_err(&adapter->pdev->dev,
2559                         "UFI and Interface are not compatible for flashing\n");
2560                 status = -1;
2561         }
2562
2563         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2564                           flash_cmd.dma);
2565         if (status) {
2566                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2567                 goto fw_exit;
2568         }
2569
2570         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2571
2572 fw_exit:
2573         release_firmware(fw);
2574         return status;
2575 }
2576
2577 static const struct net_device_ops be_netdev_ops = {
2578         .ndo_open               = be_open,
2579         .ndo_stop               = be_close,
2580         .ndo_start_xmit         = be_xmit,
2581         .ndo_set_rx_mode        = be_set_multicast_list,
2582         .ndo_set_mac_address    = be_mac_addr_set,
2583         .ndo_change_mtu         = be_change_mtu,
2584         .ndo_validate_addr      = eth_validate_addr,
2585         .ndo_vlan_rx_register   = be_vlan_register,
2586         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2587         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2588         .ndo_set_vf_mac         = be_set_vf_mac,
2589         .ndo_set_vf_vlan        = be_set_vf_vlan,
2590         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2591         .ndo_get_vf_config      = be_get_vf_config
2592 };
2593
2594 static void be_netdev_init(struct net_device *netdev)
2595 {
2596         struct be_adapter *adapter = netdev_priv(netdev);
2597         struct be_rx_obj *rxo;
2598         int i;
2599
2600         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2601                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2603                 NETIF_F_GRO | NETIF_F_TSO6;
2604
2605         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607
2608         if (lancer_chip(adapter))
2609                 netdev->vlan_features |= NETIF_F_TSO6;
2610
2611         netdev->flags |= IFF_MULTICAST;
2612
2613         adapter->rx_csum = true;
2614
2615         /* Default settings for Rx and Tx flow control */
2616         adapter->rx_fc = true;
2617         adapter->tx_fc = true;
2618
2619         netif_set_gso_max_size(netdev, 65535);
2620
2621         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2622
2623         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2624
2625         for_all_rx_queues(adapter, rxo, i)
2626                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2627                                 BE_NAPI_WEIGHT);
2628
2629         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2630                 BE_NAPI_WEIGHT);
2631
2632         netif_stop_queue(netdev);
2633 }
2634
2635 static void be_unmap_pci_bars(struct be_adapter *adapter)
2636 {
2637         if (adapter->csr)
2638                 iounmap(adapter->csr);
2639         if (adapter->db)
2640                 iounmap(adapter->db);
2641         if (adapter->pcicfg && be_physfn(adapter))
2642                 iounmap(adapter->pcicfg);
2643 }
2644
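/* BAR layout differs by family: Lancer exposes only a doorbell BAR (0). On
 * BE2/BE3 the PF maps CSR (BAR 2), a doorbell BAR (4) and a PCI config
 * shadow BAR (1 on gen2, 0 on gen3); VFs use BAR 0 for doorbells and reach
 * the config shadow at a fixed offset within it.
 */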
2645 static int be_map_pci_bars(struct be_adapter *adapter)
2646 {
2647         u8 __iomem *addr;
2648         int pcicfg_reg, db_reg;
2649
2650         if (lancer_chip(adapter)) {
2651                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652                         pci_resource_len(adapter->pdev, 0));
2653                 if (addr == NULL)
2654                         return -ENOMEM;
2655                 adapter->db = addr;
2656                 return 0;
2657         }
2658
2659         if (be_physfn(adapter)) {
2660                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2661                                 pci_resource_len(adapter->pdev, 2));
2662                 if (addr == NULL)
2663                         return -ENOMEM;
2664                 adapter->csr = addr;
2665         }
2666
2667         if (adapter->generation == BE_GEN2) {
2668                 pcicfg_reg = 1;
2669                 db_reg = 4;
2670         } else {
2671                 pcicfg_reg = 0;
2672                 if (be_physfn(adapter))
2673                         db_reg = 4;
2674                 else
2675                         db_reg = 0;
2676         }
2677         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2678                                 pci_resource_len(adapter->pdev, db_reg));
2679         if (addr == NULL)
2680                 goto pci_map_err;
2681         adapter->db = addr;
2682
2683         if (be_physfn(adapter)) {
2684                 addr = ioremap_nocache(
2685                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2686                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2687                 if (addr == NULL)
2688                         goto pci_map_err;
2689                 adapter->pcicfg = addr;
2690         } else
2691                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2692
2693         return 0;
2694 pci_map_err:
2695         be_unmap_pci_bars(adapter);
2696         return -ENOMEM;
2697 }
2698
2700 static void be_ctrl_cleanup(struct be_adapter *adapter)
2701 {
2702         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2703
2704         be_unmap_pci_bars(adapter);
2705
2706         if (mem->va)
2707                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2708                                   mem->dma);
2709
2710         mem = &adapter->mc_cmd_mem;
2711         if (mem->va)
2712                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2713                                   mem->dma);
2714 }
2715
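/* Map the PCI BARs and carve out the DMA memory the mailbox protocol needs:
 * the mailbox must be 16-byte aligned, so 16 extra bytes are allocated and
 * PTR_ALIGN() picks the aligned address inside the buffer. Also allocates
 * the multicast config command buffer and initializes the locks.
 */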
2716 static int be_ctrl_init(struct be_adapter *adapter)
2717 {
2718         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2719         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2720         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2721         int status;
2722
2723         status = be_map_pci_bars(adapter);
2724         if (status)
2725                 goto done;
2726
2727         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2728         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2729                                                 mbox_mem_alloc->size,
2730                                                 &mbox_mem_alloc->dma,
2731                                                 GFP_KERNEL);
2732         if (!mbox_mem_alloc->va) {
2733                 status = -ENOMEM;
2734                 goto unmap_pci_bars;
2735         }
2736
2737         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2738         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2739         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2740         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2741
2742         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2743         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2744                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2745                                             GFP_KERNEL);
2746         if (mc_cmd_mem->va == NULL) {
2747                 status = -ENOMEM;
2748                 goto free_mbox;
2749         }
2750         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2751
2752         mutex_init(&adapter->mbox_lock);
2753         spin_lock_init(&adapter->mcc_lock);
2754         spin_lock_init(&adapter->mcc_cq_lock);
2755
2756         init_completion(&adapter->flash_compl);
2757         pci_save_state(adapter->pdev);
2758         return 0;
2759
2760 free_mbox:
2761         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2762                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2763
2764 unmap_pci_bars:
2765         be_unmap_pci_bars(adapter);
2766
2767 done:
2768         return status;
2769 }
2770
2771 static void be_stats_cleanup(struct be_adapter *adapter)
2772 {
2773         struct be_dma_mem *cmd = &adapter->stats_cmd;
2774
2775         if (cmd->va)
2776                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2777                                   cmd->va, cmd->dma);
2778 }
2779
2780 static int be_stats_init(struct be_adapter *adapter)
2781 {
2782         struct be_dma_mem *cmd = &adapter->stats_cmd;
2783
2784         cmd->size = sizeof(struct be_cmd_req_get_stats);
2785         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2786                                      GFP_KERNEL);
2787         if (cmd->va == NULL)
2788                 return -1;
2789         memset(cmd->va, 0, cmd->size);
2790         return 0;
2791 }
2792
2793 static void __devexit be_remove(struct pci_dev *pdev)
2794 {
2795         struct be_adapter *adapter = pci_get_drvdata(pdev);
2796
2797         if (!adapter)
2798                 return;
2799
2800         cancel_delayed_work_sync(&adapter->work);
2801
2802         unregister_netdev(adapter->netdev);
2803
2804         be_clear(adapter);
2805
2806         be_stats_cleanup(adapter);
2807
2808         be_ctrl_cleanup(adapter);
2809
2810         be_sriov_disable(adapter);
2811
2812         be_msix_disable(adapter);
2813
2814         pci_set_drvdata(pdev, NULL);
2815         pci_release_regions(pdev);
2816         pci_disable_device(pdev);
2817
2818         free_netdev(adapter->netdev);
2819 }
2820
2821 static int be_get_config(struct be_adapter *adapter)
2822 {
2823         int status;
2824         u8 mac[ETH_ALEN];
2825
2826         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2827         if (status)
2828                 return status;
2829
2830         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2831                         &adapter->function_mode, &adapter->function_caps);
2832         if (status)
2833                 return status;
2834
2835         memset(mac, 0, ETH_ALEN);
2836
2837         if (be_physfn(adapter)) {
2838                 status = be_cmd_mac_addr_query(adapter, mac,
2839                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2840
2841                 if (status)
2842                         return status;
2843
2844                 if (!is_valid_ether_addr(mac))
2845                         return -EADDRNOTAVAIL;
2846
2847                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2848                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2849         }
2850
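        /* In multi-channel mode (function_mode bit 0x400) the VLAN table is
         * shared, so only a quarter of it is available to this function.
         */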
2851         if (adapter->function_mode & 0x400)
2852                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2853         else
2854                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2855
2856         return 0;
2857 }
2858
2859 static int be_dev_family_check(struct be_adapter *adapter)
2860 {
2861         struct pci_dev *pdev = adapter->pdev;
2862         u32 sli_intf = 0, if_type;
2863
2864         switch (pdev->device) {
2865         case BE_DEVICE_ID1:
2866         case OC_DEVICE_ID1:
2867                 adapter->generation = BE_GEN2;
2868                 break;
2869         case BE_DEVICE_ID2:
2870         case OC_DEVICE_ID2:
2871                 adapter->generation = BE_GEN3;
2872                 break;
2873         case OC_DEVICE_ID3:
2874                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2875                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2876                                                 SLI_INTF_IF_TYPE_SHIFT;
2877
2878                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2879                         if_type != 0x02) {
2880                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2881                         return -EINVAL;
2882                 }
2883                 if (num_vfs > 0) {
2884                         dev_err(&pdev->dev, "VFs not supported\n");
2885                         return -EINVAL;
2886                 }
2887                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2888                                          SLI_INTF_FAMILY_SHIFT);
2889                 adapter->generation = BE_GEN3;
2890                 break;
2891         default:
2892                 adapter->generation = 0;
2893         }
2894         return 0;
2895 }
2896
2897 static int __devinit be_probe(struct pci_dev *pdev,
2898                         const struct pci_device_id *pdev_id)
2899 {
2900         int status = 0;
2901         struct be_adapter *adapter;
2902         struct net_device *netdev;
2903
2904         status = pci_enable_device(pdev);
2905         if (status)
2906                 goto do_none;
2907
2908         status = pci_request_regions(pdev, DRV_NAME);
2909         if (status)
2910                 goto disable_dev;
2911         pci_set_master(pdev);
2912
2913         netdev = alloc_etherdev(sizeof(struct be_adapter));
2914         if (netdev == NULL) {
2915                 status = -ENOMEM;
2916                 goto rel_reg;
2917         }
2918         adapter = netdev_priv(netdev);
2919         adapter->pdev = pdev;
2920         pci_set_drvdata(pdev, adapter);
2921
2922         status = be_dev_family_check(adapter);
2923         if (status)
2924                 goto free_netdev;
2925
2926         adapter->netdev = netdev;
2927         SET_NETDEV_DEV(netdev, &pdev->dev);
2928
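        /* Ask for a 64-bit DMA mask first (and advertise NETIF_F_HIGHDMA if
         * granted); fall back to a 32-bit mask otherwise.
         */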
2929         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2930         if (!status) {
2931                 netdev->features |= NETIF_F_HIGHDMA;
2932         } else {
2933                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2934                 if (status) {
2935                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2936                         goto free_netdev;
2937                 }
2938         }
2939
2940         be_sriov_enable(adapter);
2941
2942         status = be_ctrl_init(adapter);
2943         if (status)
2944                 goto free_netdev;
2945
2946         /* sync up with fw's ready state */
2947         if (be_physfn(adapter)) {
2948                 status = be_cmd_POST(adapter);
2949                 if (status)
2950                         goto ctrl_clean;
2951         }
2952
2953         /* tell fw we're ready to fire cmds */
2954         status = be_cmd_fw_init(adapter);
2955         if (status)
2956                 goto ctrl_clean;
2957
2958         if (be_physfn(adapter)) {
2959                 status = be_cmd_reset_function(adapter);
2960                 if (status)
2961                         goto ctrl_clean;
2962         }
2963
2964         status = be_stats_init(adapter);
2965         if (status)
2966                 goto ctrl_clean;
2967
2968         status = be_get_config(adapter);
2969         if (status)
2970                 goto stats_clean;
2971
2972         be_msix_enable(adapter);
2973
2974         INIT_DELAYED_WORK(&adapter->work, be_worker);
2975
2976         status = be_setup(adapter);
2977         if (status)
2978                 goto msix_disable;
2979
2980         be_netdev_init(netdev);
2981         status = register_netdev(netdev);
2982         if (status != 0)
2983                 goto unsetup;
2984         netif_carrier_off(netdev);
2985
2986         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2987         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2988         return 0;
2989
2990 unsetup:
2991         be_clear(adapter);
2992 msix_disable:
2993         be_msix_disable(adapter);
2994 stats_clean:
2995         be_stats_cleanup(adapter);
2996 ctrl_clean:
2997         be_ctrl_cleanup(adapter);
2998 free_netdev:
2999         be_sriov_disable(adapter);
3000         free_netdev(netdev);
3001         pci_set_drvdata(pdev, NULL);
3002 rel_reg:
3003         pci_release_regions(pdev);
3004 disable_dev:
3005         pci_disable_device(pdev);
3006 do_none:
3007         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3008         return status;
3009 }
3010
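/*
 * Legacy PCI power management entry point.  Arm wake-on-LAN if the
 * user asked for it, quiesce the interface under rtnl_lock, tear the
 * queues and interrupts down with be_clear(), then save config space
 * and drop into the requested low-power state.  Flow-control settings
 * are read back first so they survive the be_clear()/be_setup() cycle.
 */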
3011 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3012 {
3013         struct be_adapter *adapter = pci_get_drvdata(pdev);
3014         struct net_device *netdev = adapter->netdev;
3015
3016         if (adapter->wol)
3017                 be_setup_wol(adapter, true);
3018
3019         netif_device_detach(netdev);
3020         if (netif_running(netdev)) {
3021                 rtnl_lock();
3022                 be_close(netdev);
3023                 rtnl_unlock();
3024         }
3025         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3026         be_clear(adapter);
3027
3028         pci_save_state(pdev);
3029         pci_disable_device(pdev);
3030         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3031         return 0;
3032 }
3033
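/*
 * Mirror image of be_suspend(): re-enable the device, restore config
 * space, redo the firmware handshake, rebuild the rings via be_setup()
 * and reopen the interface if it was up when the system went down.
 * Wake-on-LAN is disarmed last.
 */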
3034 static int be_resume(struct pci_dev *pdev)
3035 {
3036         int status = 0;
3037         struct be_adapter *adapter = pci_get_drvdata(pdev);
3038         struct net_device *netdev = adapter->netdev;
3039
3040         netif_device_detach(netdev);
3041
3042         status = pci_enable_device(pdev);
3043         if (status)
3044                 return status;
3045
3046         pci_set_power_state(pdev, PCI_D0);
3047         pci_restore_state(pdev);
3048
3049         /* tell fw we're ready to fire cmds */
3050         status = be_cmd_fw_init(adapter);
3051         if (status)
3052                 return status;
3053
3054         be_setup(adapter);
3055         if (netif_running(netdev)) {
3056                 rtnl_lock();
3057                 be_open(netdev);
3058                 rtnl_unlock();
3059         }
3060         netif_device_attach(netdev);
3061
3062         if (adapter->wol)
3063                 be_setup_wol(adapter, false);
3064         return 0;
3065 }
3066
3067 /*
3068  * An FLR (function level reset) stops BE from DMAing any data.
3069  */
3070 static void be_shutdown(struct pci_dev *pdev)
3071 {
3072         struct be_adapter *adapter = pci_get_drvdata(pdev);
3073         struct net_device *netdev = adapter->netdev;
3074
3075         netif_device_detach(netdev);
3076
3077         be_cmd_reset_function(adapter);
3078
3079         if (adapter->wol)
3080                 be_setup_wol(adapter, true);
3081
3082         pci_disable_device(pdev);
3083 }
3084
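/*
 * EEH/AER error handling.  When the PCI core reports a channel
 * failure we detach the netdev, close it if it was running and free
 * all rings; the return value tells the core whether to attempt a
 * slot reset or give up on the device entirely.
 */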
3085 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3086                                 pci_channel_state_t state)
3087 {
3088         struct be_adapter *adapter = pci_get_drvdata(pdev);
3089         struct net_device *netdev = adapter->netdev;
3090
3091         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3092
3093         adapter->eeh_err = true;
3094
3095         netif_device_detach(netdev);
3096
3097         if (netif_running(netdev)) {
3098                 rtnl_lock();
3099                 be_close(netdev);
3100                 rtnl_unlock();
3101         }
3102         be_clear(adapter);
3103
3104         if (state == pci_channel_io_perm_failure)
3105                 return PCI_ERS_RESULT_DISCONNECT;
3106
3107         pci_disable_device(pdev);
3108
3109         return PCI_ERS_RESULT_NEED_RESET;
3110 }
3111
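/*
 * Called once the slot has been reset: re-enable the function,
 * restore config space and poll POST so we only report the device
 * recovered after the firmware is known to be alive again.
 */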
3112 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3113 {
3114         struct be_adapter *adapter = pci_get_drvdata(pdev);
3115         int status;
3116
3117         dev_info(&adapter->pdev->dev, "EEH reset\n");
3118         adapter->eeh_err = false;
3119
3120         status = pci_enable_device(pdev);
3121         if (status)
3122                 return PCI_ERS_RESULT_DISCONNECT;
3123
3124         pci_set_master(pdev);
3125         pci_set_power_state(pdev, PCI_D0);
3126         pci_restore_state(pdev);
3127
3128         /* Check if card is ok and fw is ready */
3129         status = be_cmd_POST(adapter);
3130         if (status)
3131                 return PCI_ERS_RESULT_DISCONNECT;
3132
3133         return PCI_ERS_RESULT_RECOVERED;
3134 }
3135
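/*
 * Final stage of EEH recovery: redo the firmware handshake, rebuild
 * the rings and reopen the interface, mirroring the tail end of
 * be_resume().
 */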
3136 static void be_eeh_resume(struct pci_dev *pdev)
3137 {
3138         int status = 0;
3139         struct be_adapter *adapter = pci_get_drvdata(pdev);
3140         struct net_device *netdev = adapter->netdev;
3141
3142         dev_info(&adapter->pdev->dev, "EEH resume\n");
3143
3144         pci_save_state(pdev);
3145
3146         /* tell fw we're ready to fire cmds */
3147         status = be_cmd_fw_init(adapter);
3148         if (status)
3149                 goto err;
3150
3151         status = be_setup(adapter);
3152         if (status)
3153                 goto err;
3154
3155         if (netif_running(netdev)) {
3156                 status = be_open(netdev);
3157                 if (status)
3158                         goto err;
3159         }
3160         netif_device_attach(netdev);
3161         return;
3162 err:
3163         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3164 }
3165
3166 static struct pci_error_handlers be_eeh_handlers = {
3167         .error_detected = be_eeh_err_detected,
3168         .slot_reset = be_eeh_reset,
3169         .resume = be_eeh_resume,
3170 };
3171
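/* Wire the driver into the PCI core; err_handler enables EEH recovery */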
3172 static struct pci_driver be_driver = {
3173         .name = DRV_NAME,
3174         .id_table = be_dev_ids,
3175         .probe = be_probe,
3176         .remove = be_remove,
3177         .suspend = be_suspend,
3178         .resume = be_resume,
3179         .shutdown = be_shutdown,
3180         .err_handler = &be_eeh_handlers
3181 };
3182
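/*
 * Module init: out-of-range parameters are clamped to safe values
 * rather than failing the load.  For example (values are
 * illustrative only):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=4   # both accepted
 *   modprobe be2net rx_frag_size=1000             # coerced to 2048
 */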
3183 static int __init be_init_module(void)
3184 {
3185         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3186             rx_frag_size != 2048) {
3187                 printk(KERN_WARNING DRV_NAME
3188                         " : Module param rx_frag_size must be 2048/4096/8192."
3189                         " Using 2048\n");
3190                 rx_frag_size = 2048;
3191         }
3192
3193         if (num_vfs > 32) {
3194                 printk(KERN_WARNING DRV_NAME
3195                         " : Module param num_vfs must not be greater than 32."
3196                         " Using 32\n");
3197                 num_vfs = 32;
3198         }
3199
3200         return pci_register_driver(&be_driver);
3201 }
3202 module_init(be_init_module);
3203
3204 static void __exit be_exit_module(void)
3205 {
3206         pci_unregister_driver(&be_driver);
3207 }
3208 module_exit(be_exit_module);