1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
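/* Each received frame is placed in one or more fragments of rx_frag_size
 * bytes carved out of a larger DMA-mapped page; see be_post_rx_frags().
 */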
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds received data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
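/* Each entry names the hardware block that owns the corresponding bit in
 * the UE (Unrecoverable Error) status register.
 */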
49 static char *ue_status_low_desc[] = {
50         "CEV",
51         "CTX",
52         "DBUF",
53         "ERX",
54         "Host",
55         "MPU",
56         "NDMA",
57         "PTC",
58         "RDMA",
59         "RXF",
60         "RXIPS",
61         "RXULP0",
62         "RXULP1",
63         "RXULP2",
64         "TIM",
65         "TPOST",
66         "TPRE",
67         "TXIPS",
68         "TXULP0",
69         "TXULP1",
70         "UC",
71         "WDMA",
72         "TXULP2",
73         "HOST1",
74         "P0_OB_LINK",
75         "P1_OB_LINK",
76         "HOST_GPIO",
77         "MBOX",
78         "AXGMAC0",
79         "AXGMAC1",
80         "JTAG",
81         "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85         "LPCMEMHOST",
86         "MGMT_MAC",
87         "PCS0ONLINE",
88         "MPU_IRAM",
89         "PCS1ONLINE",
90         "PCTL0",
91         "PCTL1",
92         "PMEM",
93         "RR",
94         "TXPB",
95         "RXPP",
96         "XAUI",
97         "TXP",
98         "ARM",
99         "IPC",
100         "HOST2",
101         "HOST3",
102         "HOST4",
103         "HOST5",
104         "HOST6",
105         "HOST7",
106         "HOST8",
107         "HOST9",
108         "NETC",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121         return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126         struct be_dma_mem *mem = &q->dma_mem;
127         if (mem->va)
128                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129                                   mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133                 u16 len, u16 entry_size)
134 {
135         struct be_dma_mem *mem = &q->dma_mem;
136
137         memset(q, 0, sizeof(*q));
138         q->len = len;
139         q->entry_size = entry_size;
140         mem->size = len * entry_size;
141         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142                                      GFP_KERNEL);
143         if (!mem->va)
144                 return -ENOMEM;
145         memset(mem->va, 0, mem->size);
146         return 0;
147 }
148
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152         u32 reg, enabled;
153
154         if (adapter->eeh_err)
155                 return;
156
157         reg = ioread32(addr);
158         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158         if (!enabled && enable)
159                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else if (enabled && !enable)
161                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else
163                 return;
164
165         iowrite32(reg, addr);
166 }
167
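/* Doorbell helpers: the wmb() guarantees the ring descriptors are visible
 * in memory before the doorbell write tells the card how many entries
 * were posted.
 */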
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
169 {
170         u32 val = 0;
171         val |= qid & DB_RQ_RING_ID_MASK;
172         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
173
174         wmb();
175         iowrite32(val, adapter->db + DB_RQ_OFFSET);
176 }
177
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_TXULP_RING_ID_MASK;
182         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
186 }
187
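/* EQ doorbell: acknowledges num_popped processed event entries; can also
 * re-arm the queue and clear the interrupt.
 */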
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189                 bool arm, bool clear_int, u16 num_popped)
190 {
191         u32 val = 0;
192         val |= qid & DB_EQ_RING_ID_MASK;
193         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
195
196         if (adapter->eeh_err)
197                 return;
198
199         if (arm)
200                 val |= 1 << DB_EQ_REARM_SHIFT;
201         if (clear_int)
202                 val |= 1 << DB_EQ_CLR_SHIFT;
203         val |= 1 << DB_EQ_EVNT_SHIFT;
204         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205         iowrite32(val, adapter->db + DB_EQ_OFFSET);
206 }
207
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
209 {
210         u32 val = 0;
211         val |= qid & DB_CQ_RING_ID_MASK;
212         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
214
215         if (adapter->eeh_err)
216                 return;
217
218         if (arm)
219                 val |= 1 << DB_CQ_REARM_SHIFT;
220         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221         iowrite32(val, adapter->db + DB_CQ_OFFSET);
222 }
223
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
225 {
226         struct be_adapter *adapter = netdev_priv(netdev);
227         struct sockaddr *addr = p;
228         int status = 0;
229
230         if (!is_valid_ether_addr(addr->sa_data))
231                 return -EADDRNOTAVAIL;
232
233         /* MAC addr configuration will be done in hardware for VFs
234          * by their corresponding PFs. Just copy to netdev addr here
235          */
236         if (!be_physfn(adapter))
237                 goto netdev_addr;
238
239         status = be_cmd_pmac_del(adapter, adapter->if_handle,
240                                 adapter->pmac_id, 0);
241         if (status)
242                 return status;
243
244         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245                                 adapter->if_handle, &adapter->pmac_id, 0);
246 netdev_addr:
247         if (!status)
248                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
249
250         return status;
251 }
252
253 void netdev_stats_update(struct be_adapter *adapter)
254 {
255         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
256         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257         struct be_port_rxf_stats *port_stats =
258                         &rxf_stats->port[adapter->port_num];
259         struct net_device_stats *dev_stats = &adapter->netdev->stats;
260         struct be_erx_stats *erx_stats = &hw_stats->erx;
261         struct be_rx_obj *rxo;
262         int i;
263
264         memset(dev_stats, 0, sizeof(*dev_stats));
265         for_all_rx_queues(adapter, rxo, i) {
266                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
267                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269                 /* no space in linux buffers: best possible approximation */
270                 dev_stats->rx_dropped +=
271                         erx_stats->rx_drops_no_fragments[rxo->q.id];
272         }
273
274         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
276
277         /* bad pkts received */
278         dev_stats->rx_errors = port_stats->rx_crc_errors +
279                 port_stats->rx_alignment_symbol_errors +
280                 port_stats->rx_in_range_errors +
281                 port_stats->rx_out_range_errors +
282                 port_stats->rx_frame_too_long +
283                 port_stats->rx_dropped_too_small +
284                 port_stats->rx_dropped_too_short +
285                 port_stats->rx_dropped_header_too_small +
286                 port_stats->rx_dropped_tcp_length +
287                 port_stats->rx_dropped_runt +
288                 port_stats->rx_tcp_checksum_errs +
289                 port_stats->rx_ip_checksum_errs +
290                 port_stats->rx_udp_checksum_errs;
291
292         /* detailed rx errors */
293         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
294                 port_stats->rx_out_range_errors +
295                 port_stats->rx_frame_too_long;
296
297         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
298
299         /* frame alignment errors */
300         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
301
302         /* receiver fifo overrun */
303         /* drops_no_pbuf is not per i/f; it's per BE card */
304         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
305                                         port_stats->rx_input_fifo_overflow +
306                                         rxf_stats->rx_drops_no_pbuf;
307 }
308
309 void be_link_status_update(struct be_adapter *adapter, bool link_up)
310 {
311         struct net_device *netdev = adapter->netdev;
312
313         /* If link came up or went down */
314         if (adapter->link_up != link_up) {
315                 adapter->link_speed = -1;
316                 if (link_up) {
317                         netif_carrier_on(netdev);
318                         printk(KERN_INFO "%s: Link up\n", netdev->name);
319                 } else {
320                         netif_carrier_off(netdev);
321                         printk(KERN_INFO "%s: Link down\n", netdev->name);
322                 }
323                 adapter->link_up = link_up;
324         }
325 }
326
327 /* Update the EQ delay in BE based on the RX frags consumed per second */
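/* Heuristic: scale the observed frags/sec down to an EQ-delay value, clamp
 * it to [min_eqd, max_eqd], and turn coalescing off entirely at very low
 * rates (eqd < 10).
 */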
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
329 {
330         struct be_eq_obj *rx_eq = &rxo->rx_eq;
331         struct be_rx_stats *stats = &rxo->stats;
332         ulong now = jiffies;
333         u32 eqd;
334
335         if (!rx_eq->enable_aic)
336                 return;
337
338         /* Wrapped around */
339         if (time_before(now, stats->rx_fps_jiffies)) {
340                 stats->rx_fps_jiffies = now;
341                 return;
342         }
343
344         /* Update once a second */
345         if ((now - stats->rx_fps_jiffies) < HZ)
346                 return;
347
348         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349                         ((now - stats->rx_fps_jiffies) / HZ);
350
351         stats->rx_fps_jiffies = now;
352         stats->prev_rx_frags = stats->rx_frags;
353         eqd = stats->rx_fps / 110000;
354         eqd = eqd << 3;
355         if (eqd > rx_eq->max_eqd)
356                 eqd = rx_eq->max_eqd;
357         if (eqd < rx_eq->min_eqd)
358                 eqd = rx_eq->min_eqd;
359         if (eqd < 10)
360                 eqd = 0;
361         if (eqd != rx_eq->cur_eqd)
362                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
363
364         rx_eq->cur_eqd = eqd;
365 }
366
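/* Convert a byte count accumulated over 'ticks' jiffies to Mbits/sec.
 * Callers must ensure ticks >= HZ, else ticks/HZ is 0 and do_div would
 * divide by zero.
 */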
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
368 {
369         u64 rate = bytes;
370
371         do_div(rate, ticks / HZ);
372         rate <<= 3;                     /* bytes/sec -> bits/sec */
373         do_div(rate, 1000000ul);        /* Mbits/sec */
374
375         return rate;
376 }
377
378 static void be_tx_rate_update(struct be_adapter *adapter)
379 {
380         struct be_tx_stats *stats = tx_stats(adapter);
381         ulong now = jiffies;
382
383         /* Wrapped around? */
384         if (time_before(now, stats->be_tx_jiffies)) {
385                 stats->be_tx_jiffies = now;
386                 return;
387         }
388
389         /* Update tx rate once every two seconds */
390         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392                                                   - stats->be_tx_bytes_prev,
393                                                  now - stats->be_tx_jiffies);
394                 stats->be_tx_jiffies = now;
395                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
396         }
397 }
398
399 static void be_tx_stats_update(struct be_adapter *adapter,
400                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
401 {
402         struct be_tx_stats *stats = tx_stats(adapter);
403         stats->be_tx_reqs++;
404         stats->be_tx_wrbs += wrb_cnt;
405         stats->be_tx_bytes += copied;
406         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407         if (stopped)
408                 stats->be_tx_stops++;
409 }
410
411 /* Determine number of WRB entries needed to xmit data in an skb */
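/* BE2/BE3 require an even number of WRBs per TX request, so a dummy WRB
 * may be appended; Lancer has no such restriction.
 */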
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413                                                                 bool *dummy)
414 {
415         int cnt = (skb->len > skb->data_len);
416
417         cnt += skb_shinfo(skb)->nr_frags;
418
419         /* to account for hdr wrb */
420         cnt++;
421         if (lancer_chip(adapter) || !(cnt & 1)) {
422                 *dummy = false;
423         } else {
424                 /* add a dummy to make it an even number */
425                 cnt++;
426                 *dummy = true;
427         }
428         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429         return cnt;
430 }
431
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
433 {
434         wrb->frag_pa_hi = upper_32_bits(addr);
435         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
437 }
438
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
441 {
442         u8 vlan_prio = 0;
443         u16 vlan_tag = 0;
444
445         memset(hdr, 0, sizeof(*hdr));
446
447         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
448
449         if (skb_is_gso(skb)) {
450                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452                         hdr, skb_shinfo(skb)->gso_size);
453                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455                 if (lancer_chip(adapter) && adapter->sli_family ==
456                                                         LANCER_A0_SLI_FAMILY) {
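                        /* Lancer A0 expects the checksum-enable bits to be
                         * set explicitly for LSO packets */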
457                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458                         if (is_tcp_pkt(skb))
459                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460                                                                 tcpcs, hdr, 1);
461                         else if (is_udp_pkt(skb))
462                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463                                                                 udpcs, hdr, 1);
464                 }
465         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466                 if (is_tcp_pkt(skb))
467                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468                 else if (is_udp_pkt(skb))
469                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
470         }
471
472         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474                 vlan_tag = vlan_tx_tag_get(skb);
475                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476                 /* If vlan priority provided by OS is NOT in available bmap */
477                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479                                         adapter->recommended_prio;
480                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
481         }
482
483         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487 }
488
489 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490                 bool unmap_single)
491 {
492         dma_addr_t dma;
493
494         be_dws_le_to_cpu(wrb, sizeof(*wrb));
495
496         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497         if (wrb->frag_len) {
498                 if (unmap_single)
499                         dma_unmap_single(dev, dma, wrb->frag_len,
500                                          DMA_TO_DEVICE);
501                 else
502                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
503         }
504 }
505
506 static int make_tx_wrbs(struct be_adapter *adapter,
507                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
508 {
509         dma_addr_t busaddr;
510         int i, copied = 0;
511         struct device *dev = &adapter->pdev->dev;
512         struct sk_buff *first_skb = skb;
513         struct be_queue_info *txq = &adapter->tx_obj.q;
514         struct be_eth_wrb *wrb;
515         struct be_eth_hdr_wrb *hdr;
516         bool map_single = false;
517         u16 map_head;
518
519         hdr = queue_head_node(txq);
520         queue_head_inc(txq);
521         map_head = txq->head;
522
523         if (skb->len > skb->data_len) {
524                 int len = skb_headlen(skb);
525                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
526                 if (dma_mapping_error(dev, busaddr))
527                         goto dma_err;
528                 map_single = true;
529                 wrb = queue_head_node(txq);
530                 wrb_fill(wrb, busaddr, len);
531                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532                 queue_head_inc(txq);
533                 copied += len;
534         }
535
536         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537                 struct skb_frag_struct *frag =
538                         &skb_shinfo(skb)->frags[i];
539                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540                                        frag->size, DMA_TO_DEVICE);
541                 if (dma_mapping_error(dev, busaddr))
542                         goto dma_err;
543                 wrb = queue_head_node(txq);
544                 wrb_fill(wrb, busaddr, frag->size);
545                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
546                 queue_head_inc(txq);
547                 copied += frag->size;
548         }
549
550         if (dummy_wrb) {
551                 wrb = queue_head_node(txq);
552                 wrb_fill(wrb, 0, 0);
553                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
554                 queue_head_inc(txq);
555         }
556
557         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
558         be_dws_cpu_to_le(hdr, sizeof(*hdr));
559
560         return copied;
561 dma_err:
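        /* Unmap whatever was mapped so far: rewind head to the first data
         * WRB and walk forward, unmapping each filled fragment */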
562         txq->head = map_head;
563         while (copied) {
564                 wrb = queue_head_node(txq);
565                 unmap_tx_frag(dev, wrb, map_single);
566                 map_single = false;
567                 copied -= wrb->frag_len;
568                 queue_head_inc(txq);
569         }
570         return 0;
571 }
572
573 static netdev_tx_t be_xmit(struct sk_buff *skb,
574                         struct net_device *netdev)
575 {
576         struct be_adapter *adapter = netdev_priv(netdev);
577         struct be_tx_obj *tx_obj = &adapter->tx_obj;
578         struct be_queue_info *txq = &tx_obj->q;
579         u32 wrb_cnt = 0, copied = 0;
580         u32 start = txq->head;
581         bool dummy_wrb, stopped = false;
582
583         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
584
585         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
586         if (copied) {
587                 /* record the sent skb in the sent_skb table */
588                 BUG_ON(tx_obj->sent_skb_list[start]);
589                 tx_obj->sent_skb_list[start] = skb;
590
591                 /* Ensure txq has space for the next skb; else stop the queue
592                  * *BEFORE* ringing the tx doorbell, so that tx completions of
593                  * the current transmit can wake up the stopped queue
594                  */
595                 atomic_add(wrb_cnt, &txq->used);
596                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
597                                                                 txq->len) {
598                         netif_stop_queue(netdev);
599                         stopped = true;
600                 }
601
602                 be_txq_notify(adapter, txq->id, wrb_cnt);
603
604                 be_tx_stats_update(adapter, wrb_cnt, copied,
605                                 skb_shinfo(skb)->gso_segs, stopped);
606         } else {
607                 txq->head = start;
608                 dev_kfree_skb_any(skb);
609         }
610         return NETDEV_TX_OK;
611 }
612
613 static int be_change_mtu(struct net_device *netdev, int new_mtu)
614 {
615         struct be_adapter *adapter = netdev_priv(netdev);
616         if (new_mtu < BE_MIN_MTU ||
617                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
618                                         (ETH_HLEN + ETH_FCS_LEN))) {
619                 dev_info(&adapter->pdev->dev,
620                         "MTU must be between %d and %d bytes\n",
621                         BE_MIN_MTU,
622                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
623                 return -EINVAL;
624         }
625         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
626                         netdev->mtu, new_mtu);
627         netdev->mtu = new_mtu;
628         return 0;
629 }
630
631 /*
632  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
633  * If the user configures more, place BE in vlan promiscuous mode.
634  */
635 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
636 {
637         u16 vtag[BE_NUM_VLANS_SUPPORTED];
638         u16 ntags = 0, i;
639         int status = 0;
640         u32 if_handle;
641
642         if (vf) {
643                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
644                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
645                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
646         }
647
648         if (adapter->vlans_added <= adapter->max_vlans)  {
649                 /* Construct VLAN Table to give to HW */
650                 for (i = 0; i < VLAN_N_VID; i++) {
651                         if (adapter->vlan_tag[i]) {
652                                 vtag[ntags] = cpu_to_le16(i);
653                                 ntags++;
654                         }
655                 }
656                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
657                                         vtag, ntags, 1, 0);
658         } else {
659                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660                                         NULL, 0, 1, 1);
661         }
662
663         return status;
664 }
665
666 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
667 {
668         struct be_adapter *adapter = netdev_priv(netdev);
669
670         adapter->vlan_grp = grp;
671 }
672
673 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
674 {
675         struct be_adapter *adapter = netdev_priv(netdev);
676
677         adapter->vlans_added++;
678         if (!be_physfn(adapter))
679                 return;
680
681         adapter->vlan_tag[vid] = 1;
682         if (adapter->vlans_added <= (adapter->max_vlans + 1))
683                 be_vid_config(adapter, false, 0);
684 }
685
686 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
687 {
688         struct be_adapter *adapter = netdev_priv(netdev);
689
690         adapter->vlans_added--;
691         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
692
693         if (!be_physfn(adapter))
694                 return;
695
696         adapter->vlan_tag[vid] = 0;
697         if (adapter->vlans_added <= adapter->max_vlans)
698                 be_vid_config(adapter, false, 0);
699 }
700
701 static void be_set_multicast_list(struct net_device *netdev)
702 {
703         struct be_adapter *adapter = netdev_priv(netdev);
704
705         if (netdev->flags & IFF_PROMISC) {
706                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
707                 adapter->promiscuous = true;
708                 goto done;
709         }
710
711         /* BE was previously in promiscuous mode; disable it */
712         if (adapter->promiscuous) {
713                 adapter->promiscuous = false;
714                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
715         }
716
717         /* Enable multicast promisc if num configured exceeds what we support */
718         if (netdev->flags & IFF_ALLMULTI ||
719             netdev_mc_count(netdev) > BE_MAX_MC) {
720                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
721                                 &adapter->mc_cmd_mem);
722                 goto done;
723         }
724
725         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
726                 &adapter->mc_cmd_mem);
727 done:
728         return;
729 }
730
731 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
732 {
733         struct be_adapter *adapter = netdev_priv(netdev);
734         int status;
735
736         if (!adapter->sriov_enabled)
737                 return -EPERM;
738
739         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740                 return -EINVAL;
741
742         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743                 status = be_cmd_pmac_del(adapter,
744                                         adapter->vf_cfg[vf].vf_if_handle,
745                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
746
747         status = be_cmd_pmac_add(adapter, mac,
748                                 adapter->vf_cfg[vf].vf_if_handle,
749                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
750
751         if (status)
752                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753                                 mac, vf);
754         else
755                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
756
757         return status;
758 }
759
760 static int be_get_vf_config(struct net_device *netdev, int vf,
761                         struct ifla_vf_info *vi)
762 {
763         struct be_adapter *adapter = netdev_priv(netdev);
764
765         if (!adapter->sriov_enabled)
766                 return -EPERM;
767
768         if (vf >= num_vfs)
769                 return -EINVAL;
770
771         vi->vf = vf;
772         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
773         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
774         vi->qos = 0;
775         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
776
777         return 0;
778 }
779
780 static int be_set_vf_vlan(struct net_device *netdev,
781                         int vf, u16 vlan, u8 qos)
782 {
783         struct be_adapter *adapter = netdev_priv(netdev);
784         int status = 0;
785
786         if (!adapter->sriov_enabled)
787                 return -EPERM;
788
789         if ((vf >= num_vfs) || (vlan > 4095))
790                 return -EINVAL;
791
792         if (vlan) {
793                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794                 adapter->vlans_added++;
795         } else {
796                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797                 adapter->vlans_added--;
798         }
799
800         status = be_vid_config(adapter, true, vf);
801
802         if (status)
803                 dev_info(&adapter->pdev->dev,
804                                 "VLAN %d config on VF %d failed\n", vlan, vf);
805         return status;
806 }
807
808 static int be_set_vf_tx_rate(struct net_device *netdev,
809                         int vf, int rate)
810 {
811         struct be_adapter *adapter = netdev_priv(netdev);
812         int status = 0;
813
814         if (!adapter->sriov_enabled)
815                 return -EPERM;
816
817         if ((vf >= num_vfs) || (rate < 0))
818                 return -EINVAL;
819
820         if (rate > 10000)
821                 rate = 10000;
822
823         adapter->vf_cfg[vf].vf_tx_rate = rate;
824         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
825
826         if (status)
827                 dev_info(&adapter->pdev->dev,
828                                 "tx rate %d on VF %d failed\n", rate, vf);
829         return status;
830 }
831
832 static void be_rx_rate_update(struct be_rx_obj *rxo)
833 {
834         struct be_rx_stats *stats = &rxo->stats;
835         ulong now = jiffies;
836
837         /* Wrapped around */
838         if (time_before(now, stats->rx_jiffies)) {
839                 stats->rx_jiffies = now;
840                 return;
841         }
842
843         /* Update the rate once every two seconds */
844         if ((now - stats->rx_jiffies) < 2 * HZ)
845                 return;
846
847         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848                                 now - stats->rx_jiffies);
849         stats->rx_jiffies = now;
850         stats->rx_bytes_prev = stats->rx_bytes;
851 }
852
853 static void be_rx_stats_update(struct be_rx_obj *rxo,
854                 struct be_rx_compl_info *rxcp)
855 {
856         struct be_rx_stats *stats = &rxo->stats;
857
858         stats->rx_compl++;
859         stats->rx_frags += rxcp->num_rcvd;
860         stats->rx_bytes += rxcp->pkt_size;
861         stats->rx_pkts++;
862         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
863                 stats->rx_mcast_pkts++;
864         if (rxcp->err)
865                 stats->rxcp_err++;
866 }
867
868 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
869 {
870         /* L4 checksum is not reliable for non TCP/UDP packets.
871          * Also ignore ipcksm for ipv6 pkts */
872         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
873                                 (rxcp->ip_csum || rxcp->ipv6);
874 }
875
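/* Return the page fragment posted at frag_idx; the backing page is
 * unmapped once its last fragment (last_page_user) is consumed.
 */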
876 static struct be_rx_page_info *
877 get_rx_page_info(struct be_adapter *adapter,
878                 struct be_rx_obj *rxo,
879                 u16 frag_idx)
880 {
881         struct be_rx_page_info *rx_page_info;
882         struct be_queue_info *rxq = &rxo->q;
883
884         rx_page_info = &rxo->page_info_tbl[frag_idx];
885         BUG_ON(!rx_page_info->page);
886
887         if (rx_page_info->last_page_user) {
888                 dma_unmap_page(&adapter->pdev->dev,
889                                dma_unmap_addr(rx_page_info, bus),
890                                adapter->big_page_size, DMA_FROM_DEVICE);
891                 rx_page_info->last_page_user = false;
892         }
893
894         atomic_dec(&rxq->used);
895         return rx_page_info;
896 }
897
898 /* Throw away the data in the Rx completion */
899 static void be_rx_compl_discard(struct be_adapter *adapter,
900                 struct be_rx_obj *rxo,
901                 struct be_rx_compl_info *rxcp)
902 {
903         struct be_queue_info *rxq = &rxo->q;
904         struct be_rx_page_info *page_info;
905         u16 i, num_rcvd = rxcp->num_rcvd;
906
907         for (i = 0; i < num_rcvd; i++) {
908                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
909                 put_page(page_info->page);
910                 memset(page_info, 0, sizeof(*page_info));
911                 index_inc(&rxcp->rxq_idx, rxq->len);
912         }
913 }
914
915 /*
916  * skb_fill_rx_data forms a complete skb for an ether frame
917  * indicated by rxcp.
918  */
919 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
920                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
921 {
922         struct be_queue_info *rxq = &rxo->q;
923         struct be_rx_page_info *page_info;
924         u16 i, j;
925         u16 hdr_len, curr_frag_len, remaining;
926         u8 *start;
927
928         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
929         start = page_address(page_info->page) + page_info->page_offset;
930         prefetch(start);
931
932         /* Copy data in the first descriptor of this completion */
933         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
934
935         /* Copy the header portion into skb_data */
936         hdr_len = min(BE_HDR_LEN, curr_frag_len);
937         memcpy(skb->data, start, hdr_len);
938         skb->len = curr_frag_len;
939         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
940                 /* Complete packet has now been moved to data */
941                 put_page(page_info->page);
942                 skb->data_len = 0;
943                 skb->tail += curr_frag_len;
944         } else {
945                 skb_shinfo(skb)->nr_frags = 1;
946                 skb_shinfo(skb)->frags[0].page = page_info->page;
947                 skb_shinfo(skb)->frags[0].page_offset =
948                                         page_info->page_offset + hdr_len;
949                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
950                 skb->data_len = curr_frag_len - hdr_len;
951                 skb->tail += hdr_len;
952         }
953         page_info->page = NULL;
954
955         if (rxcp->pkt_size <= rx_frag_size) {
956                 BUG_ON(rxcp->num_rcvd != 1);
957                 return;
958         }
959
960         /* More frags present for this completion */
961         index_inc(&rxcp->rxq_idx, rxq->len);
962         remaining = rxcp->pkt_size - curr_frag_len;
963         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
964                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
965                 curr_frag_len = min(remaining, rx_frag_size);
966
967                 /* Coalesce all frags from the same physical page in one slot */
968                 if (page_info->page_offset == 0) {
969                         /* Fresh page */
970                         j++;
971                         skb_shinfo(skb)->frags[j].page = page_info->page;
972                         skb_shinfo(skb)->frags[j].page_offset =
973                                                         page_info->page_offset;
974                         skb_shinfo(skb)->frags[j].size = 0;
975                         skb_shinfo(skb)->nr_frags++;
976                 } else {
977                         put_page(page_info->page);
978                 }
979
980                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
981                 skb->len += curr_frag_len;
982                 skb->data_len += curr_frag_len;
983
984                 remaining -= curr_frag_len;
985                 index_inc(&rxcp->rxq_idx, rxq->len);
986                 page_info->page = NULL;
987         }
988         BUG_ON(j > MAX_SKB_FRAGS);
989 }
990
991 /* Process the RX completion indicated by rxcp when GRO is disabled */
992 static void be_rx_compl_process(struct be_adapter *adapter,
993                         struct be_rx_obj *rxo,
994                         struct be_rx_compl_info *rxcp)
995 {
996         struct sk_buff *skb;
997
998         skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
999         if (unlikely(!skb)) {
1000                 if (net_ratelimit())
1001                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1002                 be_rx_compl_discard(adapter, rxo, rxcp);
1003                 return;
1004         }
1005
1006         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1007
1008         if (likely(adapter->rx_csum && csum_passed(rxcp)))
1009                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010         else
1011                 skb_checksum_none_assert(skb);
1012
1013         skb->truesize = skb->len + sizeof(struct sk_buff);
1014         skb->protocol = eth_type_trans(skb, adapter->netdev);
1015
1016         if (unlikely(rxcp->vlanf)) {
1017                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1018                         kfree_skb(skb);
1019                         return;
1020                 }
1021                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1022                                         rxcp->vlan_tag);
1023         } else {
1024                 netif_receive_skb(skb);
1025         }
1026 }
1027
1028 /* Process the RX completion indicated by rxcp when GRO is enabled */
1029 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1030                 struct be_rx_obj *rxo,
1031                 struct be_rx_compl_info *rxcp)
1032 {
1033         struct be_rx_page_info *page_info;
1034         struct sk_buff *skb = NULL;
1035         struct be_queue_info *rxq = &rxo->q;
1036         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1037         u16 remaining, curr_frag_len;
1038         u16 i, j;
1039
1040         skb = napi_get_frags(&eq_obj->napi);
1041         if (!skb) {
1042                 be_rx_compl_discard(adapter, rxo, rxcp);
1043                 return;
1044         }
1045
1046         remaining = rxcp->pkt_size;
1047         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1048                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1049
1050                 curr_frag_len = min(remaining, rx_frag_size);
1051
1052                 /* Coalesce all frags from the same physical page in one slot */
1053                 if (i == 0 || page_info->page_offset == 0) {
1054                         /* First frag or Fresh page */
1055                         j++;
1056                         skb_shinfo(skb)->frags[j].page = page_info->page;
1057                         skb_shinfo(skb)->frags[j].page_offset =
1058                                                         page_info->page_offset;
1059                         skb_shinfo(skb)->frags[j].size = 0;
1060                 } else {
1061                         put_page(page_info->page);
1062                 }
1063                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1064
1065                 remaining -= curr_frag_len;
1066                 index_inc(&rxcp->rxq_idx, rxq->len);
1067                 memset(page_info, 0, sizeof(*page_info));
1068         }
1069         BUG_ON(j > MAX_SKB_FRAGS);
1070
1071         skb_shinfo(skb)->nr_frags = j + 1;
1072         skb->len = rxcp->pkt_size;
1073         skb->data_len = rxcp->pkt_size;
1074         skb->truesize += rxcp->pkt_size;
1075         skb->ip_summed = CHECKSUM_UNNECESSARY;
1076
1077         if (likely(!rxcp->vlanf))
1078                 napi_gro_frags(&eq_obj->napi);
1079         else
1080                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1081                                 rxcp->vlan_tag);
1082 }
1083
1084 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1085                                 struct be_eth_rx_compl *compl,
1086                                 struct be_rx_compl_info *rxcp)
1087 {
1088         rxcp->pkt_size =
1089                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1090         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1091         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1092         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1093         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1094         rxcp->ip_csum =
1095                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1096         rxcp->l4_csum =
1097                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1098         rxcp->ipv6 =
1099                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1100         rxcp->rxq_idx =
1101                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1102         rxcp->num_rcvd =
1103                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1104         rxcp->pkt_type =
1105                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1106         rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
1107         rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1108                                         compl);
1109 }
1110
1111 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1112                                 struct be_eth_rx_compl *compl,
1113                                 struct be_rx_compl_info *rxcp)
1114 {
1115         rxcp->pkt_size =
1116                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1117         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1118         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1119         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1120         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1121         rxcp->ip_csum =
1122                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1123         rxcp->l4_csum =
1124                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1125         rxcp->ipv6 =
1126                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1127         rxcp->rxq_idx =
1128                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1129         rxcp->num_rcvd =
1130                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1131         rxcp->pkt_type =
1132                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1133         rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
1134         rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1135                                         compl);
1136 }
1137
1138 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1139 {
1140         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1141         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1142         struct be_adapter *adapter = rxo->adapter;
1143
1144         /* For checking the valid bit it is OK to use either definition as the
1145          * valid bit is at the same position in both v0 and v1 Rx compl */
1146         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1147                 return NULL;
1148
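        /* Read the rest of the completion only after the valid bit has
         * been observed */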
1149         rmb();
1150         be_dws_le_to_cpu(compl, sizeof(*compl));
1151
1152         if (adapter->be3_native)
1153                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1154         else
1155                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1156
1157         /* vlanf could be wrongly set in some cards. Ignore it if vtm is not set */
1158         if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1159                 rxcp->vlanf = 0;
1160
1161         if (!lancer_chip(adapter))
1162                 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1163
1164         if (((adapter->pvid & VLAN_VID_MASK) ==
1165                 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1166                 !adapter->vlan_tag[rxcp->vlan_tag])
1167                 rxcp->vlanf = 0;
1168
1169         /* As the compl has been parsed, reset it; we won't touch it again */
1170         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1171
1172         queue_tail_inc(&rxo->cq);
1173         return rxcp;
1174 }
1175
1176 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1177 {
1178         u32 order = get_order(size);
1179
1180         if (order > 0)
1181                 gfp |= __GFP_COMP;
1182         return  alloc_pages(gfp, order);
1183 }
1184
1185 /*
1186  * Allocate a page, split it to fragments of size rx_frag_size and post as
1187  * receive buffers to BE
1188  */
1189 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1190 {
1191         struct be_adapter *adapter = rxo->adapter;
1192         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1193         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1194         struct be_queue_info *rxq = &rxo->q;
1195         struct page *pagep = NULL;
1196         struct be_eth_rx_d *rxd;
1197         u64 page_dmaaddr = 0, frag_dmaaddr;
1198         u32 posted, page_offset = 0;
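
        /* Carve rx_frag_size fragments out of one big page: the first
         * fragment maps the page, later ones take an extra page reference,
         * and last_page_user marks the fragment that must do the unmap.
         */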
1199
1200         page_info = &rxo->page_info_tbl[rxq->head];
1201         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1202                 if (!pagep) {
1203                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1204                         if (unlikely(!pagep)) {
1205                                 rxo->stats.rx_post_fail++;
1206                                 break;
1207                         }
1208                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1209                                                     0, adapter->big_page_size,
1210                                                     DMA_FROM_DEVICE);
1211                         page_info->page_offset = 0;
1212                 } else {
1213                         get_page(pagep);
1214                         page_info->page_offset = page_offset + rx_frag_size;
1215                 }
1216                 page_offset = page_info->page_offset;
1217                 page_info->page = pagep;
1218                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1219                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1220
1221                 rxd = queue_head_node(rxq);
1222                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1223                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1224
1225                 /* Any space left in the current big page for another frag? */
1226                 if ((page_offset + rx_frag_size + rx_frag_size) >
1227                                         adapter->big_page_size) {
1228                         pagep = NULL;
1229                         page_info->last_page_user = true;
1230                 }
1231
1232                 prev_page_info = page_info;
1233                 queue_head_inc(rxq);
1234                 page_info = &page_info_tbl[rxq->head];
1235         }
1236         if (pagep)
1237                 prev_page_info->last_page_user = true;
1238
1239         if (posted) {
1240                 atomic_add(posted, &rxq->used);
1241                 be_rxq_notify(adapter, rxq->id, posted);
1242         } else if (atomic_read(&rxq->used) == 0) {
1243                 /* Let be_worker replenish when memory is available */
1244                 rxo->rx_post_starved = true;
1245         }
1246 }
1247
1248 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1249 {
1250         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1251
1252         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1253                 return NULL;
1254
1255         rmb();
1256         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1257
1258         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1259
1260         queue_tail_inc(tx_cq);
1261         return txcp;
1262 }
1263
1264 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1265 {
1266         struct be_queue_info *txq = &adapter->tx_obj.q;
1267         struct be_eth_wrb *wrb;
1268         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1269         struct sk_buff *sent_skb;
1270         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1271         bool unmap_skb_hdr = true;
1272
1273         sent_skb = sent_skbs[txq->tail];
1274         BUG_ON(!sent_skb);
1275         sent_skbs[txq->tail] = NULL;
1276
1277         /* skip header wrb */
1278         queue_tail_inc(txq);
1279
1280         do {
1281                 cur_index = txq->tail;
1282                 wrb = queue_tail_node(txq);
1283                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1284                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1285                 unmap_skb_hdr = false;
1286
1287                 num_wrbs++;
1288                 queue_tail_inc(txq);
1289         } while (cur_index != last_index);
1290
1291         atomic_sub(num_wrbs, &txq->used);
1292
1293         kfree_skb(sent_skb);
1294 }
1295
1296 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1297 {
1298         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1299
1300         if (!eqe->evt)
1301                 return NULL;
1302
1303         rmb();
1304         eqe->evt = le32_to_cpu(eqe->evt);
1305         queue_tail_inc(&eq_obj->q);
1306         return eqe;
1307 }
1308
1309 static int event_handle(struct be_adapter *adapter,
1310                         struct be_eq_obj *eq_obj)
1311 {
1312         struct be_eq_entry *eqe;
1313         u16 num = 0;
1314
1315         while ((eqe = event_get(eq_obj)) != NULL) {
1316                 eqe->evt = 0;
1317                 num++;
1318         }
1319
1320         /* Deal with any spurious interrupts that come
1321          * without events
1322          */
1323         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1324         if (num)
1325                 napi_schedule(&eq_obj->napi);
1326
1327         return num;
1328 }
1329
1330 /* Just read and notify events without processing them.
1331  * Used at the time of destroying event queues */
1332 static void be_eq_clean(struct be_adapter *adapter,
1333                         struct be_eq_obj *eq_obj)
1334 {
1335         struct be_eq_entry *eqe;
1336         u16 num = 0;
1337
1338         while ((eqe = event_get(eq_obj)) != NULL) {
1339                 eqe->evt = 0;
1340                 num++;
1341         }
1342
1343         if (num)
1344                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1345 }
1346
1347 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1348 {
1349         struct be_rx_page_info *page_info;
1350         struct be_queue_info *rxq = &rxo->q;
1351         struct be_queue_info *rx_cq = &rxo->cq;
1352         struct be_rx_compl_info *rxcp;
1353         u16 tail;
1354
1355         /* First cleanup pending rx completions */
1356         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1357                 be_rx_compl_discard(adapter, rxo, rxcp);
1358                 be_cq_notify(adapter, rx_cq->id, false, 1);
1359         }
1360
1361         /* Then free posted rx buffers that were not used */
1362         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1363         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1364                 page_info = get_rx_page_info(adapter, rxo, tail);
1365                 put_page(page_info->page);
1366                 memset(page_info, 0, sizeof(*page_info));
1367         }
1368         BUG_ON(atomic_read(&rxq->used));
1369 }
1370
1371 static void be_tx_compl_clean(struct be_adapter *adapter)
1372 {
1373         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1374         struct be_queue_info *txq = &adapter->tx_obj.q;
1375         struct be_eth_tx_compl *txcp;
1376         u16 end_idx, cmpl = 0, timeo = 0;
1377         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1378         struct sk_buff *sent_skb;
1379         bool dummy_wrb;
1380
1381         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1382         do {
1383                 while ((txcp = be_tx_compl_get(tx_cq))) {
1384                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1385                                         wrb_index, txcp);
1386                         be_tx_compl_process(adapter, end_idx);
1387                         cmpl++;
1388                 }
1389                 if (cmpl) {
1390                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1391                         cmpl = 0;
1392                 }
1393
1394                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1395                         break;
1396
1397                 mdelay(1);
1398         } while (true);
1399
1400         if (atomic_read(&txq->used))
1401                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1402                         atomic_read(&txq->used));
1403
1404         /* free posted tx WRBs for which completions will never arrive */
1405         while (atomic_read(&txq->used)) {
1406                 sent_skb = sent_skbs[txq->tail];
1407                 end_idx = txq->tail;
1408                 index_adv(&end_idx,
1409                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1410                         txq->len);
1411                 be_tx_compl_process(adapter, end_idx);
1412         }
1413 }
1414
1415 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1416 {
1417         struct be_queue_info *q;
1418
1419         q = &adapter->mcc_obj.q;
1420         if (q->created)
1421                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1422         be_queue_free(adapter, q);
1423
1424         q = &adapter->mcc_obj.cq;
1425         if (q->created)
1426                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1427         be_queue_free(adapter, q);
1428 }
1429
1430 /* Must be called only after TX qs are created as MCC shares TX EQ */
1431 static int be_mcc_queues_create(struct be_adapter *adapter)
1432 {
1433         struct be_queue_info *q, *cq;
1434
1435         /* Alloc MCC compl queue */
1436         cq = &adapter->mcc_obj.cq;
1437         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1438                         sizeof(struct be_mcc_compl)))
1439                 goto err;
1440
1441         /* Ask BE to create MCC compl queue; share TX's eq */
1442         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1443                 goto mcc_cq_free;
1444
1445         /* Alloc MCC queue */
1446         q = &adapter->mcc_obj.q;
1447         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1448                 goto mcc_cq_destroy;
1449
1450         /* Ask BE to create MCC queue */
1451         if (be_cmd_mccq_create(adapter, q, cq))
1452                 goto mcc_q_free;
1453
1454         return 0;
1455
1456 mcc_q_free:
1457         be_queue_free(adapter, q);
1458 mcc_cq_destroy:
1459         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1460 mcc_cq_free:
1461         be_queue_free(adapter, cq);
1462 err:
1463         return -1;
1464 }
1465
1466 static void be_tx_queues_destroy(struct be_adapter *adapter)
1467 {
1468         struct be_queue_info *q;
1469
1470         q = &adapter->tx_obj.q;
1471         if (q->created)
1472                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1473         be_queue_free(adapter, q);
1474
1475         q = &adapter->tx_obj.cq;
1476         if (q->created)
1477                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1478         be_queue_free(adapter, q);
1479
1480         /* Clear any residual events */
1481         be_eq_clean(adapter, &adapter->tx_eq);
1482
1483         q = &adapter->tx_eq.q;
1484         if (q->created)
1485                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1486         be_queue_free(adapter, q);
1487 }
1488
1489 static int be_tx_queues_create(struct be_adapter *adapter)
1490 {
1491         struct be_queue_info *eq, *q, *cq;
1492
1493         adapter->tx_eq.max_eqd = 0;
1494         adapter->tx_eq.min_eqd = 0;
1495         adapter->tx_eq.cur_eqd = 96;
1496         adapter->tx_eq.enable_aic = false;
1497         /* Alloc Tx Event queue */
1498         eq = &adapter->tx_eq.q;
1499         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1500                 return -1;
1501
1502         /* Ask BE to create Tx Event queue */
1503         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1504                 goto tx_eq_free;
1505
1506         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1507
1508
1509         /* Alloc TX eth compl queue */
1510         cq = &adapter->tx_obj.cq;
1511         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1512                         sizeof(struct be_eth_tx_compl)))
1513                 goto tx_eq_destroy;
1514
1515         /* Ask BE to create Tx eth compl queue */
1516         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1517                 goto tx_cq_free;
1518
1519         /* Alloc TX eth queue */
1520         q = &adapter->tx_obj.q;
1521         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1522                 goto tx_cq_destroy;
1523
1524         /* Ask BE to create Tx eth queue */
1525         if (be_cmd_txq_create(adapter, q, cq))
1526                 goto tx_q_free;
1527         return 0;
1528
1529 tx_q_free:
1530         be_queue_free(adapter, q);
1531 tx_cq_destroy:
1532         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1533 tx_cq_free:
1534         be_queue_free(adapter, cq);
1535 tx_eq_destroy:
1536         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1537 tx_eq_free:
1538         be_queue_free(adapter, eq);
1539         return -1;
1540 }
1541
1542 static void be_rx_queues_destroy(struct be_adapter *adapter)
1543 {
1544         struct be_queue_info *q;
1545         struct be_rx_obj *rxo;
1546         int i;
1547
1548         for_all_rx_queues(adapter, rxo, i) {
1549                 q = &rxo->q;
1550                 if (q->created) {
1551                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1552                         /* After the rxq is invalidated, wait a grace
1553                          * period of 1ms for all DMA to end and the flush
1554                          * completion to arrive
1555                          */
1556                         mdelay(1);
1557                         be_rx_q_clean(adapter, rxo);
1558                 }
1559                 be_queue_free(adapter, q);
1560
1561                 q = &rxo->cq;
1562                 if (q->created)
1563                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1564                 be_queue_free(adapter, q);
1565
1566                 /* Clear any residual events */
1567                 q = &rxo->rx_eq.q;
1568                 if (q->created) {
1569                         be_eq_clean(adapter, &rxo->rx_eq);
1570                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1571                 }
1572                 be_queue_free(adapter, q);
1573         }
1574 }
1575
1576 static int be_rx_queues_create(struct be_adapter *adapter)
1577 {
1578         struct be_queue_info *eq, *q, *cq;
1579         struct be_rx_obj *rxo;
1580         int rc, i;
1581
1582         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1583         for_all_rx_queues(adapter, rxo, i) {
1584                 rxo->adapter = adapter;
1585                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1586                 rxo->rx_eq.enable_aic = true;
1587
1588                 /* EQ */
1589                 eq = &rxo->rx_eq.q;
1590                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1591                                         sizeof(struct be_eq_entry));
1592                 if (rc)
1593                         goto err;
1594
1595                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1596                 if (rc)
1597                         goto err;
1598
1599                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1600
1601                 /* CQ */
1602                 cq = &rxo->cq;
1603                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1604                                 sizeof(struct be_eth_rx_compl));
1605                 if (rc)
1606                         goto err;
1607
1608                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1609                 if (rc)
1610                         goto err;
1611                 /* Rx Q */
1612                 q = &rxo->q;
1613                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1614                                 sizeof(struct be_eth_rx_d));
1615                 if (rc)
1616                         goto err;
1617
1618                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1619                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1620                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1621                 if (rc)
1622                         goto err;
1623         }
1624
1625         if (be_multi_rxq(adapter)) {
1626                 u8 rsstable[MAX_RSS_QS];
1627
1628                 for_all_rss_queues(adapter, rxo, i)
1629                         rsstable[i] = rxo->rss_id;
1630
1631                 rc = be_cmd_rss_config(adapter, rsstable,
1632                         adapter->num_rx_qs - 1);
1633                 if (rc)
1634                         goto err;
1635         }
1636
1637         return 0;
1638 err:
1639         be_rx_queues_destroy(adapter);
1640         return -1;
1641 }
1642
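/* Return true if an unconsumed event is present at the EQ tail,
 * without consuming it.
 */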
1643 static bool event_peek(struct be_eq_obj *eq_obj)
1644 {
1645         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1646         if (!eqe->evt)
1647                 return false;
1648         else
1649                 return true;
1650 }
1651
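/* Legacy INTx interrupt handler. Lancer has no CSR interrupt status
 * register, so peek each EQ directly; on BE2/BE3 read CEV_ISR to find
 * which event queues fired and handle only those.
 */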
1652 static irqreturn_t be_intx(int irq, void *dev)
1653 {
1654         struct be_adapter *adapter = dev;
1655         struct be_rx_obj *rxo;
1656         int isr, i, tx = 0, rx = 0;
1657
1658         if (lancer_chip(adapter)) {
1659                 if (event_peek(&adapter->tx_eq))
1660                         tx = event_handle(adapter, &adapter->tx_eq);
1661                 for_all_rx_queues(adapter, rxo, i) {
1662                         if (event_peek(&rxo->rx_eq))
1663                                 rx |= event_handle(adapter, &rxo->rx_eq);
1664                 }
1665
1666                 if (!(tx || rx))
1667                         return IRQ_NONE;
1668
1669         } else {
1670                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1671                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1672                 if (!isr)
1673                         return IRQ_NONE;
1674
1675                 if ((1 << adapter->tx_eq.eq_idx) & isr)
1676                         event_handle(adapter, &adapter->tx_eq);
1677
1678                 for_all_rx_queues(adapter, rxo, i) {
1679                         if ((1 << rxo->rx_eq.eq_idx) & isr)
1680                                 event_handle(adapter, &rxo->rx_eq);
1681                 }
1682         }
1683
1684         return IRQ_HANDLED;
1685 }
1686
1687 static irqreturn_t be_msix_rx(int irq, void *dev)
1688 {
1689         struct be_rx_obj *rxo = dev;
1690         struct be_adapter *adapter = rxo->adapter;
1691
1692         event_handle(adapter, &rxo->rx_eq);
1693
1694         return IRQ_HANDLED;
1695 }
1696
1697 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1698 {
1699         struct be_adapter *adapter = dev;
1700
1701         event_handle(adapter, &adapter->tx_eq);
1702
1703         return IRQ_HANDLED;
1704 }
1705
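/* GRO is used only for error-free TCP completions; everything else
 * takes the regular receive path.
 */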
1706 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1707 {
1708         return rxcp->tcpf && !rxcp->err;
1709 }
1710
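/* NAPI poll handler for an RX queue: consume up to 'budget' completions,
 * skipping flush completions (num_rcvd == 0), refill the RX queue when it
 * falls below the watermark, and re-arm the CQ once all work is done.
 */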
1711 static int be_poll_rx(struct napi_struct *napi, int budget)
1712 {
1713         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1714         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1715         struct be_adapter *adapter = rxo->adapter;
1716         struct be_queue_info *rx_cq = &rxo->cq;
1717         struct be_rx_compl_info *rxcp;
1718         u32 work_done;
1719
1720         rxo->stats.rx_polls++;
1721         for (work_done = 0; work_done < budget; work_done++) {
1722                 rxcp = be_rx_compl_get(rxo);
1723                 if (!rxcp)
1724                         break;
1725
1726                 /* Ignore flush completions */
1727                 if (rxcp->num_rcvd) {
1728                         if (do_gro(rxcp))
1729                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1730                         else
1731                                 be_rx_compl_process(adapter, rxo, rxcp);
1732                 }
1733                 be_rx_stats_update(rxo, rxcp);
1734         }
1735
1736         /* Refill the queue */
1737         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1738                 be_post_rx_frags(rxo, GFP_ATOMIC);
1739
1740         /* All consumed */
1741         if (work_done < budget) {
1742                 napi_complete(napi);
1743                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1744         } else {
1745                 /* More to be consumed; continue with interrupts disabled */
1746                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1747         }
1748         return work_done;
1749 }
1750
1751 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1752  * For TX/MCC we don't honour budget; consume everything
1753  */
1754 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1755 {
1756         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1757         struct be_adapter *adapter =
1758                 container_of(tx_eq, struct be_adapter, tx_eq);
1759         struct be_queue_info *txq = &adapter->tx_obj.q;
1760         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1761         struct be_eth_tx_compl *txcp;
1762         int tx_compl = 0, mcc_compl, status = 0;
1763         u16 end_idx;
1764
1765         while ((txcp = be_tx_compl_get(tx_cq))) {
1766                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1767                                 wrb_index, txcp);
1768                 be_tx_compl_process(adapter, end_idx);
1769                 tx_compl++;
1770         }
1771
1772         mcc_compl = be_process_mcc(adapter, &status);
1773
1774         napi_complete(napi);
1775
1776         if (mcc_compl) {
1777                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1778                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1779         }
1780
1781         if (tx_compl) {
1782                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1783
1784                 /* As Tx wrbs have been freed up, wake up netdev queue if
1785                  * it was stopped due to lack of tx wrbs.
1786                  */
1787                 if (netif_queue_stopped(adapter->netdev) &&
1788                         atomic_read(&txq->used) < txq->len / 2) {
1789                         netif_wake_queue(adapter->netdev);
1790                 }
1791
1792                 tx_stats(adapter)->be_tx_events++;
1793                 tx_stats(adapter)->be_tx_compl += tx_compl;
1794         }
1795
1796         return 1;
1797 }
1798
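/* Detect an Unrecoverable Error (UE) by reading the UE status and mask
 * registers from PCI config space, and log a description of every
 * unmasked error bit that is set.
 */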
1799 void be_detect_dump_ue(struct be_adapter *adapter)
1800 {
1801         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1802         u32 i;
1803
1804         pci_read_config_dword(adapter->pdev,
1805                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1806         pci_read_config_dword(adapter->pdev,
1807                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1808         pci_read_config_dword(adapter->pdev,
1809                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1810         pci_read_config_dword(adapter->pdev,
1811                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1812
1813         ue_status_lo &= ~ue_status_lo_mask;
1814         ue_status_hi &= ~ue_status_hi_mask;
1815
1816         if (ue_status_lo || ue_status_hi) {
1817                 adapter->ue_detected = true;
1818                 adapter->eeh_err = true;
1819                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1820         }
1821
1822         if (ue_status_lo) {
1823                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1824                         if (ue_status_lo & 1)
1825                                 dev_err(&adapter->pdev->dev,
1826                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1827                 }
1828         }
1829         if (ue_status_hi) {
1830                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1831                         if (ue_status_hi & 1)
1832                                 dev_err(&adapter->pdev->dev,
1833                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1834                 }
1835         }
1836
1837 }
1838
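/* Periodic (1 second) worker: with the interface down it only reaps MCC
 * completions and checks for UEs; when up it also fires the stats cmd,
 * updates TX/RX rates and the RX EQ delay, and replenishes any RX queue
 * that starved while posting buffers.
 */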
1839 static void be_worker(struct work_struct *work)
1840 {
1841         struct be_adapter *adapter =
1842                 container_of(work, struct be_adapter, work.work);
1843         struct be_rx_obj *rxo;
1844         int i;
1845
1846         /* when interrupts are not yet enabled, just reap any pending
1847          * mcc completions */
1848         if (!netif_running(adapter->netdev)) {
1849                 int mcc_compl, status = 0;
1850
1851                 mcc_compl = be_process_mcc(adapter, &status);
1852
1853                 if (mcc_compl) {
1854                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1855                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1856                 }
1857
1858                 if (!adapter->ue_detected && !lancer_chip(adapter))
1859                         be_detect_dump_ue(adapter);
1860
1861                 goto reschedule;
1862         }
1863
1864         if (!adapter->stats_cmd_sent)
1865                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1866
1867         be_tx_rate_update(adapter);
1868
1869         for_all_rx_queues(adapter, rxo, i) {
1870                 be_rx_rate_update(rxo);
1871                 be_rx_eqd_update(adapter, rxo);
1872
1873                 if (rxo->rx_post_starved) {
1874                         rxo->rx_post_starved = false;
1875                         be_post_rx_frags(rxo, GFP_KERNEL);
1876                 }
1877         }
1878         if (!adapter->ue_detected && !lancer_chip(adapter))
1879                 be_detect_dump_ue(adapter);
1880
1881 reschedule:
1882         adapter->work_counter++;
1883         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1884 }
1885
1886 static void be_msix_disable(struct be_adapter *adapter)
1887 {
1888         if (adapter->msix_enabled) {
1889                 pci_disable_msix(adapter->pdev);
1890                 adapter->msix_enabled = false;
1891         }
1892 }
1893
1894 static int be_num_rxqs_get(struct be_adapter *adapter)
1895 {
1896         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1897                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1898                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1899         } else {
1900                 dev_warn(&adapter->pdev->dev,
1901                         "No support for multiple RX queues\n");
1902                 return 1;
1903         }
1904 }
1905
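/* Try to enable one MSI-X vector per RX queue plus one shared TX/MCC
 * vector; if fewer vectors are available, retry with what the kernel
 * offers and shrink the number of RX queues to match.
 */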
1906 static void be_msix_enable(struct be_adapter *adapter)
1907 {
1908 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1909         int i, status;
1910
1911         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1912
1913         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1914                 adapter->msix_entries[i].entry = i;
1915
1916         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1917                         adapter->num_rx_qs + 1);
1918         if (status == 0) {
1919                 goto done;
1920         } else if (status >= BE_MIN_MSIX_VECTORS) {
1921                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1922                                 status) == 0) {
1923                         adapter->num_rx_qs = status - 1;
1924                         dev_warn(&adapter->pdev->dev,
1925                                 "Could alloc only %d MSIx vectors. "
1926                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1927                         goto done;
1928                 }
1929         }
1930         return;
1931 done:
1932         adapter->msix_enabled = true;
1933 }
1934
1935 static void be_sriov_enable(struct be_adapter *adapter)
1936 {
1937         be_check_sriov_fn_type(adapter);
1938 #ifdef CONFIG_PCI_IOV
1939         if (be_physfn(adapter) && num_vfs) {
1940                 int status;
1941
1942                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1943                 adapter->sriov_enabled = status ? false : true;
1944         }
1945 #endif
1946 }
1947
1948 static void be_sriov_disable(struct be_adapter *adapter)
1949 {
1950 #ifdef CONFIG_PCI_IOV
1951         if (adapter->sriov_enabled) {
1952                 pci_disable_sriov(adapter->pdev);
1953                 adapter->sriov_enabled = false;
1954         }
1955 #endif
1956 }
1957
1958 static inline int be_msix_vec_get(struct be_adapter *adapter,
1959                                         struct be_eq_obj *eq_obj)
1960 {
1961         return adapter->msix_entries[eq_obj->eq_idx].vector;
1962 }
1963
1964 static int be_request_irq(struct be_adapter *adapter,
1965                 struct be_eq_obj *eq_obj,
1966                 void *handler, char *desc, void *context)
1967 {
1968         struct net_device *netdev = adapter->netdev;
1969         int vec;
1970
1971         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1972         vec = be_msix_vec_get(adapter, eq_obj);
1973         return request_irq(vec, handler, 0, eq_obj->desc, context);
1974 }
1975
1976 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1977                         void *context)
1978 {
1979         int vec = be_msix_vec_get(adapter, eq_obj);
1980         free_irq(vec, context);
1981 }
1982
1983 static int be_msix_register(struct be_adapter *adapter)
1984 {
1985         struct be_rx_obj *rxo;
1986         int status, i;
1987         char qname[10];
1988
1989         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1990                                 adapter);
1991         if (status)
1992                 goto err;
1993
1994         for_all_rx_queues(adapter, rxo, i) {
1995                 sprintf(qname, "rxq%d", i);
1996                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1997                                 qname, rxo);
1998                 if (status)
1999                         goto err_msix;
2000         }
2001
2002         return 0;
2003
2004 err_msix:
2005         be_free_irq(adapter, &adapter->tx_eq, adapter);
2006
2007         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2008                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2009
2010 err:
2011         dev_warn(&adapter->pdev->dev,
2012                 "MSIX Request IRQ failed - err %d\n", status);
2013         pci_disable_msix(adapter->pdev);
2014         adapter->msix_enabled = false;
2015         return status;
2016 }
2017
2018 static int be_irq_register(struct be_adapter *adapter)
2019 {
2020         struct net_device *netdev = adapter->netdev;
2021         int status;
2022
2023         if (adapter->msix_enabled) {
2024                 status = be_msix_register(adapter);
2025                 if (status == 0)
2026                         goto done;
2027                 /* INTx is not supported for VF */
2028                 if (!be_physfn(adapter))
2029                         return status;
2030         }
2031
2032         /* INTx */
2033         netdev->irq = adapter->pdev->irq;
2034         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2035                         adapter);
2036         if (status) {
2037                 dev_err(&adapter->pdev->dev,
2038                         "INTx request IRQ failed - err %d\n", status);
2039                 return status;
2040         }
2041 done:
2042         adapter->isr_registered = true;
2043         return 0;
2044 }
2045
2046 static void be_irq_unregister(struct be_adapter *adapter)
2047 {
2048         struct net_device *netdev = adapter->netdev;
2049         struct be_rx_obj *rxo;
2050         int i;
2051
2052         if (!adapter->isr_registered)
2053                 return;
2054
2055         /* INTx */
2056         if (!adapter->msix_enabled) {
2057                 free_irq(netdev->irq, adapter);
2058                 goto done;
2059         }
2060
2061         /* MSIx */
2062         be_free_irq(adapter, &adapter->tx_eq, adapter);
2063
2064         for_all_rx_queues(adapter, rxo, i)
2065                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2066
2067 done:
2068         adapter->isr_registered = false;
2069 }
2070
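/* Quiesce the interface: stop async MCC processing, disable interrupts
 * and NAPI, disarm the CQs on Lancer, synchronize and free the IRQs,
 * then wait for all pending TX completions so their skbs are freed.
 */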
2071 static int be_close(struct net_device *netdev)
2072 {
2073         struct be_adapter *adapter = netdev_priv(netdev);
2074         struct be_rx_obj *rxo;
2075         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2076         int vec, i;
2077
2078         be_async_mcc_disable(adapter);
2079
2080         netif_carrier_off(netdev);
2081         adapter->link_up = false;
2082
2083         if (!lancer_chip(adapter))
2084                 be_intr_set(adapter, false);
2085
2086         for_all_rx_queues(adapter, rxo, i)
2087                 napi_disable(&rxo->rx_eq.napi);
2088
2089         napi_disable(&tx_eq->napi);
2090
2091         if (lancer_chip(adapter)) {
2092                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2093                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2094                 for_all_rx_queues(adapter, rxo, i)
2095                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2096         }
2097
2098         if (adapter->msix_enabled) {
2099                 vec = be_msix_vec_get(adapter, tx_eq);
2100                 synchronize_irq(vec);
2101
2102                 for_all_rx_queues(adapter, rxo, i) {
2103                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2104                         synchronize_irq(vec);
2105                 }
2106         } else {
2107                 synchronize_irq(netdev->irq);
2108         }
2109         be_irq_unregister(adapter);
2110
2111         /* Wait for all pending tx completions to arrive so that
2112          * all tx skbs are freed.
2113          */
2114         be_tx_compl_clean(adapter);
2115
2116         return 0;
2117 }
2118
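/* Bring the interface up: post RX buffers, enable NAPI and interrupts,
 * arm the event and completion queues, start async MCC processing and
 * query link state; the PF additionally programs VLAN and flow control
 * settings.
 */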
2119 static int be_open(struct net_device *netdev)
2120 {
2121         struct be_adapter *adapter = netdev_priv(netdev);
2122         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2123         struct be_rx_obj *rxo;
2124         bool link_up;
2125         int status, i;
2126         u8 mac_speed;
2127         u16 link_speed;
2128
2129         for_all_rx_queues(adapter, rxo, i) {
2130                 be_post_rx_frags(rxo, GFP_KERNEL);
2131                 napi_enable(&rxo->rx_eq.napi);
2132         }
2133         napi_enable(&tx_eq->napi);
2134
2135         be_irq_register(adapter);
2136
2137         if (!lancer_chip(adapter))
2138                 be_intr_set(adapter, true);
2139
2140         /* The evt queues are created in unarmed state; arm them */
2141         for_all_rx_queues(adapter, rxo, i) {
2142                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2143                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2144         }
2145         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2146
2147         /* Now that interrupts are on we can process async mcc */
2148         be_async_mcc_enable(adapter);
2149
2150         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2151                         &link_speed);
2152         if (status)
2153                 goto err;
2154         be_link_status_update(adapter, link_up);
2155
2156         if (be_physfn(adapter)) {
2157                 status = be_vid_config(adapter, false, 0);
2158                 if (status)
2159                         goto err;
2160
2161                 status = be_cmd_set_flow_control(adapter,
2162                                 adapter->tx_fc, adapter->rx_fc);
2163                 if (status)
2164                         goto err;
2165         }
2166
2167         return 0;
2168 err:
2169         be_close(adapter->netdev);
2170         return -EIO;
2171 }
2172
2173 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2174 {
2175         struct be_dma_mem cmd;
2176         int status = 0;
2177         u8 mac[ETH_ALEN];
2178
2179         memset(mac, 0, ETH_ALEN);
2180
2181         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2182         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2183                                     GFP_KERNEL);
2184         if (cmd.va == NULL)
2185                 return -1;
2186         memset(cmd.va, 0, cmd.size);
2187
2188         if (enable) {
2189                 status = pci_write_config_dword(adapter->pdev,
2190                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2191                 if (status) {
2192                         dev_err(&adapter->pdev->dev,
2193                                 "Could not enable Wake-on-lan\n");
2194                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2195                                           cmd.dma);
2196                         return status;
2197                 }
2198                 status = be_cmd_enable_magic_wol(adapter,
2199                                 adapter->netdev->dev_addr, &cmd);
2200                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2201                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2202         } else {
2203                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2204                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2205                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2206         }
2207
2208         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2209         return status;
2210 }
2211
2212 /*
2213  * Generate a seed MAC address from the PF MAC address using jhash.
2214  * MAC addresses for VFs are assigned incrementally starting from the seed.
2215  * These addresses are programmed in the ASIC by the PF and the VF driver
2216  * queries for the MAC address during its probe.
2217  */
2218 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2219 {
2220         u32 vf = 0;
2221         int status = 0;
2222         u8 mac[ETH_ALEN];
2223
2224         be_vf_eth_addr_generate(adapter, mac);
2225
2226         for (vf = 0; vf < num_vfs; vf++) {
2227                 status = be_cmd_pmac_add(adapter, mac,
2228                                         adapter->vf_cfg[vf].vf_if_handle,
2229                                         &adapter->vf_cfg[vf].vf_pmac_id,
2230                                         vf + 1);
2231                 if (status)
2232                         dev_err(&adapter->pdev->dev,
2233                                 "Mac address add failed for VF %d\n", vf);
2234                 else
2235                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2236
2237                 mac[5] += 1;
2238         }
2239         return status;
2240 }
2241
2242 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2243 {
2244         u32 vf;
2245
2246         for (vf = 0; vf < num_vfs; vf++) {
2247                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248                         be_cmd_pmac_del(adapter,
2249                                         adapter->vf_cfg[vf].vf_if_handle,
2250                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2251         }
2252 }
2253
2254 static int be_setup(struct be_adapter *adapter)
2255 {
2256         struct net_device *netdev = adapter->netdev;
2257         u32 cap_flags, en_flags, vf = 0;
2258         int status;
2259         u8 mac[ETH_ALEN];
2260
2261         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2262                                 BE_IF_FLAGS_BROADCAST |
2263                                 BE_IF_FLAGS_MULTICAST;
2264
2265         if (be_physfn(adapter)) {
2266                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2267                                 BE_IF_FLAGS_PROMISCUOUS |
2268                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2269                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2270
2271                 if (be_multi_rxq(adapter)) {
2272                         cap_flags |= BE_IF_FLAGS_RSS;
2273                         en_flags |= BE_IF_FLAGS_RSS;
2274                 }
2275         }
2276
2277         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2278                         netdev->dev_addr, false/* pmac_invalid */,
2279                         &adapter->if_handle, &adapter->pmac_id, 0);
2280         if (status != 0)
2281                 goto do_none;
2282
2283         if (be_physfn(adapter)) {
2284                 if (adapter->sriov_enabled) {
2285                         while (vf < num_vfs) {
2286                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2287                                                         BE_IF_FLAGS_BROADCAST;
2288                                 status = be_cmd_if_create(adapter, cap_flags,
2289                                         en_flags, mac, true,
2290                                         &adapter->vf_cfg[vf].vf_if_handle,
2291                                         NULL, vf + 1);
2292                                 if (status) {
2293                                         dev_err(&adapter->pdev->dev,
2294                                         "Interface Create failed for VF %d\n",
2295                                         vf);
2296                                         goto if_destroy;
2297                                 }
2298                                 adapter->vf_cfg[vf].vf_pmac_id =
2299                                                         BE_INVALID_PMAC_ID;
2300                                 vf++;
2301                         }
2302                 }
2303         } else {
2304                 status = be_cmd_mac_addr_query(adapter, mac,
2305                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2306                 if (!status) {
2307                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2308                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2309                 }
2310         }
2311
2312         status = be_tx_queues_create(adapter);
2313         if (status != 0)
2314                 goto if_destroy;
2315
2316         status = be_rx_queues_create(adapter);
2317         if (status != 0)
2318                 goto tx_qs_destroy;
2319
2320         status = be_mcc_queues_create(adapter);
2321         if (status != 0)
2322                 goto rx_qs_destroy;
2323
2324         adapter->link_speed = -1;
2325
2326         return 0;
2327
2328
2329 rx_qs_destroy:
2330         be_rx_queues_destroy(adapter);
2331 tx_qs_destroy:
2332         be_tx_queues_destroy(adapter);
2333 if_destroy:
2334         if (be_physfn(adapter) && adapter->sriov_enabled)
2335                 for (vf = 0; vf < num_vfs; vf++)
2336                         if (adapter->vf_cfg[vf].vf_if_handle)
2337                                 be_cmd_if_destroy(adapter,
2338                                         adapter->vf_cfg[vf].vf_if_handle,
2339                                         vf + 1);
2340         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2341 do_none:
2342         return status;
2343 }
2344
2345 static int be_clear(struct be_adapter *adapter)
2346 {
2347         int vf;
2348
2349         if (be_physfn(adapter) && adapter->sriov_enabled)
2350                 be_vf_eth_addr_rem(adapter);
2351
2352         be_mcc_queues_destroy(adapter);
2353         be_rx_queues_destroy(adapter);
2354         be_tx_queues_destroy(adapter);
2355         adapter->eq_next_idx = 0;
2356
2357         if (be_physfn(adapter) && adapter->sriov_enabled)
2358                 for (vf = 0; vf < num_vfs; vf++)
2359                         if (adapter->vf_cfg[vf].vf_if_handle)
2360                                 be_cmd_if_destroy(adapter,
2361                                         adapter->vf_cfg[vf].vf_if_handle,
2362                                         vf + 1);
2363
2364         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2365
2366         /* tell fw we're done with firing cmds */
2367         be_cmd_fw_clean(adapter);
2368         return 0;
2369 }
2370
2371
2372 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
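/* Decide whether the redboot section needs flashing: compare the CRC of
 * the image in the firmware file against the CRC already in flash, and
 * skip the write when they match.
 */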
2373 static bool be_flash_redboot(struct be_adapter *adapter,
2374                         const u8 *p, u32 img_start, int image_size,
2375                         int hdr_size)
2376 {
2377         u32 crc_offset;
2378         u8 flashed_crc[4];
2379         int status;
2380
2381         crc_offset = hdr_size + img_start + image_size - 4;
2382
2383         p += crc_offset;
2384
2385         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2386                         (image_size - 4));
2387         if (status) {
2388                 dev_err(&adapter->pdev->dev,
2389                 "could not get crc from flash, not flashing redboot\n");
2390                 return false;
2391         }
2392
2393         /* update redboot only if crc does not match */
2394         if (!memcmp(flashed_crc, p, 4))
2395                 return false;
2396         else
2397                 return true;
2398 }
2399
2400 static int be_flash_data(struct be_adapter *adapter,
2401                         const struct firmware *fw,
2402                         struct be_dma_mem *flash_cmd, int num_of_images)
2403
2404 {
2405         int status = 0, i, filehdr_size = 0;
2406         u32 total_bytes = 0, flash_op;
2407         int num_bytes;
2408         const u8 *p = fw->data;
2409         struct be_cmd_write_flashrom *req = flash_cmd->va;
2410         const struct flash_comp *pflashcomp;
2411         int num_comp;
2412
2413         static const struct flash_comp gen3_flash_types[9] = {
2414                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2415                         FLASH_IMAGE_MAX_SIZE_g3},
2416                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2417                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2418                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2419                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2421                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2422                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2423                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2424                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2425                         FLASH_IMAGE_MAX_SIZE_g3},
2426                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2427                         FLASH_IMAGE_MAX_SIZE_g3},
2428                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2429                         FLASH_IMAGE_MAX_SIZE_g3},
2430                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2431                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2432         };
2433         static const struct flash_comp gen2_flash_types[8] = {
2434                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2435                         FLASH_IMAGE_MAX_SIZE_g2},
2436                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2437                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2438                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2439                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2440                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2441                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2442                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2443                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2444                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2445                         FLASH_IMAGE_MAX_SIZE_g2},
2446                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2447                         FLASH_IMAGE_MAX_SIZE_g2},
2448                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2449                          FLASH_IMAGE_MAX_SIZE_g2}
2450         };
2451
2452         if (adapter->generation == BE_GEN3) {
2453                 pflashcomp = gen3_flash_types;
2454                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2455                 num_comp = ARRAY_SIZE(gen3_flash_types);
2456         } else {
2457                 pflashcomp = gen2_flash_types;
2458                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2459                 num_comp = ARRAY_SIZE(gen2_flash_types);
2460         }
2461         for (i = 0; i < num_comp; i++) {
2462                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2463                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2464                         continue;
2465                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2466                         (!be_flash_redboot(adapter, fw->data,
2467                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2468                         (num_of_images * sizeof(struct image_hdr)))))
2469                         continue;
2470                 p = fw->data;
2471                 p += filehdr_size + pflashcomp[i].offset
2472                         + (num_of_images * sizeof(struct image_hdr));
2473                 if (p + pflashcomp[i].size > fw->data + fw->size)
2474                         return -1;
2475                 total_bytes = pflashcomp[i].size;
2476                 while (total_bytes) {
2477                         if (total_bytes > 32*1024)
2478                                 num_bytes = 32*1024;
2479                         else
2480                                 num_bytes = total_bytes;
2481                         total_bytes -= num_bytes;
2482
2483                         if (!total_bytes)
2484                                 flash_op = FLASHROM_OPER_FLASH;
2485                         else
2486                                 flash_op = FLASHROM_OPER_SAVE;
2487                         memcpy(req->params.data_buf, p, num_bytes);
2488                         p += num_bytes;
2489                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2490                                 pflashcomp[i].optype, flash_op, num_bytes);
2491                         if (status) {
2492                                 dev_err(&adapter->pdev->dev,
2493                                         "cmd to write to flash rom failed.\n");
2494                                 return -1;
2495                         }
2496                         yield();
2497                 }
2498         }
2499         return 0;
2500 }
2501
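/* Infer the UFI image generation (BE2 vs BE3) from the first character
 * of the build string in the flash file header.
 */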
2502 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2503 {
2504         if (fhdr == NULL)
2505                 return 0;
2506         if (fhdr->build[0] == '3')
2507                 return BE_GEN3;
2508         else if (fhdr->build[0] == '2')
2509                 return BE_GEN2;
2510         else
2511                 return 0;
2512 }
2513
2514 int be_load_fw(struct be_adapter *adapter, u8 *func)
2515 {
2516         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2517         const struct firmware *fw;
2518         struct flash_file_hdr_g2 *fhdr;
2519         struct flash_file_hdr_g3 *fhdr3;
2520         struct image_hdr *img_hdr_ptr = NULL;
2521         struct be_dma_mem flash_cmd;
2522         int status, i = 0, num_imgs = 0;
2523         const u8 *p;
2524
2525         if (!netif_running(adapter->netdev)) {
2526                 dev_err(&adapter->pdev->dev,
2527                         "Firmware load not allowed (interface is down)\n");
2528                 return -EPERM;
2529         }
2530
2531         strcpy(fw_file, func);
2532
2533         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2534         if (status)
2535                 goto fw_exit;
2536
2537         p = fw->data;
2538         fhdr = (struct flash_file_hdr_g2 *) p;
2539         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2540
2541         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2542         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2543                                           &flash_cmd.dma, GFP_KERNEL);
2544         if (!flash_cmd.va) {
2545                 status = -ENOMEM;
2546                 dev_err(&adapter->pdev->dev,
2547                         "Memory allocation failure while flashing\n");
2548                 goto fw_exit;
2549         }
2550
2551         if ((adapter->generation == BE_GEN3) &&
2552                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2553                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2554                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2555                 for (i = 0; i < num_imgs; i++) {
2556                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2557                                         (sizeof(struct flash_file_hdr_g3) +
2558                                          i * sizeof(struct image_hdr)));
2559                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2560                                 status = be_flash_data(adapter, fw, &flash_cmd,
2561                                                         num_imgs);
2562                 }
2563         } else if ((adapter->generation == BE_GEN2) &&
2564                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2565                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2566         } else {
2567                 dev_err(&adapter->pdev->dev,
2568                         "UFI and Interface are not compatible for flashing\n");
2569                 status = -1;
2570         }
2571
2572         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2573                           flash_cmd.dma);
2574         if (status) {
2575                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2576                 goto fw_exit;
2577         }
2578
2579         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2580
2581 fw_exit:
2582         release_firmware(fw);
2583         return status;
2584 }
2585
2586 static struct net_device_ops be_netdev_ops = {
2587         .ndo_open               = be_open,
2588         .ndo_stop               = be_close,
2589         .ndo_start_xmit         = be_xmit,
2590         .ndo_set_rx_mode        = be_set_multicast_list,
2591         .ndo_set_mac_address    = be_mac_addr_set,
2592         .ndo_change_mtu         = be_change_mtu,
2593         .ndo_validate_addr      = eth_validate_addr,
2594         .ndo_vlan_rx_register   = be_vlan_register,
2595         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2596         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2597         .ndo_set_vf_mac         = be_set_vf_mac,
2598         .ndo_set_vf_vlan        = be_set_vf_vlan,
2599         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2600         .ndo_get_vf_config      = be_get_vf_config
2601 };
2602
2603 static void be_netdev_init(struct net_device *netdev)
2604 {
2605         struct be_adapter *adapter = netdev_priv(netdev);
2606         struct be_rx_obj *rxo;
2607         int i;
2608
2609         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2610                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2611                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2612                 NETIF_F_GRO | NETIF_F_TSO6;
2613
2614         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2615                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2616
2617         if (lancer_chip(adapter))
2618                 netdev->vlan_features |= NETIF_F_TSO6;
2619
2620         netdev->flags |= IFF_MULTICAST;
2621
2622         adapter->rx_csum = true;
2623
2624         /* Default settings for Rx and Tx flow control */
2625         adapter->rx_fc = true;
2626         adapter->tx_fc = true;
2627
2628         netif_set_gso_max_size(netdev, 65535);
2629
2630         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2631
2632         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2633
2634         for_all_rx_queues(adapter, rxo, i)
2635                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2636                                 BE_NAPI_WEIGHT);
2637
2638         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2639                 BE_NAPI_WEIGHT);
2640 }
2641
2642 static void be_unmap_pci_bars(struct be_adapter *adapter)
2643 {
2644         if (adapter->csr)
2645                 iounmap(adapter->csr);
2646         if (adapter->db)
2647                 iounmap(adapter->db);
2648         if (adapter->pcicfg && be_physfn(adapter))
2649                 iounmap(adapter->pcicfg);
2650 }
2651
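/* Map the PCI BARs this function needs: Lancer exposes only a doorbell
 * BAR; on BE2/BE3 the PF also maps the CSR and PCI-config BARs, while a
 * VF reaches its PCI-config space at an offset within the doorbell BAR.
 */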
2652 static int be_map_pci_bars(struct be_adapter *adapter)
2653 {
2654         u8 __iomem *addr;
2655         int pcicfg_reg, db_reg;
2656
2657         if (lancer_chip(adapter)) {
2658                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2659                         pci_resource_len(adapter->pdev, 0));
2660                 if (addr == NULL)
2661                         return -ENOMEM;
2662                 adapter->db = addr;
2663                 return 0;
2664         }
2665
2666         if (be_physfn(adapter)) {
2667                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2668                                 pci_resource_len(adapter->pdev, 2));
2669                 if (addr == NULL)
2670                         return -ENOMEM;
2671                 adapter->csr = addr;
2672         }
2673
2674         if (adapter->generation == BE_GEN2) {
2675                 pcicfg_reg = 1;
2676                 db_reg = 4;
2677         } else {
2678                 pcicfg_reg = 0;
2679                 if (be_physfn(adapter))
2680                         db_reg = 4;
2681                 else
2682                         db_reg = 0;
2683         }
2684         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2685                                 pci_resource_len(adapter->pdev, db_reg));
2686         if (addr == NULL)
2687                 goto pci_map_err;
2688         adapter->db = addr;
2689
2690         if (be_physfn(adapter)) {
2691                 addr = ioremap_nocache(
2692                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2693                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2694                 if (addr == NULL)
2695                         goto pci_map_err;
2696                 adapter->pcicfg = addr;
2697         } else
2698                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2699
2700         return 0;
2701 pci_map_err:
2702         be_unmap_pci_bars(adapter);
2703         return -ENOMEM;
2704 }
2705
2706
2707 static void be_ctrl_cleanup(struct be_adapter *adapter)
2708 {
2709         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2710
2711         be_unmap_pci_bars(adapter);
2712
2713         if (mem->va)
2714                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2715                                   mem->dma);
2716
2717         mem = &adapter->mc_cmd_mem;
2718         if (mem->va)
2719                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2720                                   mem->dma);
2721 }
2722
2723 static int be_ctrl_init(struct be_adapter *adapter)
2724 {
2725         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2726         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2727         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2728         int status;
2729
2730         status = be_map_pci_bars(adapter);
2731         if (status)
2732                 goto done;
2733
2734         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2735         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2736                                                 mbox_mem_alloc->size,
2737                                                 &mbox_mem_alloc->dma,
2738                                                 GFP_KERNEL);
2739         if (!mbox_mem_alloc->va) {
2740                 status = -ENOMEM;
2741                 goto unmap_pci_bars;
2742         }
2743
2744         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2745         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2746         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2747         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2748
2749         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2750         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2751                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2752                                             GFP_KERNEL);
2753         if (mc_cmd_mem->va == NULL) {
2754                 status = -ENOMEM;
2755                 goto free_mbox;
2756         }
2757         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2758
2759         mutex_init(&adapter->mbox_lock);
2760         spin_lock_init(&adapter->mcc_lock);
2761         spin_lock_init(&adapter->mcc_cq_lock);
2762
2763         init_completion(&adapter->flash_compl);
2764         pci_save_state(adapter->pdev);
2765         return 0;
2766
2767 free_mbox:
2768         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2769                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2770
2771 unmap_pci_bars:
2772         be_unmap_pci_bars(adapter);
2773
2774 done:
2775         return status;
2776 }
2777
2778 static void be_stats_cleanup(struct be_adapter *adapter)
2779 {
2780         struct be_dma_mem *cmd = &adapter->stats_cmd;
2781
2782         if (cmd->va)
2783                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2784                                   cmd->va, cmd->dma);
2785 }
2786
2787 static int be_stats_init(struct be_adapter *adapter)
2788 {
2789         struct be_dma_mem *cmd = &adapter->stats_cmd;
2790
2791         cmd->size = sizeof(struct be_cmd_req_get_stats);
2792         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2793                                      GFP_KERNEL);
2794         if (cmd->va == NULL)
2795                 return -1;
2796         memset(cmd->va, 0, cmd->size);
2797         return 0;
2798 }
2799
2800 static void __devexit be_remove(struct pci_dev *pdev)
2801 {
2802         struct be_adapter *adapter = pci_get_drvdata(pdev);
2803
2804         if (!adapter)
2805                 return;
2806
2807         cancel_delayed_work_sync(&adapter->work);
2808
2809         unregister_netdev(adapter->netdev);
2810
2811         be_clear(adapter);
2812
2813         be_stats_cleanup(adapter);
2814
2815         be_ctrl_cleanup(adapter);
2816
2817         be_sriov_disable(adapter);
2818
2819         be_msix_disable(adapter);
2820
2821         pci_set_drvdata(pdev, NULL);
2822         pci_release_regions(pdev);
2823         pci_disable_device(pdev);
2824
2825         free_netdev(adapter->netdev);
2826 }
2827
2828 static int be_get_config(struct be_adapter *adapter)
2829 {
2830         int status;
2831         u8 mac[ETH_ALEN];
2832
2833         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2834         if (status)
2835                 return status;
2836
2837         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2838                         &adapter->function_mode, &adapter->function_caps);
2839         if (status)
2840                 return status;
2841
2842         memset(mac, 0, ETH_ALEN);
2843
2844         if (be_physfn(adapter)) {
2845                 status = be_cmd_mac_addr_query(adapter, mac,
2846                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2847
2848                 if (status)
2849                         return status;
2850
2851                 if (!is_valid_ether_addr(mac))
2852                         return -EADDRNOTAVAIL;
2853
2854                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2855                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2856         }
2857
2858         if (adapter->function_mode & 0x400)
2859                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
2860         else
2861                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2862
2863         status = be_cmd_get_cntl_attributes(adapter);
2864         if (status)
2865                 return status;
2866
2867         be_cmd_check_native_mode(adapter);
2868         return 0;
2869 }
2870
2871 static int be_dev_family_check(struct be_adapter *adapter)
2872 {
2873         struct pci_dev *pdev = adapter->pdev;
2874         u32 sli_intf = 0, if_type;
2875
2876         switch (pdev->device) {
2877         case BE_DEVICE_ID1:
2878         case OC_DEVICE_ID1:
2879                 adapter->generation = BE_GEN2;
2880                 break;
2881         case BE_DEVICE_ID2:
2882         case OC_DEVICE_ID2:
2883                 adapter->generation = BE_GEN3;
2884                 break;
2885         case OC_DEVICE_ID3:
2886                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2887                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2888                                                 SLI_INTF_IF_TYPE_SHIFT;
2889
2890                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2891                         if_type != 0x02) {
2892                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2893                         return -EINVAL;
2894                 }
2895                 if (num_vfs > 0) {
2896                         dev_err(&pdev->dev, "VFs not supported\n");
2897                         return -EINVAL;
2898                 }
2899                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2900                                          SLI_INTF_FAMILY_SHIFT);
2901                 adapter->generation = BE_GEN3;
2902                 break;
2903         default:
2904                 adapter->generation = 0;
2905         }
2906         return 0;
2907 }
2908
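/* Poll the SLIPORT status register until the ready bit is set, waiting
 * at most 500 * 20ms (~10 seconds).
 */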
2909 static int lancer_wait_ready(struct be_adapter *adapter)
2910 {
2911 #define SLIPORT_READY_TIMEOUT 500
2912         u32 sliport_status;
2913         int status = 0, i;
2914
2915         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2916                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2917                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2918                         break;
2919
2920                 msleep(20);
2921         }
2922
2923         if (i == SLIPORT_READY_TIMEOUT)
2924                 status = -1;
2925
2926         return status;
2927 }
2928
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -EIO;
                } else if (err || reset_needed) {
                        status = -EIO;
                }
        }
        return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

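/*
 * Legacy PCI power management: be_suspend() saves the negotiated flow
 * control settings, tears down the rings via be_clear() and powers the
 * device down; be_resume() mirrors it in reverse, re-initializing firmware
 * and rings before reattaching the netdev and re-arming the worker.
 */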
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * A function-level reset (FLR) stops the BE controller from DMAing any
 * further data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        if (netif_running(adapter->netdev))
                cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        /* reset the function so no DMA continues across the shutdown */
        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

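/*
 * PCI (EEH) error recovery flow, as driven by the PCI core: .error_detected
 * quiesces the device (or disconnects on a permanent failure), .slot_reset
 * re-enables it after the slot reset and re-runs POST, and .resume rebuilds
 * the rings and restarts the interface.
 */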
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers,
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
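
/*
 * Example usage (a sketch; assumes the module is built as be2net.ko per
 * DRV_NAME):
 *
 *      # modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * Out-of-range values are sanitized by be_init_module() above: rx_frag_size
 * falls back to 2048 and num_vfs is capped at 32.
 */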