drivers/net/benet/be_main.c (pandora-kernel.git @ 7cb5a114c7338af0c584607c7bcd1e33dba4c0e2)

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

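/* Allocate zeroed DMA-coherent memory for a ring of 'len' entries of
 * 'entry_size' bytes each.
 */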
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

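/* Enable or disable interrupt delivery to the host by flipping the
 * hostintr bit in the membar control register.
 */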
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

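/* The be_*_notify() routines below ring the doorbell registers to tell
 * the hardware how many entries were posted (RQ/TXQ) or consumed and
 * re-armed (EQ/CQ). The wmb() ensures the queue entries are visible in
 * memory before the doorbell write is issued.
 */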
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

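/* netdev MAC-address change handler: a PF replaces the pmac entry in
 * the interface via FW commands; a VF only updates netdev (see the
 * comment below).
 */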
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

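/* Fold the hardware counters (per-queue rx stats, per-port rxf stats
 * and erx no-fragment drops) into netdev->stats.
 */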
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

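/* Convert a byte count accumulated over 'ticks' jiffies into a rate in
 * megabits per second.
 */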
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

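/* Fill one tx WRB with the DMA address and length of a single fragment */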
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

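/* Map the skb (linear part and page frags) and post one WRB per
 * fragment after the header WRB, plus a dummy WRB if needed to make the
 * total count even; on a DMA mapping error everything mapped so far is
 * unwound and 0 is returned.
 */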
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

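/* Transmit path (ndo_start_xmit): posts WRBs for the skb and rings the
 * tx doorbell; the queue is stopped first if the next skb might not fit.
 */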
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

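/* MTU change handler: reject values outside [BE_MIN_MTU,
 * BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)].
 */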
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

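/* SR-IOV VF management callbacks (set mac/vlan/tx-rate, get config).
 * All of them require SR-IOV to be enabled and a VF index < num_vfs.
 */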
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

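/* Return the page_info of the rx frag at 'frag_idx'; unmap the backing
 * page if this frag was its last user.
 */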
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
        rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
        rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
}

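/* Pop the next valid completion (if any) off the rx CQ and parse it
 * into rxo->rxcp, using the v1 layout on be3-native and v0 otherwise.
 */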
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        /* vlanf could be wrongly set in some cards. Ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                rxcp->vlanf = 0;

        if (!lancer_chip(adapter))
                rxcp->vid = swab16(rxcp->vid);

        if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
                rxcp->vlanf = 0;

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

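/* Unmap and free the tx-ed skb whose last WRB sits at 'last_index' and
 * return the consumed WRB entries to the tx queue.
 */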
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

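/* Drain all pending entries off an EQ, re-arm it, and schedule napi if
 * any events were found.
 */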
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

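/* Drain tx completions for up to ~200ms, then forcibly reclaim any
 * posted WRBs whose completions never arrived.
 */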
1365 static void be_tx_compl_clean(struct be_adapter *adapter)
1366 {
1367         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1368         struct be_queue_info *txq = &adapter->tx_obj.q;
1369         struct be_eth_tx_compl *txcp;
1370         u16 end_idx, cmpl = 0, timeo = 0;
1371         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1372         struct sk_buff *sent_skb;
1373         bool dummy_wrb;
1374
1375         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1376         do {
1377                 while ((txcp = be_tx_compl_get(tx_cq))) {
1378                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1379                                         wrb_index, txcp);
1380                         be_tx_compl_process(adapter, end_idx);
1381                         cmpl++;
1382                 }
1383                 if (cmpl) {
1384                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1385                         cmpl = 0;
1386                 }
1387
1388                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1389                         break;
1390
1391                 mdelay(1);
1392         } while (true);
1393
1394         if (atomic_read(&txq->used))
1395                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1396                         atomic_read(&txq->used));
1397
1398         /* free posted tx for which compls will never arrive */
1399         while (atomic_read(&txq->used)) {
1400                 sent_skb = sent_skbs[txq->tail];
1401                 end_idx = txq->tail;
1402                 index_adv(&end_idx,
1403                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1404                         txq->len);
1405                 be_tx_compl_process(adapter, end_idx);
1406         }
1407 }
1408
1409 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1410 {
1411         struct be_queue_info *q;
1412
1413         q = &adapter->mcc_obj.q;
1414         if (q->created)
1415                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1416         be_queue_free(adapter, q);
1417
1418         q = &adapter->mcc_obj.cq;
1419         if (q->created)
1420                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1421         be_queue_free(adapter, q);
1422 }
1423
1424 /* Must be called only after TX qs are created as MCC shares TX EQ */
1425 static int be_mcc_queues_create(struct be_adapter *adapter)
1426 {
1427         struct be_queue_info *q, *cq;
1428
1429         /* Alloc MCC compl queue */
1430         cq = &adapter->mcc_obj.cq;
1431         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1432                         sizeof(struct be_mcc_compl)))
1433                 goto err;
1434
1435         /* Ask BE to create MCC compl queue; share TX's eq */
1436         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1437                 goto mcc_cq_free;
1438
1439         /* Alloc MCC queue */
1440         q = &adapter->mcc_obj.q;
1441         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1442                 goto mcc_cq_destroy;
1443
1444         /* Ask BE to create MCC queue */
1445         if (be_cmd_mccq_create(adapter, q, cq))
1446                 goto mcc_q_free;
1447
1448         return 0;
1449
1450 mcc_q_free:
1451         be_queue_free(adapter, q);
1452 mcc_cq_destroy:
1453         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1454 mcc_cq_free:
1455         be_queue_free(adapter, cq);
1456 err:
1457         return -1;
1458 }
1459
1460 static void be_tx_queues_destroy(struct be_adapter *adapter)
1461 {
1462         struct be_queue_info *q;
1463
1464         q = &adapter->tx_obj.q;
1465         if (q->created)
1466                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1467         be_queue_free(adapter, q);
1468
1469         q = &adapter->tx_obj.cq;
1470         if (q->created)
1471                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1472         be_queue_free(adapter, q);
1473
1474         /* Clear any residual events */
1475         be_eq_clean(adapter, &adapter->tx_eq);
1476
1477         q = &adapter->tx_eq.q;
1478         if (q->created)
1479                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1480         be_queue_free(adapter, q);
1481 }
1482
1483 static int be_tx_queues_create(struct be_adapter *adapter)
1484 {
1485         struct be_queue_info *eq, *q, *cq;
1486
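        /* Tx EQ uses a fixed interrupt delay; adaptive coalescing (AIC) is off */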
1487         adapter->tx_eq.max_eqd = 0;
1488         adapter->tx_eq.min_eqd = 0;
1489         adapter->tx_eq.cur_eqd = 96;
1490         adapter->tx_eq.enable_aic = false;
1491         /* Alloc Tx Event queue */
1492         eq = &adapter->tx_eq.q;
1493         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1494                 return -1;
1495
1496         /* Ask BE to create Tx Event queue */
1497         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1498                 goto tx_eq_free;
1499
1500         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1501
1503         /* Alloc TX eth compl queue */
1504         cq = &adapter->tx_obj.cq;
1505         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1506                         sizeof(struct be_eth_tx_compl)))
1507                 goto tx_eq_destroy;
1508
1509         /* Ask BE to create Tx eth compl queue */
1510         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1511                 goto tx_cq_free;
1512
1513         /* Alloc TX eth queue */
1514         q = &adapter->tx_obj.q;
1515         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1516                 goto tx_cq_destroy;
1517
1518         /* Ask BE to create Tx eth queue */
1519         if (be_cmd_txq_create(adapter, q, cq))
1520                 goto tx_q_free;
1521         return 0;
1522
1523 tx_q_free:
1524         be_queue_free(adapter, q);
1525 tx_cq_destroy:
1526         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1527 tx_cq_free:
1528         be_queue_free(adapter, cq);
1529 tx_eq_destroy:
1530         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1531 tx_eq_free:
1532         be_queue_free(adapter, eq);
1533         return -1;
1534 }
1535
1536 static void be_rx_queues_destroy(struct be_adapter *adapter)
1537 {
1538         struct be_queue_info *q;
1539         struct be_rx_obj *rxo;
1540         int i;
1541
1542         for_all_rx_queues(adapter, rxo, i) {
1543                 q = &rxo->q;
1544                 if (q->created) {
1545                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1546                         /* After the rxq is invalidated, wait for a grace time
1547                          * of 1ms for all dma to end and the flush compl to
1548                          * arrive
1549                          */
1550                         mdelay(1);
1551                         be_rx_q_clean(adapter, rxo);
1552                 }
1553                 be_queue_free(adapter, q);
1554
1555                 q = &rxo->cq;
1556                 if (q->created)
1557                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1558                 be_queue_free(adapter, q);
1559
1560                 /* Clear any residual events */
1561                 q = &rxo->rx_eq.q;
1562                 if (q->created) {
1563                         be_eq_clean(adapter, &rxo->rx_eq);
1564                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1565                 }
1566                 be_queue_free(adapter, q);
1567         }
1568 }
1569
1570 static int be_rx_queues_create(struct be_adapter *adapter)
1571 {
1572         struct be_queue_info *eq, *q, *cq;
1573         struct be_rx_obj *rxo;
1574         int rc, i;
1575
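        /* size of the page-order allocation from which rx frags are carved */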
1576         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577         for_all_rx_queues(adapter, rxo, i) {
1578                 rxo->adapter = adapter;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
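                /* queue 0 is the default non-RSS queue; only the RSS
                 * queues are entered into the indirection table
                 */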
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
        return eqe->evt != 0;
1644 }
1645
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
        int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
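                /* INTx: read this function's interrupt status register to
                 * find which EQs raised the interrupt
                 */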
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
                if (isr & (1 << adapter->tx_eq.eq_idx))
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
                        if (isr & (1 << rxo->rx_eq.eq_idx))
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
1700 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1701 {
        return rxcp->tcpf && !rxcp->err;
1703 }
1704
1705 static int be_poll_rx(struct napi_struct *napi, int budget)
1706 {
1707         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1708         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1709         struct be_adapter *adapter = rxo->adapter;
1710         struct be_queue_info *rx_cq = &rxo->cq;
1711         struct be_rx_compl_info *rxcp;
1712         u32 work_done;
1713
1714         rxo->stats.rx_polls++;
1715         for (work_done = 0; work_done < budget; work_done++) {
1716                 rxcp = be_rx_compl_get(rxo);
1717                 if (!rxcp)
1718                         break;
1719
1720                 /* Ignore flush completions */
1721                 if (rxcp->num_rcvd) {
1722                         if (do_gro(rxcp))
1723                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1724                         else
1725                                 be_rx_compl_process(adapter, rxo, rxcp);
1726                 }
1727                 be_rx_stats_update(rxo, rxcp);
1728         }
1729
1730         /* Refill the queue */
1731         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1732                 be_post_rx_frags(rxo, GFP_ATOMIC);
1733
1734         /* All consumed */
1735         if (work_done < budget) {
1736                 napi_complete(napi);
1737                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1738         } else {
1739                 /* More to be consumed; continue with interrupts disabled */
1740                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1741         }
1742         return work_done;
1743 }
1744
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour the budget; consume everything.
 */
1748 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1749 {
1750         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1751         struct be_adapter *adapter =
1752                 container_of(tx_eq, struct be_adapter, tx_eq);
1753         struct be_queue_info *txq = &adapter->tx_obj.q;
1754         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1755         struct be_eth_tx_compl *txcp;
1756         int tx_compl = 0, mcc_compl, status = 0;
1757         u16 end_idx;
1758
1759         while ((txcp = be_tx_compl_get(tx_cq))) {
1760                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1761                                 wrb_index, txcp);
1762                 be_tx_compl_process(adapter, end_idx);
1763                 tx_compl++;
1764         }
1765
1766         mcc_compl = be_process_mcc(adapter, &status);
1767
1768         napi_complete(napi);
1769
1770         if (mcc_compl) {
1771                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1772                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1773         }
1774
1775         if (tx_compl) {
1776                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1777
1778                 /* As Tx wrbs have been freed up, wake up netdev queue if
1779                  * it was stopped due to lack of tx wrbs.
1780                  */
1781                 if (netif_queue_stopped(adapter->netdev) &&
1782                         atomic_read(&txq->used) < txq->len / 2) {
1783                         netif_wake_queue(adapter->netdev);
1784                 }
1785
1786                 tx_stats(adapter)->be_tx_events++;
1787                 tx_stats(adapter)->be_tx_compl += tx_compl;
1788         }
1789
1790         return 1;
1791 }
1792
1793 void be_detect_dump_ue(struct be_adapter *adapter)
1794 {
1795         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1796         u32 i;
1797
1798         pci_read_config_dword(adapter->pdev,
1799                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1800         pci_read_config_dword(adapter->pdev,
1801                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1802         pci_read_config_dword(adapter->pdev,
1803                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1804         pci_read_config_dword(adapter->pdev,
1805                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1806
        ue_status_lo &= ~ue_status_lo_mask;
        ue_status_hi &= ~ue_status_hi_mask;
1809
1810         if (ue_status_lo || ue_status_hi) {
1811                 adapter->ue_detected = true;
1812                 adapter->eeh_err = true;
1813                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1814         }
1815
1816         if (ue_status_lo) {
1817                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1818                         if (ue_status_lo & 1)
1819                                 dev_err(&adapter->pdev->dev,
1820                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1821                 }
1822         }
1823         if (ue_status_hi) {
1824                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1825                         if (ue_status_hi & 1)
1826                                 dev_err(&adapter->pdev->dev,
1827                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1828                 }
1829         }
}
1832
1833 static void be_worker(struct work_struct *work)
1834 {
1835         struct be_adapter *adapter =
1836                 container_of(work, struct be_adapter, work.work);
1837         struct be_rx_obj *rxo;
1838         int i;
1839
        /* When interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
1842         if (!netif_running(adapter->netdev)) {
1843                 int mcc_compl, status = 0;
1844
1845                 mcc_compl = be_process_mcc(adapter, &status);
1846
1847                 if (mcc_compl) {
1848                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1849                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1850                 }
1851
1852                 if (!adapter->ue_detected && !lancer_chip(adapter))
1853                         be_detect_dump_ue(adapter);
1854
1855                 goto reschedule;
1856         }
1857
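        /* the stats cmd is async; don't fire a new one until the previous completes */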
1858         if (!adapter->stats_cmd_sent)
1859                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1860
1861         be_tx_rate_update(adapter);
1862
1863         for_all_rx_queues(adapter, rxo, i) {
1864                 be_rx_rate_update(rxo);
1865                 be_rx_eqd_update(adapter, rxo);
1866
1867                 if (rxo->rx_post_starved) {
1868                         rxo->rx_post_starved = false;
1869                         be_post_rx_frags(rxo, GFP_KERNEL);
1870                 }
1871         }
1872         if (!adapter->ue_detected && !lancer_chip(adapter))
1873                 be_detect_dump_ue(adapter);
1874
1875 reschedule:
1876         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1877 }
1878
1879 static void be_msix_disable(struct be_adapter *adapter)
1880 {
1881         if (adapter->msix_enabled) {
1882                 pci_disable_msix(adapter->pdev);
1883                 adapter->msix_enabled = false;
1884         }
1885 }
1886
1887 static int be_num_rxqs_get(struct be_adapter *adapter)
1888 {
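        /* bit 0x400 in function_mode is assumed to be the multi-channel
         * (FLEX10) flag; RSS is not used in that mode
         */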
1889         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1890                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1891                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1892         } else {
1893                 dev_warn(&adapter->pdev->dev,
1894                         "No support for multiple RX queues\n");
1895                 return 1;
1896         }
1897 }
1898
1899 static void be_msix_enable(struct be_adapter *adapter)
1900 {
1901 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1902         int i, status;
1903
1904         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1905
1906         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1907                 adapter->msix_entries[i].entry = i;
1908
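        /* pci_enable_msix() returns 0 on success, or the number of vectors
         * actually available when fewer than requested can be allocated
         */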
1909         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1910                         adapter->num_rx_qs + 1);
1911         if (status == 0) {
1912                 goto done;
1913         } else if (status >= BE_MIN_MSIX_VECTORS) {
1914                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1915                                 status) == 0) {
1916                         adapter->num_rx_qs = status - 1;
1917                         dev_warn(&adapter->pdev->dev,
                                "Could allocate only %d MSIx vectors. "
1919                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1920                         goto done;
1921                 }
1922         }
1923         return;
1924 done:
1925         adapter->msix_enabled = true;
1926 }
1927
1928 static void be_sriov_enable(struct be_adapter *adapter)
1929 {
1930         be_check_sriov_fn_type(adapter);
1931 #ifdef CONFIG_PCI_IOV
1932         if (be_physfn(adapter) && num_vfs) {
1933                 int status;
1934
1935                 status = pci_enable_sriov(adapter->pdev, num_vfs);
                adapter->sriov_enabled = !status;
1937         }
1938 #endif
1939 }
1940
1941 static void be_sriov_disable(struct be_adapter *adapter)
1942 {
1943 #ifdef CONFIG_PCI_IOV
1944         if (adapter->sriov_enabled) {
1945                 pci_disable_sriov(adapter->pdev);
1946                 adapter->sriov_enabled = false;
1947         }
1948 #endif
1949 }
1950
1951 static inline int be_msix_vec_get(struct be_adapter *adapter,
1952                                         struct be_eq_obj *eq_obj)
1953 {
1954         return adapter->msix_entries[eq_obj->eq_idx].vector;
1955 }
1956
1957 static int be_request_irq(struct be_adapter *adapter,
1958                 struct be_eq_obj *eq_obj,
1959                 void *handler, char *desc, void *context)
1960 {
1961         struct net_device *netdev = adapter->netdev;
1962         int vec;
1963
1964         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1965         vec = be_msix_vec_get(adapter, eq_obj);
1966         return request_irq(vec, handler, 0, eq_obj->desc, context);
1967 }
1968
1969 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1970                         void *context)
1971 {
1972         int vec = be_msix_vec_get(adapter, eq_obj);
1973         free_irq(vec, context);
1974 }
1975
1976 static int be_msix_register(struct be_adapter *adapter)
1977 {
1978         struct be_rx_obj *rxo;
1979         int status, i;
1980         char qname[10];
1981
1982         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1983                                 adapter);
1984         if (status)
1985                 goto err;
1986
1987         for_all_rx_queues(adapter, rxo, i) {
1988                 sprintf(qname, "rxq%d", i);
1989                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1990                                 qname, rxo);
1991                 if (status)
1992                         goto err_msix;
1993         }
1994
1995         return 0;
1996
1997 err_msix:
1998         be_free_irq(adapter, &adapter->tx_eq, adapter);
1999
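        /* unwind: free only the rx irqs that were successfully requested */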
2000         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2001                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2002
2003 err:
2004         dev_warn(&adapter->pdev->dev,
2005                 "MSIX Request IRQ failed - err %d\n", status);
2006         pci_disable_msix(adapter->pdev);
2007         adapter->msix_enabled = false;
2008         return status;
2009 }
2010
2011 static int be_irq_register(struct be_adapter *adapter)
2012 {
2013         struct net_device *netdev = adapter->netdev;
2014         int status;
2015
2016         if (adapter->msix_enabled) {
2017                 status = be_msix_register(adapter);
2018                 if (status == 0)
2019                         goto done;
2020                 /* INTx is not supported for VF */
2021                 if (!be_physfn(adapter))
2022                         return status;
2023         }
2024
2025         /* INTx */
2026         netdev->irq = adapter->pdev->irq;
2027         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2028                         adapter);
2029         if (status) {
2030                 dev_err(&adapter->pdev->dev,
2031                         "INTx request IRQ failed - err %d\n", status);
2032                 return status;
2033         }
2034 done:
2035         adapter->isr_registered = true;
2036         return 0;
2037 }
2038
2039 static void be_irq_unregister(struct be_adapter *adapter)
2040 {
2041         struct net_device *netdev = adapter->netdev;
2042         struct be_rx_obj *rxo;
2043         int i;
2044
2045         if (!adapter->isr_registered)
2046                 return;
2047
2048         /* INTx */
2049         if (!adapter->msix_enabled) {
2050                 free_irq(netdev->irq, adapter);
2051                 goto done;
2052         }
2053
2054         /* MSIx */
2055         be_free_irq(adapter, &adapter->tx_eq, adapter);
2056
2057         for_all_rx_queues(adapter, rxo, i)
2058                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2059
2060 done:
2061         adapter->isr_registered = false;
2062 }
2063
2064 static int be_close(struct net_device *netdev)
2065 {
2066         struct be_adapter *adapter = netdev_priv(netdev);
2067         struct be_rx_obj *rxo;
2068         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2069         int vec, i;
2070
2071         be_async_mcc_disable(adapter);
2072
2073         netif_carrier_off(netdev);
2074         adapter->link_up = false;
2075
2076         if (!lancer_chip(adapter))
2077                 be_intr_set(adapter, false);
2078
2079         for_all_rx_queues(adapter, rxo, i)
2080                 napi_disable(&rxo->rx_eq.napi);
2081
2082         napi_disable(&tx_eq->napi);
2083
2084         if (lancer_chip(adapter)) {
2085                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2086                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2087                 for_all_rx_queues(adapter, rxo, i)
                        be_cq_notify(adapter, rxo->cq.id, false, 0);
2089         }
2090
2091         if (adapter->msix_enabled) {
2092                 vec = be_msix_vec_get(adapter, tx_eq);
2093                 synchronize_irq(vec);
2094
2095                 for_all_rx_queues(adapter, rxo, i) {
2096                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2097                         synchronize_irq(vec);
2098                 }
2099         } else {
2100                 synchronize_irq(netdev->irq);
2101         }
2102         be_irq_unregister(adapter);
2103
2104         /* Wait for all pending tx completions to arrive so that
2105          * all tx skbs are freed.
2106          */
2107         be_tx_compl_clean(adapter);
2108
2109         return 0;
2110 }
2111
2112 static int be_open(struct net_device *netdev)
2113 {
2114         struct be_adapter *adapter = netdev_priv(netdev);
2115         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2116         struct be_rx_obj *rxo;
2117         bool link_up;
2118         int status, i;
2119         u8 mac_speed;
2120         u16 link_speed;
2121
2122         for_all_rx_queues(adapter, rxo, i) {
2123                 be_post_rx_frags(rxo, GFP_KERNEL);
2124                 napi_enable(&rxo->rx_eq.napi);
2125         }
2126         napi_enable(&tx_eq->napi);
2127
2128         be_irq_register(adapter);
2129
2130         if (!lancer_chip(adapter))
2131                 be_intr_set(adapter, true);
2132
2133         /* The evt queues are created in unarmed state; arm them */
2134         for_all_rx_queues(adapter, rxo, i) {
2135                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2136                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2137         }
2138         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2139
2140         /* Now that interrupts are on we can process async mcc */
2141         be_async_mcc_enable(adapter);
2142
2143         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2144                         &link_speed);
2145         if (status)
2146                 goto err;
2147         be_link_status_update(adapter, link_up);
2148
2149         if (be_physfn(adapter)) {
2150                 status = be_vid_config(adapter, false, 0);
2151                 if (status)
2152                         goto err;
2153
2154                 status = be_cmd_set_flow_control(adapter,
2155                                 adapter->tx_fc, adapter->rx_fc);
2156                 if (status)
2157                         goto err;
2158         }
2159
2160         return 0;
2161 err:
2162         be_close(adapter->netdev);
2163         return -EIO;
2164 }
2165
2166 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2167 {
2168         struct be_dma_mem cmd;
2169         int status = 0;
2170         u8 mac[ETH_ALEN];
2171
2172         memset(mac, 0, ETH_ALEN);
2173
2174         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2175         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2176                                     GFP_KERNEL);
2177         if (cmd.va == NULL)
                return -ENOMEM;
2179         memset(cmd.va, 0, cmd.size);
2180
2181         if (enable) {
2182                 status = pci_write_config_dword(adapter->pdev,
2183                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2184                 if (status) {
2185                         dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-LAN\n");
2187                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2188                                           cmd.dma);
2189                         return status;
2190                 }
2191                 status = be_cmd_enable_magic_wol(adapter,
2192                                 adapter->netdev->dev_addr, &cmd);
2193                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2194                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2195         } else {
2196                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2197                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2198                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2199         }
2200
2201         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2202         return status;
2203 }
2204
2205 /*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
2208  * These addresses are programmed in the ASIC by the PF and the VF driver
2209  * queries for the MAC address during its probe.
2210  */
2211 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2212 {
2213         u32 vf = 0;
2214         int status = 0;
2215         u8 mac[ETH_ALEN];
2216
2217         be_vf_eth_addr_generate(adapter, mac);
2218
2219         for (vf = 0; vf < num_vfs; vf++) {
2220                 status = be_cmd_pmac_add(adapter, mac,
2221                                         adapter->vf_cfg[vf].vf_if_handle,
2222                                         &adapter->vf_cfg[vf].vf_pmac_id,
2223                                         vf + 1);
2224                 if (status)
2225                         dev_err(&adapter->pdev->dev,
                                "MAC address add failed for VF %d\n", vf);
2227                 else
2228                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2229
2230                 mac[5] += 1;
2231         }
2232         return status;
2233 }
2234
2235 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2236 {
2237         u32 vf;
2238
2239         for (vf = 0; vf < num_vfs; vf++) {
2240                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2241                         be_cmd_pmac_del(adapter,
2242                                         adapter->vf_cfg[vf].vf_if_handle,
2243                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2244         }
2245 }
2246
2247 static int be_setup(struct be_adapter *adapter)
2248 {
2249         struct net_device *netdev = adapter->netdev;
2250         u32 cap_flags, en_flags, vf = 0;
2251         int status;
2252         u8 mac[ETH_ALEN];
2253
2254         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2255                                 BE_IF_FLAGS_BROADCAST |
2256                                 BE_IF_FLAGS_MULTICAST;
2257
2258         if (be_physfn(adapter)) {
2259                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2260                                 BE_IF_FLAGS_PROMISCUOUS |
2261                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2262                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2263
2264                 if (be_multi_rxq(adapter)) {
2265                         cap_flags |= BE_IF_FLAGS_RSS;
2266                         en_flags |= BE_IF_FLAGS_RSS;
2267                 }
2268         }
2269
2270         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2271                         netdev->dev_addr, false/* pmac_invalid */,
2272                         &adapter->if_handle, &adapter->pmac_id, 0);
2273         if (status != 0)
2274                 goto do_none;
2275
2276         if (be_physfn(adapter)) {
2277                 if (adapter->sriov_enabled) {
2278                         while (vf < num_vfs) {
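                                /* pmac_invalid is true, so the mac arg is
                                 * ignored; VF MACs are programmed later via
                                 * be_vf_eth_addr_config()
                                 */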
2279                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2280                                                         BE_IF_FLAGS_BROADCAST;
2281                                 status = be_cmd_if_create(adapter, cap_flags,
2282                                         en_flags, mac, true,
2283                                         &adapter->vf_cfg[vf].vf_if_handle,
2284                                         NULL, vf+1);
2285                                 if (status) {
2286                                         dev_err(&adapter->pdev->dev,
2287                                         "Interface Create failed for VF %d\n",
2288                                         vf);
2289                                         goto if_destroy;
2290                                 }
2291                                 adapter->vf_cfg[vf].vf_pmac_id =
2292                                                         BE_INVALID_PMAC_ID;
2293                                 vf++;
2294                         }
2295                 }
2296         } else {
2297                 status = be_cmd_mac_addr_query(adapter, mac,
2298                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2299                 if (!status) {
2300                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2301                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2302                 }
2303         }
2304
2305         status = be_tx_queues_create(adapter);
2306         if (status != 0)
2307                 goto if_destroy;
2308
2309         status = be_rx_queues_create(adapter);
2310         if (status != 0)
2311                 goto tx_qs_destroy;
2312
2313         status = be_mcc_queues_create(adapter);
2314         if (status != 0)
2315                 goto rx_qs_destroy;
2316
2317         adapter->link_speed = -1;
2318
2319         return 0;
2320
2322 rx_qs_destroy:
2323         be_rx_queues_destroy(adapter);
2324 tx_qs_destroy:
2325         be_tx_queues_destroy(adapter);
2326 if_destroy:
2327         if (be_physfn(adapter) && adapter->sriov_enabled)
2328                 for (vf = 0; vf < num_vfs; vf++)
2329                         if (adapter->vf_cfg[vf].vf_if_handle)
2330                                 be_cmd_if_destroy(adapter,
2331                                         adapter->vf_cfg[vf].vf_if_handle,
2332                                         vf + 1);
2333         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2334 do_none:
2335         return status;
2336 }
2337
2338 static int be_clear(struct be_adapter *adapter)
2339 {
2340         int vf;
2341
2342         if (be_physfn(adapter) && adapter->sriov_enabled)
2343                 be_vf_eth_addr_rem(adapter);
2344
2345         be_mcc_queues_destroy(adapter);
2346         be_rx_queues_destroy(adapter);
2347         be_tx_queues_destroy(adapter);
2348         adapter->eq_next_idx = 0;
2349
2350         if (be_physfn(adapter) && adapter->sriov_enabled)
2351                 for (vf = 0; vf < num_vfs; vf++)
2352                         if (adapter->vf_cfg[vf].vf_if_handle)
2353                                 be_cmd_if_destroy(adapter,
2354                                         adapter->vf_cfg[vf].vf_if_handle,
2355                                         vf + 1);
2356
        be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2358
2359         /* tell fw we're done with firing cmds */
2360         be_cmd_fw_clean(adapter);
2361         return 0;
2362 }
2363
2365 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2366 static bool be_flash_redboot(struct be_adapter *adapter,
2367                         const u8 *p, u32 img_start, int image_size,
2368                         int hdr_size)
2369 {
2370         u32 crc_offset;
2371         u8 flashed_crc[4];
2372         int status;
2373
2374         crc_offset = hdr_size + img_start + image_size - 4;
2375
2376         p += crc_offset;
2377
2378         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2379                         (image_size - 4));
2380         if (status) {
2381                 dev_err(&adapter->pdev->dev,
2382                 "could not get crc from flash, not flashing redboot\n");
2383                 return false;
2384         }
2385
        /* update redboot only if crc does not match */
        return memcmp(flashed_crc, p, 4) != 0;
2391 }
2392
2393 static int be_flash_data(struct be_adapter *adapter,
2394                         const struct firmware *fw,
2395                         struct be_dma_mem *flash_cmd, int num_of_images)
{
2398         int status = 0, i, filehdr_size = 0;
2399         u32 total_bytes = 0, flash_op;
2400         int num_bytes;
2401         const u8 *p = fw->data;
2402         struct be_cmd_write_flashrom *req = flash_cmd->va;
2403         const struct flash_comp *pflashcomp;
2404         int num_comp;
2405
2406         static const struct flash_comp gen3_flash_types[9] = {
2407                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2408                         FLASH_IMAGE_MAX_SIZE_g3},
2409                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2410                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2411                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2412                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2414                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2415                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2416                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2417                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2418                         FLASH_IMAGE_MAX_SIZE_g3},
2419                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2420                         FLASH_IMAGE_MAX_SIZE_g3},
2421                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2422                         FLASH_IMAGE_MAX_SIZE_g3},
2423                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2424                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2425         };
2426         static const struct flash_comp gen2_flash_types[8] = {
2427                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2428                         FLASH_IMAGE_MAX_SIZE_g2},
2429                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2430                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2431                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2432                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2434                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2435                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2436                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2437                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2438                         FLASH_IMAGE_MAX_SIZE_g2},
2439                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2440                         FLASH_IMAGE_MAX_SIZE_g2},
2441                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2}
2443         };
2444
2445         if (adapter->generation == BE_GEN3) {
2446                 pflashcomp = gen3_flash_types;
2447                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2448                 num_comp = ARRAY_SIZE(gen3_flash_types);
2449         } else {
2450                 pflashcomp = gen2_flash_types;
2451                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2452                 num_comp = ARRAY_SIZE(gen2_flash_types);
2453         }
2454         for (i = 0; i < num_comp; i++) {
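                /* the NCSI image is flashed only when the adapter's current
                 * fw version is at least 3.102.148.0
                 */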
2455                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2456                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2457                         continue;
2458                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2459                         (!be_flash_redboot(adapter, fw->data,
2460                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2461                         (num_of_images * sizeof(struct image_hdr)))))
2462                         continue;
2463                 p = fw->data;
2464                 p += filehdr_size + pflashcomp[i].offset
2465                         + (num_of_images * sizeof(struct image_hdr));
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
                total_bytes = pflashcomp[i].size;
2469                 while (total_bytes) {
2470                         if (total_bytes > 32*1024)
2471                                 num_bytes = 32*1024;
2472                         else
2473                                 num_bytes = total_bytes;
2474                         total_bytes -= num_bytes;
2475
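                        /* only the final chunk issues the actual flash
                         * operation; earlier chunks are only saved/staged
                         */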
2476                         if (!total_bytes)
2477                                 flash_op = FLASHROM_OPER_FLASH;
2478                         else
2479                                 flash_op = FLASHROM_OPER_SAVE;
2480                         memcpy(req->params.data_buf, p, num_bytes);
2481                         p += num_bytes;
2482                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2483                                 pflashcomp[i].optype, flash_op, num_bytes);
2484                         if (status) {
2485                                 dev_err(&adapter->pdev->dev,
2486                                         "cmd to write to flash rom failed.\n");
2487                                 return -1;
2488                         }
2489                         yield();
2490                 }
2491         }
2492         return 0;
2493 }
2494
2495 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2496 {
2497         if (fhdr == NULL)
2498                 return 0;
2499         if (fhdr->build[0] == '3')
2500                 return BE_GEN3;
2501         else if (fhdr->build[0] == '2')
2502                 return BE_GEN2;
2503         else
2504                 return 0;
2505 }
2506
2507 int be_load_fw(struct be_adapter *adapter, u8 *func)
2508 {
2509         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2510         const struct firmware *fw;
2511         struct flash_file_hdr_g2 *fhdr;
2512         struct flash_file_hdr_g3 *fhdr3;
2513         struct image_hdr *img_hdr_ptr = NULL;
2514         struct be_dma_mem flash_cmd;
2515         int status, i = 0, num_imgs = 0;
2516         const u8 *p;
2517
2518         if (!netif_running(adapter->netdev)) {
2519                 dev_err(&adapter->pdev->dev,
2520                         "Firmware load not allowed (interface is down)\n");
2521                 return -EPERM;
2522         }
2523
2524         strcpy(fw_file, func);
2525
2526         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2527         if (status)
2528                 goto fw_exit;
2529
2530         p = fw->data;
2531         fhdr = (struct flash_file_hdr_g2 *) p;
2532         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2533
2534         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2535         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2536                                           &flash_cmd.dma, GFP_KERNEL);
2537         if (!flash_cmd.va) {
2538                 status = -ENOMEM;
2539                 dev_err(&adapter->pdev->dev,
2540                         "Memory allocation failure while flashing\n");
2541                 goto fw_exit;
2542         }
2543
2544         if ((adapter->generation == BE_GEN3) &&
2545                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2546                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2547                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2548                 for (i = 0; i < num_imgs; i++) {
2549                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2550                                         (sizeof(struct flash_file_hdr_g3) +
2551                                          i * sizeof(struct image_hdr)));
2552                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2553                                 status = be_flash_data(adapter, fw, &flash_cmd,
2554                                                         num_imgs);
2555                 }
2556         } else if ((adapter->generation == BE_GEN2) &&
2557                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2558                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2559         } else {
2560                 dev_err(&adapter->pdev->dev,
2561                         "UFI and Interface are not compatible for flashing\n");
2562                 status = -1;
2563         }
2564
2565         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2566                           flash_cmd.dma);
2567         if (status) {
2568                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2569                 goto fw_exit;
2570         }
2571
2572         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2573
2574 fw_exit:
2575         release_firmware(fw);
2576         return status;
2577 }
2578
static const struct net_device_ops be_netdev_ops = {
2580         .ndo_open               = be_open,
2581         .ndo_stop               = be_close,
2582         .ndo_start_xmit         = be_xmit,
2583         .ndo_set_rx_mode        = be_set_multicast_list,
2584         .ndo_set_mac_address    = be_mac_addr_set,
2585         .ndo_change_mtu         = be_change_mtu,
2586         .ndo_validate_addr      = eth_validate_addr,
2587         .ndo_vlan_rx_register   = be_vlan_register,
2588         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2589         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2590         .ndo_set_vf_mac         = be_set_vf_mac,
2591         .ndo_set_vf_vlan        = be_set_vf_vlan,
2592         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
2594 };
2595
2596 static void be_netdev_init(struct net_device *netdev)
2597 {
2598         struct be_adapter *adapter = netdev_priv(netdev);
2599         struct be_rx_obj *rxo;
2600         int i;
2601
2602         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2603                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2604                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2605                 NETIF_F_GRO | NETIF_F_TSO6;
2606
2607         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2608                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2609
2610         if (lancer_chip(adapter))
2611                 netdev->vlan_features |= NETIF_F_TSO6;
2612
2613         netdev->flags |= IFF_MULTICAST;
2614
2615         adapter->rx_csum = true;
2616
2617         /* Default settings for Rx and Tx flow control */
2618         adapter->rx_fc = true;
2619         adapter->tx_fc = true;
2620
2621         netif_set_gso_max_size(netdev, 65535);
2622
2623         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2624
2625         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2626
2627         for_all_rx_queues(adapter, rxo, i)
2628                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2629                                 BE_NAPI_WEIGHT);
2630
2631         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2632                 BE_NAPI_WEIGHT);
2633 }
2634
2635 static void be_unmap_pci_bars(struct be_adapter *adapter)
2636 {
2637         if (adapter->csr)
2638                 iounmap(adapter->csr);
2639         if (adapter->db)
2640                 iounmap(adapter->db);
2641         if (adapter->pcicfg && be_physfn(adapter))
2642                 iounmap(adapter->pcicfg);
2643 }
2644
2645 static int be_map_pci_bars(struct be_adapter *adapter)
2646 {
2647         u8 __iomem *addr;
2648         int pcicfg_reg, db_reg;
2649
2650         if (lancer_chip(adapter)) {
2651                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652                         pci_resource_len(adapter->pdev, 0));
2653                 if (addr == NULL)
2654                         return -ENOMEM;
2655                 adapter->db = addr;
2656                 return 0;
2657         }
2658
2659         if (be_physfn(adapter)) {
2660                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2661                                 pci_resource_len(adapter->pdev, 2));
2662                 if (addr == NULL)
2663                         return -ENOMEM;
2664                 adapter->csr = addr;
2665         }
2666
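        /* BAR layout differs per generation: pcicfg is BAR 1 on BE2 and
         * BAR 0 on BE3; the doorbell is BAR 4, except on BE3 VFs (BAR 0)
         */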
2667         if (adapter->generation == BE_GEN2) {
2668                 pcicfg_reg = 1;
2669                 db_reg = 4;
2670         } else {
2671                 pcicfg_reg = 0;
2672                 if (be_physfn(adapter))
2673                         db_reg = 4;
2674                 else
2675                         db_reg = 0;
2676         }
2677         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2678                                 pci_resource_len(adapter->pdev, db_reg));
2679         if (addr == NULL)
2680                 goto pci_map_err;
2681         adapter->db = addr;
2682
2683         if (be_physfn(adapter)) {
2684                 addr = ioremap_nocache(
2685                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2686                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2687                 if (addr == NULL)
2688                         goto pci_map_err;
2689                 adapter->pcicfg = addr;
        } else {
                adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
        }
2692
2693         return 0;
2694 pci_map_err:
2695         be_unmap_pci_bars(adapter);
2696         return -ENOMEM;
2697 }
2698
2700 static void be_ctrl_cleanup(struct be_adapter *adapter)
2701 {
2702         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2703
2704         be_unmap_pci_bars(adapter);
2705
2706         if (mem->va)
2707                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2708                                   mem->dma);
2709
2710         mem = &adapter->mc_cmd_mem;
2711         if (mem->va)
2712                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2713                                   mem->dma);
2714 }
2715
2716 static int be_ctrl_init(struct be_adapter *adapter)
2717 {
2718         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2719         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2720         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2721         int status;
2722
2723         status = be_map_pci_bars(adapter);
2724         if (status)
2725                 goto done;
2726
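        /* over-allocate by 16 bytes so the mailbox can be 16-byte aligned below */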
2727         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2728         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2729                                                 mbox_mem_alloc->size,
2730                                                 &mbox_mem_alloc->dma,
2731                                                 GFP_KERNEL);
2732         if (!mbox_mem_alloc->va) {
2733                 status = -ENOMEM;
2734                 goto unmap_pci_bars;
2735         }
2736
2737         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2738         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2739         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2740         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2741
2742         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2743         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2744                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2745                                             GFP_KERNEL);
2746         if (mc_cmd_mem->va == NULL) {
2747                 status = -ENOMEM;
2748                 goto free_mbox;
2749         }
2750         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2751
2752         mutex_init(&adapter->mbox_lock);
2753         spin_lock_init(&adapter->mcc_lock);
2754         spin_lock_init(&adapter->mcc_cq_lock);
2755
2756         init_completion(&adapter->flash_compl);
2757         pci_save_state(adapter->pdev);
2758         return 0;
2759
2760 free_mbox:
2761         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2762                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2763
2764 unmap_pci_bars:
2765         be_unmap_pci_bars(adapter);
2766
2767 done:
2768         return status;
2769 }
2770
2771 static void be_stats_cleanup(struct be_adapter *adapter)
2772 {
2773         struct be_dma_mem *cmd = &adapter->stats_cmd;
2774
2775         if (cmd->va)
2776                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2777                                   cmd->va, cmd->dma);
2778 }
2779
2780 static int be_stats_init(struct be_adapter *adapter)
2781 {
2782         struct be_dma_mem *cmd = &adapter->stats_cmd;
2783
2784         cmd->size = sizeof(struct be_cmd_req_get_stats);
2785         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2786                                      GFP_KERNEL);
2787         if (cmd->va == NULL)
                return -ENOMEM;
2789         memset(cmd->va, 0, cmd->size);
2790         return 0;
2791 }
2792
2793 static void __devexit be_remove(struct pci_dev *pdev)
2794 {
2795         struct be_adapter *adapter = pci_get_drvdata(pdev);
2796
2797         if (!adapter)
2798                 return;
2799
2800         cancel_delayed_work_sync(&adapter->work);
2801
2802         unregister_netdev(adapter->netdev);
2803
2804         be_clear(adapter);
2805
2806         be_stats_cleanup(adapter);
2807
2808         be_ctrl_cleanup(adapter);
2809
2810         be_sriov_disable(adapter);
2811
2812         be_msix_disable(adapter);
2813
2814         pci_set_drvdata(pdev, NULL);
2815         pci_release_regions(pdev);
2816         pci_disable_device(pdev);
2817
2818         free_netdev(adapter->netdev);
2819 }
2820
2821 static int be_get_config(struct be_adapter *adapter)
2822 {
2823         int status;
2824         u8 mac[ETH_ALEN];
2825
2826         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2827         if (status)
2828                 return status;
2829
2830         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2831                         &adapter->function_mode, &adapter->function_caps);
2832         if (status)
2833                 return status;
2834
2835         memset(mac, 0, ETH_ALEN);
2836
2837         if (be_physfn(adapter)) {
2838                 status = be_cmd_mac_addr_query(adapter, mac,
2839                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2840
2841                 if (status)
2842                         return status;
2843
2844                 if (!is_valid_ether_addr(mac))
2845                         return -EADDRNOTAVAIL;
2846
2847                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2848                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2849         }
2850
2851         if (adapter->function_mode & 0x400)
2852                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2853         else
2854                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2855
2856         status = be_cmd_get_cntl_attributes(adapter);
2857         if (status)
2858                 return status;
2859
2860         be_cmd_check_native_mode(adapter);
2861         return 0;
2862 }
2863
2864 static int be_dev_family_check(struct be_adapter *adapter)
2865 {
2866         struct pci_dev *pdev = adapter->pdev;
2867         u32 sli_intf = 0, if_type;
2868
2869         switch (pdev->device) {
2870         case BE_DEVICE_ID1:
2871         case OC_DEVICE_ID1:
2872                 adapter->generation = BE_GEN2;
2873                 break;
2874         case BE_DEVICE_ID2:
2875         case OC_DEVICE_ID2:
2876                 adapter->generation = BE_GEN3;
2877                 break;
2878         case OC_DEVICE_ID3:
2879                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2880                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2881                                                 SLI_INTF_IF_TYPE_SHIFT;
2882
2883                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2884                         if_type != 0x02) {
2885                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2886                         return -EINVAL;
2887                 }
2888                 if (num_vfs > 0) {
2889                         dev_err(&pdev->dev, "VFs not supported\n");
2890                         return -EINVAL;
2891                 }
2892                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2893                                          SLI_INTF_FAMILY_SHIFT);
2894                 adapter->generation = BE_GEN3;
2895                 break;
2896         default:
2897                 adapter->generation = 0;
2898         }
2899         return 0;
2900 }
2901
2902 static int lancer_wait_ready(struct be_adapter *adapter)
2903 {
2904 #define SLIPORT_READY_TIMEOUT 500
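        /* with 20ms sleeps below, this allows ~10s for the port to become ready */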
2905         u32 sliport_status;
2906         int status = 0, i;
2907
2908         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2909                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2910                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2911                         break;
2912
2913                 msleep(20);
2914         }
2915
2916         if (i == SLIPORT_READY_TIMEOUT)
2917                 status = -1;
2918
2919         return status;
2920 }
2921
2922 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2923 {
2924         int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
2927         if (!status) {
2928                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2929                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2930                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2931                 if (err && reset_needed) {
2932                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
2933                                         adapter->db + SLIPORT_CONTROL_OFFSET);
2934
                        /* check if adapter has corrected the error */
2936                         status = lancer_wait_ready(adapter);
2937                         sliport_status = ioread32(adapter->db +
2938                                                         SLIPORT_STATUS_OFFSET);
2939                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2940                                                 SLIPORT_STATUS_RN_MASK);
2941                         if (status || sliport_status)
2942                                 status = -1;
2943                 } else if (err || reset_needed) {
2944                         status = -1;
2945                 }
2946         }
2947         return status;
2948 }
2949
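/* Standard PCI probe: enable the device and BARs, map control
 * structures, wait for firmware POST, initialize the mailbox and reset
 * the function, then bring up stats, MSI-X, queues, and finally
 * register the netdev. Errors unwind in reverse order via the labels
 * at the bottom.
 */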
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

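        /* Prefer a 64-bit DMA mask (and advertise NETIF_F_HIGHDMA);
         * fall back to 32-bit addressing if the platform can't do it.
         */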
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        /* be_ctrl_init() succeeded, so unwind it too */
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

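/* PM suspend hook: stop the worker, optionally arm wake-on-LAN, quiesce
 * the interface, tear down queues, and put the device into the
 * requested low-power state.
 */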
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

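/* PM resume hook, the inverse of be_suspend(): re-enable the device,
 * re-init firmware and queues, reopen the interface if it was running,
 * and disarm wake-on-LAN.
 */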
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        /* The worker is scheduled in be_probe() whether or not the
         * interface is up, so cancel it unconditionally.
         */
        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

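/* PCI Express advanced error recovery (AER/EEH) callbacks: detect the
 * error and quiesce, reset the slot and re-POST the card, then rebuild
 * state and reattach the netdev.
 */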
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

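/* Validate module parameters before registering the driver. For
 * example (assuming the module is loaded as be2net):
 *
 *      modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * rx_frag_size must be one of 2048/4096/8192 and num_vfs is capped
 * at 32; out-of-range values are coerced with a warning.
 */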
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 32) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param num_vfs must not be greater than 32."
                        " Using 32\n");
                num_vfs = 32;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);