be2net: Use NTWK_RX_FILTER command for promiscuous mode
drivers/net/benet/be_main.c (pandora-kernel.git)
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

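/* Helpers to allocate/free the DMA-coherent descriptor memory backing a
 * queue. be_queue_alloc() zeroes both the queue struct and the descriptor
 * area; be_queue_free() tolerates a queue that was never allocated
 * (mem->va == NULL). */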
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

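/* Enable/disable host interrupts via the interrupt-control register in the
 * PCI config membar. The current state is read first so the register is
 * only written on a real change, and nothing is touched once an EEH error
 * has been flagged on the device. */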
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

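/* Doorbell helpers: each queue type (RQ, TXQ, EQ, CQ) has its own doorbell
 * offset and bit layout in the db BAR. The wmb() before the RQ/TXQ rings
 * makes sure posted descriptors are visible to the adapter before it is
 * told how many were posted. */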
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

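/* ndo_set_mac_address handler. On the PF the old pmac-id is deleted and the
 * new MAC programmed on the interface before netdev->dev_addr is updated;
 * a VF's MAC is programmed by its PF, so only the netdev copy is updated
 * here. */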
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

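/* A worked example of the adaptive EQ-delay math below: at 1,400,000 rx
 * frags/sec, eqd = (1400000 / 110000) << 3 = 12 << 3 = 96, clamped to the
 * [min_eqd, max_eqd] range; any result below 10 disables the delay. */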
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

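/* Converts a byte count measured over 'ticks' jiffies into Mbits/sec.
 * E.g. 250,000,000 bytes over 2 * HZ ticks: 250e6 / 2 = 125e6 bytes/sec,
 * << 3 = 1e9 bits/sec, / 1e6 = 1000 Mbits/sec. */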
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

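/* Example for the count above: a linear skb with two page frags needs
 * 1 (hdr wrb) + 1 (linear head) + 2 (frags) = 4 WRBs; 4 is already even,
 * so no dummy WRB is added. The even-count requirement does not apply to
 * Lancer chips. */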
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

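/* Build the WRB chain for an skb: the header WRB slot is reserved first,
 * then one WRB is filled per DMA-mapped piece (linear head, page frags,
 * optional dummy), and the header WRB is filled last with the totals.
 * On a mapping error every WRB mapped so far is unwound. */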
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

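/* ndo_set_multicast_list handler, in order of precedence: IFF_PROMISC
 * enables (and its absence, if previously set, disables) promiscuous mode;
 * IFF_ALLMULTI or more than BE_MAX_MC addresses falls back to multicast
 * promiscuous (NULL list); otherwise the exact multicast list is
 * programmed. */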
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

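/* Look up the page_info of a posted rx frag and drop the queue's reference
 * to it. One DMA mapping covers a whole "big" page shared by several frags,
 * so it is unmapped only when the frag marked last_page_user is consumed. */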
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
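/* For example, assuming rx_frag_size is the default 2048 and big_page_size
 * is 16K (the actual value is adapter-dependent), one page yields 8 frags:
 * every frag after the first takes a get_page() reference and the final one
 * is marked last_page_user so the single DMA mapping is released once. */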
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

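/* Reclaim one transmitted skb: starting at the queue tail, skip the header
 * WRB, unmap every data WRB (dma_unmap_single for the linear head, page
 * unmap for frags) up to last_index, return the WRBs to the queue and free
 * the skb. */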
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

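/* Drain the EQ, re-arm it while clearing the interrupt, and kick NAPI if
 * any events were consumed. Even a spurious interrupt with no events
 * re-arms the EQ so subsequent events keep raising interrupts. */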
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

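/* Flush an RX queue at teardown: consume and discard all pending
 * completions first, then walk the still-posted buffers from the computed
 * tail and release their pages. */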
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
1479
1480 static void be_tx_queues_destroy(struct be_adapter *adapter)
1481 {
1482         struct be_queue_info *q;
1483
1484         q = &adapter->tx_obj.q;
1485         if (q->created)
1486                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1487         be_queue_free(adapter, q);
1488
1489         q = &adapter->tx_obj.cq;
1490         if (q->created)
1491                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1492         be_queue_free(adapter, q);
1493
1494         /* Clear any residual events */
1495         be_eq_clean(adapter, &adapter->tx_eq);
1496
1497         q = &adapter->tx_eq.q;
1498         if (q->created)
1499                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1500         be_queue_free(adapter, q);
1501 }
1502
1503 static int be_tx_queues_create(struct be_adapter *adapter)
1504 {
1505         struct be_queue_info *eq, *q, *cq;
1506
1507         adapter->tx_eq.max_eqd = 0;
1508         adapter->tx_eq.min_eqd = 0;
1509         adapter->tx_eq.cur_eqd = 96;
1510         adapter->tx_eq.enable_aic = false;
1511         /* Alloc Tx Event queue */
1512         eq = &adapter->tx_eq.q;
1513         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1514                 return -1;
1515
1516         /* Ask BE to create Tx Event queue */
1517         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1518                 goto tx_eq_free;
1519
1520         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1521
1523         /* Alloc TX eth compl queue */
1524         cq = &adapter->tx_obj.cq;
1525         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1526                         sizeof(struct be_eth_tx_compl)))
1527                 goto tx_eq_destroy;
1528
1529         /* Ask BE to create Tx eth compl queue */
1530         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1531                 goto tx_cq_free;
1532
1533         /* Alloc TX eth queue */
1534         q = &adapter->tx_obj.q;
1535         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1536                 goto tx_cq_destroy;
1537
1538         /* Ask BE to create Tx eth queue */
1539         if (be_cmd_txq_create(adapter, q, cq))
1540                 goto tx_q_free;
1541         return 0;
1542
1543 tx_q_free:
1544         be_queue_free(adapter, q);
1545 tx_cq_destroy:
1546         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1547 tx_cq_free:
1548         be_queue_free(adapter, cq);
1549 tx_eq_destroy:
1550         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1551 tx_eq_free:
1552         be_queue_free(adapter, eq);
1553         return -1;
1554 }
1555
1556 static void be_rx_queues_destroy(struct be_adapter *adapter)
1557 {
1558         struct be_queue_info *q;
1559         struct be_rx_obj *rxo;
1560         int i;
1561
1562         for_all_rx_queues(adapter, rxo, i) {
1563                 q = &rxo->q;
1564                 if (q->created) {
1565                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1566                         /* After the rxq is invalidated, wait for a grace time
1567                          * of 1ms for all dma to end and the flush compl to
1568                          * arrive
1569                          */
1570                         mdelay(1);
1571                         be_rx_q_clean(adapter, rxo);
1572                 }
1573                 be_queue_free(adapter, q);
1574
1575                 q = &rxo->cq;
1576                 if (q->created)
1577                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1578                 be_queue_free(adapter, q);
1579
1580                 /* Clear any residual events */
1581                 q = &rxo->rx_eq.q;
1582                 if (q->created) {
1583                         be_eq_clean(adapter, &rxo->rx_eq);
1584                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1585                 }
1586                 be_queue_free(adapter, q);
1587         }
1588 }
1589
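/* Multiple RX queues are used only when the function is RSS-capable,
 * SR-IOV is off and the function is not in multi-channel mode (the 0x400
 * function_mode bit; believed to be the FLEX10 mode flag).
 */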
1590 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1591 {
1592         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1593                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1594                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1595         } else {
1596                 dev_warn(&adapter->pdev->dev,
1597                         "No support for multiple RX queues\n");
1598                 return 1;
1599         }
1600 }
1601
1602 static int be_rx_queues_create(struct be_adapter *adapter)
1603 {
1604         struct be_queue_info *eq, *q, *cq;
1605         struct be_rx_obj *rxo;
1606         int rc, i;
1607
1608         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1609                                 msix_enabled(adapter) ?
1610                                         adapter->num_msix_vec - 1 : 1);
1611         if (adapter->num_rx_qs != MAX_RX_QS)
1612                 dev_warn(&adapter->pdev->dev,
1613                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1614
1615         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1616         for_all_rx_queues(adapter, rxo, i) {
1617                 rxo->adapter = adapter;
1618                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1619                 rxo->rx_eq.enable_aic = true;
1620
1621                 /* EQ */
1622                 eq = &rxo->rx_eq.q;
1623                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1624                                         sizeof(struct be_eq_entry));
1625                 if (rc)
1626                         goto err;
1627
1628                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1629                 if (rc)
1630                         goto err;
1631
1632                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1633
1634                 /* CQ */
1635                 cq = &rxo->cq;
1636                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1637                                 sizeof(struct be_eth_rx_compl));
1638                 if (rc)
1639                         goto err;
1640
1641                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1642                 if (rc)
1643                         goto err;
1644                 /* Rx Q */
1645                 q = &rxo->q;
1646                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1647                                 sizeof(struct be_eth_rx_d));
1648                 if (rc)
1649                         goto err;
1650
1651                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1652                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1653                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1654                 if (rc)
1655                         goto err;
1656         }
1657
1658         if (be_multi_rxq(adapter)) {
1659                 u8 rsstable[MAX_RSS_QS];
1660
1661                 for_all_rss_queues(adapter, rxo, i)
1662                         rsstable[i] = rxo->rss_id;
1663
1664                 rc = be_cmd_rss_config(adapter, rsstable,
1665                         adapter->num_rx_qs - 1);
1666                 if (rc)
1667                         goto err;
1668         }
1669
1670         return 0;
1671 err:
1672         be_rx_queues_destroy(adapter);
1673         return -1;
1674 }
1675
1676 static bool event_peek(struct be_eq_obj *eq_obj)
1677 {
1678         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1679         return eqe->evt != 0;
1683 }
1684
1685 static irqreturn_t be_intx(int irq, void *dev)
1686 {
1687         struct be_adapter *adapter = dev;
1688         struct be_rx_obj *rxo;
1689         int isr, i, tx = 0, rx = 0;
1690
1691         if (lancer_chip(adapter)) {
1692                 if (event_peek(&adapter->tx_eq))
1693                         tx = event_handle(adapter, &adapter->tx_eq);
1694                 for_all_rx_queues(adapter, rxo, i) {
1695                         if (event_peek(&rxo->rx_eq))
1696                                 rx |= event_handle(adapter, &rxo->rx_eq);
1697                 }
1698
1699                 if (!(tx || rx))
1700                         return IRQ_NONE;
1701
1702         } else {
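                /* Legacy INTx: read the CEV interrupt-status word covering
                 * this function's EQs and dispatch every EQ whose eq_idx
                 * bit is set in it.
                 */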
1703                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1704                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1705                 if (!isr)
1706                         return IRQ_NONE;
1707
1708                 if ((1 << adapter->tx_eq.eq_idx & isr))
1709                         event_handle(adapter, &adapter->tx_eq);
1710
1711                 for_all_rx_queues(adapter, rxo, i) {
1712                         if ((1 << rxo->rx_eq.eq_idx & isr))
1713                                 event_handle(adapter, &rxo->rx_eq);
1714                 }
1715         }
1716
1717         return IRQ_HANDLED;
1718 }
1719
1720 static irqreturn_t be_msix_rx(int irq, void *dev)
1721 {
1722         struct be_rx_obj *rxo = dev;
1723         struct be_adapter *adapter = rxo->adapter;
1724
1725         event_handle(adapter, &rxo->rx_eq);
1726
1727         return IRQ_HANDLED;
1728 }
1729
1730 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1731 {
1732         struct be_adapter *adapter = dev;
1733
1734         event_handle(adapter, &adapter->tx_eq);
1735
1736         return IRQ_HANDLED;
1737 }
1738
1739 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1740 {
1741         return rxcp->tcpf && !rxcp->err;
1742 }
1743
1744 static int be_poll_rx(struct napi_struct *napi, int budget)
1745 {
1746         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1747         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1748         struct be_adapter *adapter = rxo->adapter;
1749         struct be_queue_info *rx_cq = &rxo->cq;
1750         struct be_rx_compl_info *rxcp;
1751         u32 work_done;
1752
1753         rxo->stats.rx_polls++;
1754         for (work_done = 0; work_done < budget; work_done++) {
1755                 rxcp = be_rx_compl_get(rxo);
1756                 if (!rxcp)
1757                         break;
1758
1759                 /* Ignore flush completions */
1760                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1761                         if (do_gro(rxcp))
1762                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1763                         else
1764                                 be_rx_compl_process(adapter, rxo, rxcp);
1765                 } else if (rxcp->pkt_size == 0) {
1766                         be_rx_compl_discard(adapter, rxo, rxcp);
1767                 }
1768
1769                 be_rx_stats_update(rxo, rxcp);
1770         }
1771
1772         /* Refill the queue */
1773         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1774                 be_post_rx_frags(rxo, GFP_ATOMIC);
1775
1776         /* All consumed */
1777         if (work_done < budget) {
1778                 napi_complete(napi);
1779                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1780         } else {
1781                 /* More to be consumed; continue with interrupts disabled */
1782                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1783         }
1784         return work_done;
1785 }
1786
1787 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1788  * For TX/MCC we don't honour the budget; consume everything.
1789  */
1790 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1791 {
1792         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1793         struct be_adapter *adapter =
1794                 container_of(tx_eq, struct be_adapter, tx_eq);
1795         struct be_queue_info *txq = &adapter->tx_obj.q;
1796         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1797         struct be_eth_tx_compl *txcp;
1798         int tx_compl = 0, mcc_compl, status = 0;
1799         u16 end_idx;
1800
1801         while ((txcp = be_tx_compl_get(tx_cq))) {
1802                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1803                                 wrb_index, txcp);
1804                 be_tx_compl_process(adapter, end_idx);
1805                 tx_compl++;
1806         }
1807
1808         mcc_compl = be_process_mcc(adapter, &status);
1809
1810         napi_complete(napi);
1811
1812         if (mcc_compl) {
1813                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1814                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1815         }
1816
1817         if (tx_compl) {
1818                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1819
1820                 /* As Tx wrbs have been freed up, wake up netdev queue if
1821                  * it was stopped due to lack of tx wrbs.
1822                  */
1823                 if (netif_queue_stopped(adapter->netdev) &&
1824                         atomic_read(&txq->used) < txq->len / 2) {
1825                         netif_wake_queue(adapter->netdev);
1826                 }
1827
1828                 tx_stats(adapter)->be_tx_events++;
1829                 tx_stats(adapter)->be_tx_compl += tx_compl;
1830         }
1831
1832         return 1;
1833 }
1834
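/* Detect and dump unrecoverable-error (UE) state: read the UE status CSRs
 * from PCI config space, clear the bits their masks say to ignore, and log
 * a description for every bit that remains set.
 */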
1835 void be_detect_dump_ue(struct be_adapter *adapter)
1836 {
1837         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1838         u32 i;
1839
1840         pci_read_config_dword(adapter->pdev,
1841                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1842         pci_read_config_dword(adapter->pdev,
1843                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1844         pci_read_config_dword(adapter->pdev,
1845                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1846         pci_read_config_dword(adapter->pdev,
1847                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1848
1849         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1850         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1851
1852         if (ue_status_lo || ue_status_hi) {
1853                 adapter->ue_detected = true;
1854                 adapter->eeh_err = true;
1855                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1856         }
1857
1858         if (ue_status_lo) {
1859                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1860                         if (ue_status_lo & 1)
1861                                 dev_err(&adapter->pdev->dev,
1862                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1863                 }
1864         }
1865         if (ue_status_hi) {
1866                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1867                         if (ue_status_hi & 1)
1868                                 dev_err(&adapter->pdev->dev,
1869                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1870                 }
1871         }
1873 }
1874
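/* Periodic (once a second) housekeeping: UE detection, stats refresh,
 * tx/rx rate and EQ-delay updates, and replenishing any rx rings that
 * starved for buffers.
 */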
1875 static void be_worker(struct work_struct *work)
1876 {
1877         struct be_adapter *adapter =
1878                 container_of(work, struct be_adapter, work.work);
1879         struct be_rx_obj *rxo;
1880         int i;
1881
1882         if (!adapter->ue_detected && !lancer_chip(adapter))
1883                 be_detect_dump_ue(adapter);
1884
1885         /* When interrupts are not yet enabled, just reap any pending
1886          * mcc completions */
1887         if (!netif_running(adapter->netdev)) {
1888                 int mcc_compl, status = 0;
1889
1890                 mcc_compl = be_process_mcc(adapter, &status);
1891
1892                 if (mcc_compl) {
1893                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1894                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1895                 }
1896
1897                 goto reschedule;
1898         }
1899
1900         if (!adapter->stats_cmd_sent)
1901                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1902
1903         be_tx_rate_update(adapter);
1904
1905         for_all_rx_queues(adapter, rxo, i) {
1906                 be_rx_rate_update(rxo);
1907                 be_rx_eqd_update(adapter, rxo);
1908
1909                 if (rxo->rx_post_starved) {
1910                         rxo->rx_post_starved = false;
1911                         be_post_rx_frags(rxo, GFP_KERNEL);
1912                 }
1913         }
1914
1915 reschedule:
1916         adapter->work_counter++;
1917         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1918 }
1919
1920 static void be_msix_disable(struct be_adapter *adapter)
1921 {
1922         if (msix_enabled(adapter)) {
1923                 pci_disable_msix(adapter->pdev);
1924                 adapter->num_msix_vec = 0;
1925         }
1926 }
1927
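/* Ask for one vector per desired RX queue plus one shared TX/MCC vector.
 * If pci_enable_msix() cannot grant that many, it returns the number of
 * vectors it could have allocated; retry with that count as long as it
 * meets the Rx + Tx minimum, else fall back to INTx (num_msix_vec stays 0).
 */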
1928 static void be_msix_enable(struct be_adapter *adapter)
1929 {
1930 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1931         int i, status, num_vec;
1932
1933         num_vec = be_num_rxqs_want(adapter) + 1;
1934
1935         for (i = 0; i < num_vec; i++)
1936                 adapter->msix_entries[i].entry = i;
1937
1938         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1939         if (status == 0) {
1940                 goto done;
1941         } else if (status >= BE_MIN_MSIX_VECTORS) {
1942                 num_vec = status;
1943                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1944                                 num_vec) == 0)
1945                         goto done;
1946         }
1947         return;
1948 done:
1949         adapter->num_msix_vec = num_vec;
1950         return;
1951 }
1952
1953 static void be_sriov_enable(struct be_adapter *adapter)
1954 {
1955         be_check_sriov_fn_type(adapter);
1956 #ifdef CONFIG_PCI_IOV
1957         if (be_physfn(adapter) && num_vfs) {
1958                 int status, pos;
1959                 u16 nvfs;
1960
1961                 pos = pci_find_ext_capability(adapter->pdev,
1962                                                 PCI_EXT_CAP_ID_SRIOV);
1963                 pci_read_config_word(adapter->pdev,
1964                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1965
1966                 if (num_vfs > nvfs) {
1967                         dev_info(&adapter->pdev->dev,
1968                                         "Device supports only %d VFs, not %d\n",
1969                                         nvfs, num_vfs);
1970                         num_vfs = nvfs;
1971                 }
1972
1973                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1974                 adapter->sriov_enabled = status ? false : true;
1975         }
1976 #endif
1977 }
1978
1979 static void be_sriov_disable(struct be_adapter *adapter)
1980 {
1981 #ifdef CONFIG_PCI_IOV
1982         if (adapter->sriov_enabled) {
1983                 pci_disable_sriov(adapter->pdev);
1984                 adapter->sriov_enabled = false;
1985         }
1986 #endif
1987 }
1988
1989 static inline int be_msix_vec_get(struct be_adapter *adapter,
1990                                         struct be_eq_obj *eq_obj)
1991 {
1992         return adapter->msix_entries[eq_obj->eq_idx].vector;
1993 }
1994
1995 static int be_request_irq(struct be_adapter *adapter,
1996                 struct be_eq_obj *eq_obj,
1997                 void *handler, char *desc, void *context)
1998 {
1999         struct net_device *netdev = adapter->netdev;
2000         int vec;
2001
2002         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2003         vec = be_msix_vec_get(adapter, eq_obj);
2004         return request_irq(vec, handler, 0, eq_obj->desc, context);
2005 }
2006
2007 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2008                         void *context)
2009 {
2010         int vec = be_msix_vec_get(adapter, eq_obj);
2011         free_irq(vec, context);
2012 }
2013
2014 static int be_msix_register(struct be_adapter *adapter)
2015 {
2016         struct be_rx_obj *rxo;
2017         int status, i;
2018         char qname[10];
2019
2020         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2021                                 adapter);
2022         if (status)
2023                 goto err;
2024
2025         for_all_rx_queues(adapter, rxo, i) {
2026                 sprintf(qname, "rxq%d", i);
2027                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2028                                 qname, rxo);
2029                 if (status)
2030                         goto err_msix;
2031         }
2032
2033         return 0;
2034
2035 err_msix:
2036         be_free_irq(adapter, &adapter->tx_eq, adapter);
2037
2038         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2039                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2040
2041 err:
2042         dev_warn(&adapter->pdev->dev,
2043                 "MSIX Request IRQ failed - err %d\n", status);
2044         be_msix_disable(adapter);
2045         return status;
2046 }
2047
2048 static int be_irq_register(struct be_adapter *adapter)
2049 {
2050         struct net_device *netdev = adapter->netdev;
2051         int status;
2052
2053         if (msix_enabled(adapter)) {
2054                 status = be_msix_register(adapter);
2055                 if (status == 0)
2056                         goto done;
2057                 /* INTx is not supported for VF */
2058                 if (!be_physfn(adapter))
2059                         return status;
2060         }
2061
2062         /* INTx */
2063         netdev->irq = adapter->pdev->irq;
2064         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2065                         adapter);
2066         if (status) {
2067                 dev_err(&adapter->pdev->dev,
2068                         "INTx request IRQ failed - err %d\n", status);
2069                 return status;
2070         }
2071 done:
2072         adapter->isr_registered = true;
2073         return 0;
2074 }
2075
2076 static void be_irq_unregister(struct be_adapter *adapter)
2077 {
2078         struct net_device *netdev = adapter->netdev;
2079         struct be_rx_obj *rxo;
2080         int i;
2081
2082         if (!adapter->isr_registered)
2083                 return;
2084
2085         /* INTx */
2086         if (!msix_enabled(adapter)) {
2087                 free_irq(netdev->irq, adapter);
2088                 goto done;
2089         }
2090
2091         /* MSIx */
2092         be_free_irq(adapter, &adapter->tx_eq, adapter);
2093
2094         for_all_rx_queues(adapter, rxo, i)
2095                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2096
2097 done:
2098         adapter->isr_registered = false;
2099 }
2100
2101 static int be_close(struct net_device *netdev)
2102 {
2103         struct be_adapter *adapter = netdev_priv(netdev);
2104         struct be_rx_obj *rxo;
2105         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2106         int vec, i;
2107
2108         be_async_mcc_disable(adapter);
2109
2110         netif_carrier_off(netdev);
2111         adapter->link_up = false;
2112
2113         if (!lancer_chip(adapter))
2114                 be_intr_set(adapter, false);
2115
2116         for_all_rx_queues(adapter, rxo, i)
2117                 napi_disable(&rxo->rx_eq.napi);
2118
2119         napi_disable(&tx_eq->napi);
2120
2121         if (lancer_chip(adapter)) {
2122                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2123                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2124                 for_all_rx_queues(adapter, rxo, i)
2125                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2126         }
2127
2128         if (msix_enabled(adapter)) {
2129                 vec = be_msix_vec_get(adapter, tx_eq);
2130                 synchronize_irq(vec);
2131
2132                 for_all_rx_queues(adapter, rxo, i) {
2133                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2134                         synchronize_irq(vec);
2135                 }
2136         } else {
2137                 synchronize_irq(netdev->irq);
2138         }
2139         be_irq_unregister(adapter);
2140
2141         /* Wait for all pending tx completions to arrive so that
2142          * all tx skbs are freed.
2143          */
2144         be_tx_compl_clean(adapter);
2145
2146         return 0;
2147 }
2148
2149 static int be_open(struct net_device *netdev)
2150 {
2151         struct be_adapter *adapter = netdev_priv(netdev);
2152         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2153         struct be_rx_obj *rxo;
2154         bool link_up;
2155         int status, i;
2156         u8 mac_speed;
2157         u16 link_speed;
2158
2159         for_all_rx_queues(adapter, rxo, i) {
2160                 be_post_rx_frags(rxo, GFP_KERNEL);
2161                 napi_enable(&rxo->rx_eq.napi);
2162         }
2163         napi_enable(&tx_eq->napi);
2164
2165         be_irq_register(adapter);
2166
2167         if (!lancer_chip(adapter))
2168                 be_intr_set(adapter, true);
2169
2170         /* The evt queues are created in unarmed state; arm them */
2171         for_all_rx_queues(adapter, rxo, i) {
2172                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2173                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2174         }
2175         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2176
2177         /* Now that interrupts are on we can process async mcc */
2178         be_async_mcc_enable(adapter);
2179
2180         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2181                         &link_speed, 0);
2182         if (status)
2183                 goto err;
2184         be_link_status_update(adapter, link_up);
2185
2186         if (be_physfn(adapter)) {
2187                 status = be_vid_config(adapter, false, 0);
2188                 if (status)
2189                         goto err;
2190
2191                 status = be_cmd_set_flow_control(adapter,
2192                                 adapter->tx_fc, adapter->rx_fc);
2193                 if (status)
2194                         goto err;
2195         }
2196
2197         return 0;
2198 err:
2199         be_close(adapter->netdev);
2200         return -EIO;
2201 }
2202
2203 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2204 {
2205         struct be_dma_mem cmd;
2206         int status = 0;
2207         u8 mac[ETH_ALEN];
2208
2209         memset(mac, 0, ETH_ALEN);
2210
2211         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2212         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2213                                     GFP_KERNEL);
2214         if (cmd.va == NULL)
2215                 return -1;
2216         memset(cmd.va, 0, cmd.size);
2217
2218         if (enable) {
2219                 status = pci_write_config_dword(adapter->pdev,
2220                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2221                 if (status) {
2222                         dev_err(&adapter->pdev->dev,
2223                                 "Could not enable Wake-on-LAN\n");
2224                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2225                                           cmd.dma);
2226                         return status;
2227                 }
2228                 status = be_cmd_enable_magic_wol(adapter,
2229                                 adapter->netdev->dev_addr, &cmd);
2230                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2231                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2232         } else {
2233                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2234                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2235                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2236         }
2237
2238         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2239         return status;
2240 }
2241
2242 /*
2243  * Generate a seed MAC address from the PF MAC Address using jhash.
2244  * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2245  * These addresses are programmed in the ASIC by the PF and the VF driver
2246  * queries for the MAC address during its probe.
2247  */
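/* For example (illustrative seed only): with a seed of aa:bb:cc:dd:ee:00,
 * VF0 is given ...:00, VF1 ...:01 and so on, via the mac[5] increment in
 * the loop below.
 */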
2248 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2249 {
2250         u32 vf = 0;
2251         int status = 0;
2252         u8 mac[ETH_ALEN];
2253
2254         be_vf_eth_addr_generate(adapter, mac);
2255
2256         for (vf = 0; vf < num_vfs; vf++) {
2257                 status = be_cmd_pmac_add(adapter, mac,
2258                                         adapter->vf_cfg[vf].vf_if_handle,
2259                                         &adapter->vf_cfg[vf].vf_pmac_id,
2260                                         vf + 1);
2261                 if (status)
2262                         dev_err(&adapter->pdev->dev,
2263                                 "Mac address add failed for VF %d\n", vf);
2264                 else
2265                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2266
2267                 mac[5] += 1;
2268         }
2269         return status;
2270 }
2271
2272 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2273 {
2274         u32 vf;
2275
2276         for (vf = 0; vf < num_vfs; vf++) {
2277                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2278                         be_cmd_pmac_del(adapter,
2279                                         adapter->vf_cfg[vf].vf_if_handle,
2280                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2281         }
2282 }
2283
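/* Bring-up sequence: create the interface (plus one per VF when SR-IOV is
 * enabled), then the TX, RX and MCC queue sets; unwind in reverse order on
 * any failure.
 */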
2284 static int be_setup(struct be_adapter *adapter)
2285 {
2286         struct net_device *netdev = adapter->netdev;
2287         u32 cap_flags, en_flags, vf = 0;
2288         int status;
2289         u8 mac[ETH_ALEN];
2290
2291         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2292                                 BE_IF_FLAGS_BROADCAST |
2293                                 BE_IF_FLAGS_MULTICAST;
2294
2295         if (be_physfn(adapter)) {
2296                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2297                                 BE_IF_FLAGS_PROMISCUOUS |
2298                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2299                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2300
2301                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2302                         cap_flags |= BE_IF_FLAGS_RSS;
2303                         en_flags |= BE_IF_FLAGS_RSS;
2304                 }
2305         }
2306
2307         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2308                         netdev->dev_addr, false/* pmac_invalid */,
2309                         &adapter->if_handle, &adapter->pmac_id, 0);
2310         if (status != 0)
2311                 goto do_none;
2312
2313         if (be_physfn(adapter)) {
2314                 if (adapter->sriov_enabled) {
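                        /* mac is deliberately left uninitialized here;
                         * pmac_invalid is passed as true so the firmware
                         * ignores it, and each VF's MAC is programmed
                         * later (see be_vf_eth_addr_config()).
                         */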
2315                         while (vf < num_vfs) {
2316                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2317                                                         BE_IF_FLAGS_BROADCAST;
2318                                 status = be_cmd_if_create(adapter, cap_flags,
2319                                         en_flags, mac, true,
2320                                         &adapter->vf_cfg[vf].vf_if_handle,
2321                                         NULL, vf+1);
2322                                 if (status) {
2323                                         dev_err(&adapter->pdev->dev,
2324                                         "Interface Create failed for VF %d\n",
2325                                         vf);
2326                                         goto if_destroy;
2327                                 }
2328                                 adapter->vf_cfg[vf].vf_pmac_id =
2329                                                         BE_INVALID_PMAC_ID;
2330                                 vf++;
2331                         }
2332                 }
2333         } else {
2334                 status = be_cmd_mac_addr_query(adapter, mac,
2335                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2336                 if (!status) {
2337                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2338                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2339                 }
2340         }
2341
2342         status = be_tx_queues_create(adapter);
2343         if (status != 0)
2344                 goto if_destroy;
2345
2346         status = be_rx_queues_create(adapter);
2347         if (status != 0)
2348                 goto tx_qs_destroy;
2349
2350         status = be_mcc_queues_create(adapter);
2351         if (status != 0)
2352                 goto rx_qs_destroy;
2353
2354         adapter->link_speed = -1;
2355
2356         return 0;
2357
2358 rx_qs_destroy:
2359         be_rx_queues_destroy(adapter);
2360 tx_qs_destroy:
2361         be_tx_queues_destroy(adapter);
2362 if_destroy:
2363         if (be_physfn(adapter) && adapter->sriov_enabled)
2364                 for (vf = 0; vf < num_vfs; vf++)
2365                         if (adapter->vf_cfg[vf].vf_if_handle)
2366                                 be_cmd_if_destroy(adapter,
2367                                         adapter->vf_cfg[vf].vf_if_handle,
2368                                         vf + 1);
2369         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2370 do_none:
2371         return status;
2372 }
2373
2374 static int be_clear(struct be_adapter *adapter)
2375 {
2376         int vf;
2377
2378         if (be_physfn(adapter) && adapter->sriov_enabled)
2379                 be_vf_eth_addr_rem(adapter);
2380
2381         be_mcc_queues_destroy(adapter);
2382         be_rx_queues_destroy(adapter);
2383         be_tx_queues_destroy(adapter);
2384         adapter->eq_next_idx = 0;
2385
2386         if (be_physfn(adapter) && adapter->sriov_enabled)
2387                 for (vf = 0; vf < num_vfs; vf++)
2388                         if (adapter->vf_cfg[vf].vf_if_handle)
2389                                 be_cmd_if_destroy(adapter,
2390                                         adapter->vf_cfg[vf].vf_if_handle,
2391                                         vf + 1);
2392
2393         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2394
2395         /* tell fw we're done with firing cmds */
2396         be_cmd_fw_clean(adapter);
2397         return 0;
2398 }
2399
2401 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2402 static bool be_flash_redboot(struct be_adapter *adapter,
2403                         const u8 *p, u32 img_start, int image_size,
2404                         int hdr_size)
2405 {
2406         u32 crc_offset;
2407         u8 flashed_crc[4];
2408         int status;
2409
2410         crc_offset = hdr_size + img_start + image_size - 4;
2411
2412         p += crc_offset;
2413
2414         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2415                         (image_size - 4));
2416         if (status) {
2417                 dev_err(&adapter->pdev->dev,
2418                 "could not get crc from flash, not flashing redboot\n");
2419                 return false;
2420         }
2421
2422         /* update redboot only if crc does not match */
2423         if (!memcmp(flashed_crc, p, 4))
2424                 return false;
2425         else
2426                 return true;
2427 }
2428
2429 static int be_flash_data(struct be_adapter *adapter,
2430                         const struct firmware *fw,
2431                         struct be_dma_mem *flash_cmd, int num_of_images)
2432 {
2434         int status = 0, i, filehdr_size = 0;
2435         u32 total_bytes = 0, flash_op;
2436         int num_bytes;
2437         const u8 *p = fw->data;
2438         struct be_cmd_write_flashrom *req = flash_cmd->va;
2439         const struct flash_comp *pflashcomp;
2440         int num_comp;
2441
2442         static const struct flash_comp gen3_flash_types[9] = {
2443                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2444                         FLASH_IMAGE_MAX_SIZE_g3},
2445                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2446                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2447                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2448                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2449                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2450                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2451                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2452                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2453                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2454                         FLASH_IMAGE_MAX_SIZE_g3},
2455                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2456                         FLASH_IMAGE_MAX_SIZE_g3},
2457                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2458                         FLASH_IMAGE_MAX_SIZE_g3},
2459                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2460                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2461         };
2462         static const struct flash_comp gen2_flash_types[8] = {
2463                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2464                         FLASH_IMAGE_MAX_SIZE_g2},
2465                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2466                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2467                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2468                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2469                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2470                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2471                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2472                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2473                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2474                         FLASH_IMAGE_MAX_SIZE_g2},
2475                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2476                         FLASH_IMAGE_MAX_SIZE_g2},
2477                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2478                          FLASH_IMAGE_MAX_SIZE_g2}
2479         };
2480
2481         if (adapter->generation == BE_GEN3) {
2482                 pflashcomp = gen3_flash_types;
2483                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2484                 num_comp = ARRAY_SIZE(gen3_flash_types);
2485         } else {
2486                 pflashcomp = gen2_flash_types;
2487                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2488                 num_comp = ARRAY_SIZE(gen2_flash_types);
2489         }
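        /* Walk the component table; each image is written out in 32KB
         * chunks. Intermediate chunks are staged with FLASHROM_OPER_SAVE
         * and the final chunk is committed with FLASHROM_OPER_FLASH.
         */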
2490         for (i = 0; i < num_comp; i++) {
2491                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2492                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2493                         continue;
2494                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2495                         (!be_flash_redboot(adapter, fw->data,
2496                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2497                         (num_of_images * sizeof(struct image_hdr)))))
2498                         continue;
2499                 p = fw->data;
2500                 p += filehdr_size + pflashcomp[i].offset
2501                         + (num_of_images * sizeof(struct image_hdr));
2502                 if (p + pflashcomp[i].size > fw->data + fw->size)
2503                         return -1;
2504                 total_bytes = pflashcomp[i].size;
2505                 while (total_bytes) {
2506                         if (total_bytes > 32*1024)
2507                                 num_bytes = 32*1024;
2508                         else
2509                                 num_bytes = total_bytes;
2510                         total_bytes -= num_bytes;
2511
2512                         if (!total_bytes)
2513                                 flash_op = FLASHROM_OPER_FLASH;
2514                         else
2515                                 flash_op = FLASHROM_OPER_SAVE;
2516                         memcpy(req->params.data_buf, p, num_bytes);
2517                         p += num_bytes;
2518                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2519                                 pflashcomp[i].optype, flash_op, num_bytes);
2520                         if (status) {
2521                                 dev_err(&adapter->pdev->dev,
2522                                         "cmd to write to flash rom failed.\n");
2523                                 return -1;
2524                         }
2525                         yield();
2526                 }
2527         }
2528         return 0;
2529 }
2530
2531 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2532 {
2533         if (fhdr == NULL)
2534                 return 0;
2535         if (fhdr->build[0] == '3')
2536                 return BE_GEN3;
2537         else if (fhdr->build[0] == '2')
2538                 return BE_GEN2;
2539         else
2540                 return 0;
2541 }
2542
2543 int be_load_fw(struct be_adapter *adapter, u8 *func)
2544 {
2545         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2546         const struct firmware *fw;
2547         struct flash_file_hdr_g2 *fhdr;
2548         struct flash_file_hdr_g3 *fhdr3;
2549         struct image_hdr *img_hdr_ptr = NULL;
2550         struct be_dma_mem flash_cmd;
2551         int status, i = 0, num_imgs = 0;
2552         const u8 *p;
2553
2554         if (!netif_running(adapter->netdev)) {
2555                 dev_err(&adapter->pdev->dev,
2556                         "Firmware load not allowed (interface is down)\n");
2557                 return -EPERM;
2558         }
2559
2560         strcpy(fw_file, func);
2561
2562         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2563         if (status)
2564                 goto fw_exit;
2565
2566         p = fw->data;
2567         fhdr = (struct flash_file_hdr_g2 *) p;
2568         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2569
2570         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2571         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2572                                           &flash_cmd.dma, GFP_KERNEL);
2573         if (!flash_cmd.va) {
2574                 status = -ENOMEM;
2575                 dev_err(&adapter->pdev->dev,
2576                         "Memory allocation failure while flashing\n");
2577                 goto fw_exit;
2578         }
2579
2580         if ((adapter->generation == BE_GEN3) &&
2581                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2582                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2583                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2584                 for (i = 0; i < num_imgs; i++) {
2585                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2586                                         (sizeof(struct flash_file_hdr_g3) +
2587                                          i * sizeof(struct image_hdr)));
2588                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2589                                 status = be_flash_data(adapter, fw, &flash_cmd,
2590                                                         num_imgs);
2591                 }
2592         } else if ((adapter->generation == BE_GEN2) &&
2593                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2594                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2595         } else {
2596                 dev_err(&adapter->pdev->dev,
2597                         "UFI and Interface are not compatible for flashing\n");
2598                 status = -1;
2599         }
2600
2601         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2602                           flash_cmd.dma);
2603         if (status) {
2604                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2605                 goto fw_exit;
2606         }
2607
2608         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2609
2610 fw_exit:
2611         release_firmware(fw);
2612         return status;
2613 }
2614
2615 static struct net_device_ops be_netdev_ops = {
2616         .ndo_open               = be_open,
2617         .ndo_stop               = be_close,
2618         .ndo_start_xmit         = be_xmit,
2619         .ndo_set_rx_mode        = be_set_multicast_list,
2620         .ndo_set_mac_address    = be_mac_addr_set,
2621         .ndo_change_mtu         = be_change_mtu,
2622         .ndo_validate_addr      = eth_validate_addr,
2623         .ndo_vlan_rx_register   = be_vlan_register,
2624         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2625         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2626         .ndo_set_vf_mac         = be_set_vf_mac,
2627         .ndo_set_vf_vlan        = be_set_vf_vlan,
2628         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2629         .ndo_get_vf_config      = be_get_vf_config
2630 };
2631
2632 static void be_netdev_init(struct net_device *netdev)
2633 {
2634         struct be_adapter *adapter = netdev_priv(netdev);
2635         struct be_rx_obj *rxo;
2636         int i;
2637
2638         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2639                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2640                 NETIF_F_HW_VLAN_TX;
2641         if (be_multi_rxq(adapter))
2642                 netdev->hw_features |= NETIF_F_RXHASH;
2643
2644         netdev->features |= netdev->hw_features |
2645                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2646
2647         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2648                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2649
2650         if (lancer_chip(adapter))
2651                 netdev->vlan_features |= NETIF_F_TSO6;
2652
2653         netdev->flags |= IFF_MULTICAST;
2654
2655         /* Default settings for Rx and Tx flow control */
2656         adapter->rx_fc = true;
2657         adapter->tx_fc = true;
2658
2659         netif_set_gso_max_size(netdev, 65535);
2660
2661         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2662
2663         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2664
2665         for_all_rx_queues(adapter, rxo, i)
2666                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2667                                 BE_NAPI_WEIGHT);
2668
2669         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2670                 BE_NAPI_WEIGHT);
2671 }
2672
2673 static void be_unmap_pci_bars(struct be_adapter *adapter)
2674 {
2675         if (adapter->csr)
2676                 iounmap(adapter->csr);
2677         if (adapter->db)
2678                 iounmap(adapter->db);
2679         if (adapter->pcicfg && be_physfn(adapter))
2680                 iounmap(adapter->pcicfg);
2681 }
2682
2683 static int be_map_pci_bars(struct be_adapter *adapter)
2684 {
2685         u8 __iomem *addr;
2686         int pcicfg_reg, db_reg;
2687
2688         if (lancer_chip(adapter)) {
2689                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2690                         pci_resource_len(adapter->pdev, 0));
2691                 if (addr == NULL)
2692                         return -ENOMEM;
2693                 adapter->db = addr;
2694                 return 0;
2695         }
2696
2697         if (be_physfn(adapter)) {
2698                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2699                                 pci_resource_len(adapter->pdev, 2));
2700                 if (addr == NULL)
2701                         return -ENOMEM;
2702                 adapter->csr = addr;
2703         }
2704
2705         if (adapter->generation == BE_GEN2) {
2706                 pcicfg_reg = 1;
2707                 db_reg = 4;
2708         } else {
2709                 pcicfg_reg = 0;
2710                 if (be_physfn(adapter))
2711                         db_reg = 4;
2712                 else
2713                         db_reg = 0;
2714         }
2715         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2716                                 pci_resource_len(adapter->pdev, db_reg));
2717         if (addr == NULL)
2718                 goto pci_map_err;
2719         adapter->db = addr;
2720
2721         if (be_physfn(adapter)) {
2722                 addr = ioremap_nocache(
2723                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2724                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2725                 if (addr == NULL)
2726                         goto pci_map_err;
2727                 adapter->pcicfg = addr;
2728         } else
2729                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2730
2731         return 0;
2732 pci_map_err:
2733         be_unmap_pci_bars(adapter);
2734         return -ENOMEM;
2735 }
2736
2738 static void be_ctrl_cleanup(struct be_adapter *adapter)
2739 {
2740         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2741
2742         be_unmap_pci_bars(adapter);
2743
2744         if (mem->va)
2745                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2746                                   mem->dma);
2747
2748         mem = &adapter->mc_cmd_mem;
2749         if (mem->va)
2750                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2751                                   mem->dma);
2752 }
2753
2754 static int be_ctrl_init(struct be_adapter *adapter)
2755 {
2756         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2757         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2758         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2759         int status;
2760
2761         status = be_map_pci_bars(adapter);
2762         if (status)
2763                 goto done;
2764
2765         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2766         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2767                                                 mbox_mem_alloc->size,
2768                                                 &mbox_mem_alloc->dma,
2769                                                 GFP_KERNEL);
2770         if (!mbox_mem_alloc->va) {
2771                 status = -ENOMEM;
2772                 goto unmap_pci_bars;
2773         }
2774
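        /* Carve a 16-byte-aligned view of the mailbox out of the
         * allocation above, which was padded by 16 bytes for this purpose.
         */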
2775         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2776         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2777         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2778         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2779
2780         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2781         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2782                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2783                                             GFP_KERNEL);
2784         if (mc_cmd_mem->va == NULL) {
2785                 status = -ENOMEM;
2786                 goto free_mbox;
2787         }
2788         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2789
2790         mutex_init(&adapter->mbox_lock);
2791         spin_lock_init(&adapter->mcc_lock);
2792         spin_lock_init(&adapter->mcc_cq_lock);
2793
2794         init_completion(&adapter->flash_compl);
2795         pci_save_state(adapter->pdev);
2796         return 0;
2797
2798 free_mbox:
2799         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2800                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2801
2802 unmap_pci_bars:
2803         be_unmap_pci_bars(adapter);
2804
2805 done:
2806         return status;
2807 }
2808
2809 static void be_stats_cleanup(struct be_adapter *adapter)
2810 {
2811         struct be_dma_mem *cmd = &adapter->stats_cmd;
2812
2813         if (cmd->va)
2814                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2815                                   cmd->va, cmd->dma);
2816 }
2817
2818 static int be_stats_init(struct be_adapter *adapter)
2819 {
2820         struct be_dma_mem *cmd = &adapter->stats_cmd;
2821
2822         cmd->size = sizeof(struct be_cmd_req_get_stats);
2823         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2824                                      GFP_KERNEL);
2825         if (cmd->va == NULL)
2826                 return -1;
2827         memset(cmd->va, 0, cmd->size);
2828         return 0;
2829 }
2830
2831 static void __devexit be_remove(struct pci_dev *pdev)
2832 {
2833         struct be_adapter *adapter = pci_get_drvdata(pdev);
2834
2835         if (!adapter)
2836                 return;
2837
2838         cancel_delayed_work_sync(&adapter->work);
2839
2840         unregister_netdev(adapter->netdev);
2841
2842         be_clear(adapter);
2843
2844         be_stats_cleanup(adapter);
2845
2846         be_ctrl_cleanup(adapter);
2847
2848         kfree(adapter->vf_cfg);
2849         be_sriov_disable(adapter);
2850
2851         be_msix_disable(adapter);
2852
2853         pci_set_drvdata(pdev, NULL);
2854         pci_release_regions(pdev);
2855         pci_disable_device(pdev);
2856
2857         free_netdev(adapter->netdev);
2858 }
2859
2860 static int be_get_config(struct be_adapter *adapter)
2861 {
2862         int status;
2863         u8 mac[ETH_ALEN];
2864
2865         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2866         if (status)
2867                 return status;
2868
2869         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2870                         &adapter->function_mode, &adapter->function_caps);
2871         if (status)
2872                 return status;
2873
2874         memset(mac, 0, ETH_ALEN);
2875
2876         if (be_physfn(adapter)) {
2877                 status = be_cmd_mac_addr_query(adapter, mac,
2878                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2879
2880                 if (status)
2881                         return status;
2882
2883                 if (!is_valid_ether_addr(mac))
2884                         return -EADDRNOTAVAIL;
2885
2886                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2887                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2888         }
2889
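        /* In multi-channel mode (function_mode bit 0x400) the VLAN table
         * appears to be split four ways, so this function gets only a
         * quarter of the supported VLAN entries.
         */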
2890         if (adapter->function_mode & 0x400)
2891                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2892         else
2893                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2894
2895         status = be_cmd_get_cntl_attributes(adapter);
2896         if (status)
2897                 return status;
2898
2899         be_cmd_check_native_mode(adapter);
2900         return 0;
2901 }
2902
2903 static int be_dev_family_check(struct be_adapter *adapter)
2904 {
2905         struct pci_dev *pdev = adapter->pdev;
2906         u32 sli_intf = 0, if_type;
2907
2908         switch (pdev->device) {
2909         case BE_DEVICE_ID1:
2910         case OC_DEVICE_ID1:
2911                 adapter->generation = BE_GEN2;
2912                 break;
2913         case BE_DEVICE_ID2:
2914         case OC_DEVICE_ID2:
2915                 adapter->generation = BE_GEN3;
2916                 break;
2917         case OC_DEVICE_ID3:
2918                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2919                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2920                                                 SLI_INTF_IF_TYPE_SHIFT;
2921
2922                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2923                         if_type != 0x02) {
2924                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2925                         return -EINVAL;
2926                 }
2927                 if (num_vfs > 0) {
2928                         dev_err(&pdev->dev, "VFs not supported\n");
2929                         return -EINVAL;
2930                 }
2931                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2932                                          SLI_INTF_FAMILY_SHIFT);
2933                 adapter->generation = BE_GEN3;
2934                 break;
2935         default:
2936                 adapter->generation = 0;
2937         }
2938         return 0;
2939 }
2940
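/*
 * Poll the SLIPORT_STATUS register until the ready bit is set; with
 * 500 iterations of msleep(20) the wait is bounded at about 10 seconds.
 */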
2941 static int lancer_wait_ready(struct be_adapter *adapter)
2942 {
2943 #define SLIPORT_READY_TIMEOUT 500
2944         u32 sliport_status;
2945         int status = 0, i;
2946
2947         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2948                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2949                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2950                         break;
2951
2952                 msleep(20);
2953         }
2954
2955         if (i == SLIPORT_READY_TIMEOUT)
2956                 status = -ETIMEDOUT;
2957
2958         return status;
2959 }
2960
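/*
 * If the port reports an error together with "reset needed", write the
 * IP bit of SLIPORT_CONTROL to request a port reset and re-poll. Any
 * error or reset-needed state that persists afterwards is fatal.
 */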
2961 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2962 {
2963         int status;
2964         u32 sliport_status, err, reset_needed;
2965         status = lancer_wait_ready(adapter);
2966         if (!status) {
2967                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2968                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2969                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2970                 if (err && reset_needed) {
2971                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
2972                                         adapter->db + SLIPORT_CONTROL_OFFSET);
2973
2974                         /* check adapter has corrected the error */
2975                         status = lancer_wait_ready(adapter);
2976                         sliport_status = ioread32(adapter->db +
2977                                                         SLIPORT_STATUS_OFFSET);
2978                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2979                                                 SLIPORT_STATUS_RN_MASK);
2980                         if (status || sliport_status)
2981                                 status = -EIO;
2982                 } else if (err || reset_needed) {
2983                         status = -EIO;
2984                 }
2985         }
2986         return status;
2987 }
2988
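/*
 * PCI probe: enable the device, allocate the netdev, set the DMA mask,
 * bring up SR-IOV and the mailbox, sync with firmware (POST on the
 * physical function), reset the function, then create the rings and
 * register the netdev. Error paths unwind in reverse order through the
 * labels at the bottom.
 */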
2989 static int __devinit be_probe(struct pci_dev *pdev,
2990                         const struct pci_device_id *pdev_id)
2991 {
2992         int status = 0;
2993         struct be_adapter *adapter;
2994         struct net_device *netdev;
2995
2996         status = pci_enable_device(pdev);
2997         if (status)
2998                 goto do_none;
2999
3000         status = pci_request_regions(pdev, DRV_NAME);
3001         if (status)
3002                 goto disable_dev;
3003         pci_set_master(pdev);
3004
3005         netdev = alloc_etherdev(sizeof(struct be_adapter));
3006         if (netdev == NULL) {
3007                 status = -ENOMEM;
3008                 goto rel_reg;
3009         }
3010         adapter = netdev_priv(netdev);
3011         adapter->pdev = pdev;
3012         pci_set_drvdata(pdev, adapter);
3013
3014         status = be_dev_family_check(adapter);
3015         if (status)
3016                 goto free_netdev;
3017
3018         adapter->netdev = netdev;
3019         SET_NETDEV_DEV(netdev, &pdev->dev);
3020
3021         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3022         if (!status) {
3023                 netdev->features |= NETIF_F_HIGHDMA;
3024         } else {
3025                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3026                 if (status) {
3027                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3028                         goto free_netdev;
3029                 }
3030         }
3031
3032         be_sriov_enable(adapter);
3033         if (adapter->sriov_enabled) {
3034                 adapter->vf_cfg = kcalloc(num_vfs,
3035                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3036
3037                 if (!adapter->vf_cfg) {
3038                         status = -ENOMEM;
                              goto free_netdev;
                      }
3039         }
3040
3041         status = be_ctrl_init(adapter);
3042         if (status)
3043                 goto free_vf_cfg;
3044
3045         if (lancer_chip(adapter)) {
3046                 status = lancer_test_and_set_rdy_state(adapter);
3047                 if (status) {
3048                         dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3049                         goto ctrl_clean;
3050                 }
3051         }
3052
3053         /* sync up with fw's ready state */
3054         if (be_physfn(adapter)) {
3055                 status = be_cmd_POST(adapter);
3056                 if (status)
3057                         goto ctrl_clean;
3058         }
3059
3060         /* tell fw we're ready to fire cmds */
3061         status = be_cmd_fw_init(adapter);
3062         if (status)
3063                 goto ctrl_clean;
3064
3065         status = be_cmd_reset_function(adapter);
3066         if (status)
3067                 goto ctrl_clean;
3068
3069         status = be_stats_init(adapter);
3070         if (status)
3071                 goto ctrl_clean;
3072
3073         status = be_get_config(adapter);
3074         if (status)
3075                 goto stats_clean;
3076
3077         be_msix_enable(adapter);
3078
3079         INIT_DELAYED_WORK(&adapter->work, be_worker);
3080
3081         status = be_setup(adapter);
3082         if (status)
3083                 goto msix_disable;
3084
3085         be_netdev_init(netdev);
3086         status = register_netdev(netdev);
3087         if (status != 0)
3088                 goto unsetup;
3089         netif_carrier_off(netdev);
3090
3091         if (be_physfn(adapter) && adapter->sriov_enabled) {
3092                 u8 mac_speed;
3093                 bool link_up;
3094                 u16 vf, lnk_speed;
3095
3096                 status = be_vf_eth_addr_config(adapter);
3097                 if (status)
3098                         goto unreg_netdev;
3099
3100                 for (vf = 0; vf < num_vfs; vf++) {
3101                         status = be_cmd_link_status_query(adapter, &link_up,
3102                                         &mac_speed, &lnk_speed, vf + 1);
3103                         if (status)
3104                                 goto unreg_netdev;
                              /* lnk_speed appears to be reported by FW in
                               * units of 10 Mbps; vf_tx_rate is in Mbps */
3105                         adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3107                 }
3108         }
3109
3110         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3111         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3112         return 0;
3113
3114 unreg_netdev:
3115         unregister_netdev(netdev);
3116 unsetup:
3117         be_clear(adapter);
3118 msix_disable:
3119         be_msix_disable(adapter);
3120 stats_clean:
3121         be_stats_cleanup(adapter);
3122 ctrl_clean:
3123         be_ctrl_cleanup(adapter);
3124 free_vf_cfg:
3125         kfree(adapter->vf_cfg);
3126 free_netdev:
3127         be_sriov_disable(adapter);
3128         free_netdev(netdev);
3129         pci_set_drvdata(pdev, NULL);
3130 rel_reg:
3131         pci_release_regions(pdev);
3132 disable_dev:
3133         pci_disable_device(pdev);
3134 do_none:
3135         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3136         return status;
3137 }
3138
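/*
 * PM suspend: arm wake-on-LAN if configured, close the interface and
 * free the rings, then drop the device into the requested power state.
 * Flow-control settings are read back first so they survive the
 * suspend/resume cycle.
 */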
3139 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3140 {
3141         struct be_adapter *adapter = pci_get_drvdata(pdev);
3142         struct net_device *netdev = adapter->netdev;
3143
3144         cancel_delayed_work_sync(&adapter->work);
3145         if (adapter->wol)
3146                 be_setup_wol(adapter, true);
3147
3148         netif_device_detach(netdev);
3149         if (netif_running(netdev)) {
3150                 rtnl_lock();
3151                 be_close(netdev);
3152                 rtnl_unlock();
3153         }
3154         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3155         be_clear(adapter);
3156
3157         be_msix_disable(adapter);
3158         pci_save_state(pdev);
3159         pci_disable_device(pdev);
3160         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3161         return 0;
3162 }
3163
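/*
 * PM resume: restore PCI state, re-enable MSI-X, re-init the firmware
 * command path and rebuild the rings with be_setup() before re-opening
 * and re-attaching the interface.
 */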
3164 static int be_resume(struct pci_dev *pdev)
3165 {
3166         int status = 0;
3167         struct be_adapter *adapter = pci_get_drvdata(pdev);
3168         struct net_device *netdev = adapter->netdev;
3169
3170         netif_device_detach(netdev);
3171
3172         status = pci_enable_device(pdev);
3173         if (status)
3174                 return status;
3175
3176         pci_set_power_state(pdev, PCI_D0);
3177         pci_restore_state(pdev);
3178
3179         be_msix_enable(adapter);
3180         /* tell fw we're ready to fire cmds */
3181         status = be_cmd_fw_init(adapter);
3182         if (status)
3183                 return status;
3184
3185         status = be_setup(adapter);
             if (status)
                     return status;
3186         if (netif_running(netdev)) {
3187                 rtnl_lock();
3188                 be_open(netdev);
3189                 rtnl_unlock();
3190         }
3191         netif_device_attach(netdev);
3192
3193         if (adapter->wol)
3194                 be_setup_wol(adapter, false);
3195
3196         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3197         return 0;
3198 }
3199
3200 /*
3201  * An FLR (function-level reset) will stop the device from DMAing any data.
3202  */
3203 static void be_shutdown(struct pci_dev *pdev)
3204 {
3205         struct be_adapter *adapter = pci_get_drvdata(pdev);
3206
3207         if (!adapter)
3208                 return;
3209
3210         cancel_delayed_work_sync(&adapter->work);
3211
3212         netif_device_detach(adapter->netdev);
3213
3214         if (adapter->wol)
3215                 be_setup_wol(adapter, true);
3216
3217         be_cmd_reset_function(adapter);
3218
3219         pci_disable_device(pdev);
3220 }
3221
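/*
 * EEH error_detected callback: mark the error, quiesce the interface and
 * free the rings. Returning PCI_ERS_RESULT_NEED_RESET asks the PCI core
 * for a slot reset unless the failure is permanent.
 */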
3222 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3223                                 pci_channel_state_t state)
3224 {
3225         struct be_adapter *adapter = pci_get_drvdata(pdev);
3226         struct net_device *netdev = adapter->netdev;
3227
3228         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3229
3230         adapter->eeh_err = true;
3231
3232         netif_device_detach(netdev);
3233
3234         if (netif_running(netdev)) {
3235                 rtnl_lock();
3236                 be_close(netdev);
3237                 rtnl_unlock();
3238         }
3239         be_clear(adapter);
3240
3241         if (state == pci_channel_io_perm_failure)
3242                 return PCI_ERS_RESULT_DISCONNECT;
3243
3244         pci_disable_device(pdev);
3245
3246         return PCI_ERS_RESULT_NEED_RESET;
3247 }
3248
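/*
 * EEH slot_reset callback: re-enable and restore the device after the
 * slot reset, then run POST to confirm the card and firmware are ready.
 */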
3249 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3250 {
3251         struct be_adapter *adapter = pci_get_drvdata(pdev);
3252         int status;
3253
3254         dev_info(&adapter->pdev->dev, "EEH reset\n");
3255         adapter->eeh_err = false;
3256
3257         status = pci_enable_device(pdev);
3258         if (status)
3259                 return PCI_ERS_RESULT_DISCONNECT;
3260
3261         pci_set_master(pdev);
3262         pci_set_power_state(pdev, PCI_D0);
3263         pci_restore_state(pdev);
3264
3265         /* Check if card is ok and fw is ready */
3266         status = be_cmd_POST(adapter);
3267         if (status)
3268                 return PCI_ERS_RESULT_DISCONNECT;
3269
3270         return PCI_ERS_RESULT_RECOVERED;
3271 }
3272
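/*
 * EEH resume callback: re-init the firmware command path, rebuild the
 * rings and re-open the interface before re-attaching the netdev.
 */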
3273 static void be_eeh_resume(struct pci_dev *pdev)
3274 {
3275         int status = 0;
3276         struct be_adapter *adapter = pci_get_drvdata(pdev);
3277         struct net_device *netdev = adapter->netdev;
3278
3279         dev_info(&adapter->pdev->dev, "EEH resume\n");
3280
3281         pci_save_state(pdev);
3282
3283         /* tell fw we're ready to fire cmds */
3284         status = be_cmd_fw_init(adapter);
3285         if (status)
3286                 goto err;
3287
3288         status = be_setup(adapter);
3289         if (status)
3290                 goto err;
3291
3292         if (netif_running(netdev)) {
3293                 status = be_open(netdev);
3294                 if (status)
3295                         goto err;
3296         }
3297         netif_device_attach(netdev);
3298         return;
3299 err:
3300         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3301 }
3302
3303 static struct pci_error_handlers be_eeh_handlers = {
3304         .error_detected = be_eeh_err_detected,
3305         .slot_reset = be_eeh_reset,
3306         .resume = be_eeh_resume,
3307 };
3308
3309 static struct pci_driver be_driver = {
3310         .name = DRV_NAME,
3311         .id_table = be_dev_ids,
3312         .probe = be_probe,
3313         .remove = be_remove,
3314         .suspend = be_suspend,
3315         .resume = be_resume,
3316         .shutdown = be_shutdown,
3317         .err_handler = &be_eeh_handlers
3318 };
3319
3320 static int __init be_init_module(void)
3321 {
3322         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3323             rx_frag_size != 2048) {
3324                 printk(KERN_WARNING DRV_NAME
3325                         " : Module param rx_frag_size must be 2048/4096/8192."
3326                         " Using 2048\n");
3327                 rx_frag_size = 2048;
3328         }
3329
3330         return pci_register_driver(&be_driver);
3331 }
3332 module_init(be_init_module);
3333
3334 static void __exit be_exit_module(void)
3335 {
3336         pci_unregister_driver(&be_driver);
3337 }
3338 module_exit(be_exit_module);