drivers/net/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50         "CEV",
51         "CTX",
52         "DBUF",
53         "ERX",
54         "Host",
55         "MPU",
56         "NDMA",
57         "PTC ",
58         "RDMA ",
59         "RXF ",
60         "RXIPS ",
61         "RXULP0 ",
62         "RXULP1 ",
63         "RXULP2 ",
64         "TIM ",
65         "TPOST ",
66         "TPRE ",
67         "TXIPS ",
68         "TXULP0 ",
69         "TXULP1 ",
70         "UC ",
71         "WDMA ",
72         "TXULP2 ",
73         "HOST1 ",
74         "P0_OB_LINK ",
75         "P1_OB_LINK ",
76         "HOST_GPIO ",
77         "MBOX ",
78         "AXGMAC0",
79         "AXGMAC1",
80         "JTAG",
81         "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85         "LPCMEMHOST",
86         "MGMT_MAC",
87         "PCS0ONLINE",
88         "MPU_IRAM",
89         "PCS1ONLINE",
90         "PCTL0",
91         "PCTL1",
92         "PMEM",
93         "RR",
94         "TXPB",
95         "RXPP",
96         "XAUI",
97         "TXP",
98         "ARM",
99         "IPC",
100         "HOST2",
101         "HOST3",
102         "HOST4",
103         "HOST5",
104         "HOST6",
105         "HOST7",
106         "HOST8",
107         "HOST9",
108         "NETC",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown"
117 };
118
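/* Helpers to allocate/free the coherent DMA memory backing a queue ring */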
119 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
120 {
121         struct be_dma_mem *mem = &q->dma_mem;
122         if (mem->va)
123                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
124                                   mem->dma);
125 }
126
127 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
128                 u16 len, u16 entry_size)
129 {
130         struct be_dma_mem *mem = &q->dma_mem;
131
132         memset(q, 0, sizeof(*q));
133         q->len = len;
134         q->entry_size = entry_size;
135         mem->size = len * entry_size;
136         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
137                                      GFP_KERNEL);
138         if (!mem->va)
139                 return -1;
140         memset(mem->va, 0, mem->size);
141         return 0;
142 }
143
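/* Enable/disable host interrupt delivery via the HOSTINTR bit in the membar
 * control register; does nothing once an EEH error has been flagged */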
144 static void be_intr_set(struct be_adapter *adapter, bool enable)
145 {
146         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
147         u32 reg = ioread32(addr);
148         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149
150         if (adapter->eeh_err)
151                 return;
152
153         if (!enabled && enable)
154                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155         else if (enabled && !enable)
156                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157         else
158                 return;
159
160         iowrite32(reg, addr);
161 }
162
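/* Doorbell helpers: notify hardware of buffers posted to the RX/TX rings or of
 * events/completions popped from the EQ/CQ (optionally re-arming them) */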
163 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
164 {
165         u32 val = 0;
166         val |= qid & DB_RQ_RING_ID_MASK;
167         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
168
169         wmb();
170         iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 }
172
173 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
174 {
175         u32 val = 0;
176         val |= qid & DB_TXULP_RING_ID_MASK;
177         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
178
179         wmb();
180         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 }
182
183 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
184                 bool arm, bool clear_int, u16 num_popped)
185 {
186         u32 val = 0;
187         val |= qid & DB_EQ_RING_ID_MASK;
188         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
189                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
190
191         if (adapter->eeh_err)
192                 return;
193
194         if (arm)
195                 val |= 1 << DB_EQ_REARM_SHIFT;
196         if (clear_int)
197                 val |= 1 << DB_EQ_CLR_SHIFT;
198         val |= 1 << DB_EQ_EVNT_SHIFT;
199         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
200         iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 }
202
203 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
204 {
205         u32 val = 0;
206         val |= qid & DB_CQ_RING_ID_MASK;
207         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
208                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
209
210         if (adapter->eeh_err)
211                 return;
212
213         if (arm)
214                 val |= 1 << DB_CQ_REARM_SHIFT;
215         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
216         iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 }
218
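/* ndo_set_mac_address handler: the PF replaces its pmac entry via FW commands;
 * a VF's MAC is programmed by its PF, so only netdev->dev_addr is updated */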
219 static int be_mac_addr_set(struct net_device *netdev, void *p)
220 {
221         struct be_adapter *adapter = netdev_priv(netdev);
222         struct sockaddr *addr = p;
223         int status = 0;
224
225         if (!is_valid_ether_addr(addr->sa_data))
226                 return -EADDRNOTAVAIL;
227
228         /* MAC addr configuration will be done in hardware for VFs
229          * by their corresponding PFs. Just copy to netdev addr here
230          */
231         if (!be_physfn(adapter))
232                 goto netdev_addr;
233
234         status = be_cmd_pmac_del(adapter, adapter->if_handle,
235                                 adapter->pmac_id, 0);
236         if (status)
237                 return status;
238
239         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
240                                 adapter->if_handle, &adapter->pmac_id, 0);
241 netdev_addr:
242         if (!status)
243                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
244
245         return status;
246 }
247
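/* Fold the per-queue driver counters and the per-port hardware stats
 * (from the stats command buffer) into netdev->stats */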
248 void netdev_stats_update(struct be_adapter *adapter)
249 {
250         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
252         struct be_port_rxf_stats *port_stats =
253                         &rxf_stats->port[adapter->port_num];
254         struct net_device_stats *dev_stats = &adapter->netdev->stats;
255         struct be_erx_stats *erx_stats = &hw_stats->erx;
256         struct be_rx_obj *rxo;
257         int i;
258
259         memset(dev_stats, 0, sizeof(*dev_stats));
260         for_all_rx_queues(adapter, rxo, i) {
261                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
262                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
263                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
264                 /*  no space in linux buffers: best possible approximation */
265                 dev_stats->rx_dropped +=
266                         erx_stats->rx_drops_no_fragments[rxo->q.id];
267         }
268
269         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
270         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
271
272         /* bad pkts received */
273         dev_stats->rx_errors = port_stats->rx_crc_errors +
274                 port_stats->rx_alignment_symbol_errors +
275                 port_stats->rx_in_range_errors +
276                 port_stats->rx_out_range_errors +
277                 port_stats->rx_frame_too_long +
278                 port_stats->rx_dropped_too_small +
279                 port_stats->rx_dropped_too_short +
280                 port_stats->rx_dropped_header_too_small +
281                 port_stats->rx_dropped_tcp_length +
282                 port_stats->rx_dropped_runt +
283                 port_stats->rx_tcp_checksum_errs +
284                 port_stats->rx_ip_checksum_errs +
285                 port_stats->rx_udp_checksum_errs;
286
287         /* detailed rx errors */
288         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
289                 port_stats->rx_out_range_errors +
290                 port_stats->rx_frame_too_long;
291
292         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
293
294         /* frame alignment errors */
295         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
296
297         /* receiver fifo overrun */
298         /* drops_no_pbuf is not per i/f, it's per BE card */
299         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
300                                         port_stats->rx_input_fifo_overflow +
301                                         rxf_stats->rx_drops_no_pbuf;
302 }
303
304 void be_link_status_update(struct be_adapter *adapter, bool link_up)
305 {
306         struct net_device *netdev = adapter->netdev;
307
308         /* If link came up or went down */
309         if (adapter->link_up != link_up) {
310                 adapter->link_speed = -1;
311                 if (link_up) {
312                         netif_carrier_on(netdev);
313                         printk(KERN_INFO "%s: Link up\n", netdev->name);
314                 } else {
315                         netif_carrier_off(netdev);
316                         printk(KERN_INFO "%s: Link down\n", netdev->name);
317                 }
318                 adapter->link_up = link_up;
319         }
320 }
321
322 /* Update the EQ delay in BE based on the RX frags consumed / sec */
323 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
324 {
325         struct be_eq_obj *rx_eq = &rxo->rx_eq;
326         struct be_rx_stats *stats = &rxo->stats;
327         ulong now = jiffies;
328         u32 eqd;
329
330         if (!rx_eq->enable_aic)
331                 return;
332
333         /* Wrapped around */
334         if (time_before(now, stats->rx_fps_jiffies)) {
335                 stats->rx_fps_jiffies = now;
336                 return;
337         }
338
339         /* Update once a second */
340         if ((now - stats->rx_fps_jiffies) < HZ)
341                 return;
342
343         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
344                         ((now - stats->rx_fps_jiffies) / HZ);
345
346         stats->rx_fps_jiffies = now;
347         stats->prev_rx_frags = stats->rx_frags;
348         eqd = stats->rx_fps / 110000;
349         eqd = eqd << 3;
350         if (eqd > rx_eq->max_eqd)
351                 eqd = rx_eq->max_eqd;
352         if (eqd < rx_eq->min_eqd)
353                 eqd = rx_eq->min_eqd;
354         if (eqd < 10)
355                 eqd = 0;
356         if (eqd != rx_eq->cur_eqd)
357                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
358
359         rx_eq->cur_eqd = eqd;
360 }
361
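/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec */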
362 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363 {
364         u64 rate = bytes;
365
366         do_div(rate, ticks / HZ);
367         rate <<= 3;                     /* bytes/sec -> bits/sec */
368         do_div(rate, 1000000ul);        /* Mbits/sec */
369
370         return rate;
371 }
372
373 static void be_tx_rate_update(struct be_adapter *adapter)
374 {
375         struct be_tx_stats *stats = tx_stats(adapter);
376         ulong now = jiffies;
377
378         /* Wrapped around? */
379         if (time_before(now, stats->be_tx_jiffies)) {
380                 stats->be_tx_jiffies = now;
381                 return;
382         }
383
384         /* Update tx rate once in two seconds */
385         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387                                                   - stats->be_tx_bytes_prev,
388                                                  now - stats->be_tx_jiffies);
389                 stats->be_tx_jiffies = now;
390                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391         }
392 }
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397         struct be_tx_stats *stats = tx_stats(adapter);
398         stats->be_tx_reqs++;
399         stats->be_tx_wrbs += wrb_cnt;
400         stats->be_tx_bytes += copied;
401         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402         if (stopped)
403                 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
408                                                                 bool *dummy)
409 {
410         int cnt = (skb->len > skb->data_len);
411
412         cnt += skb_shinfo(skb)->nr_frags;
413
414         /* to account for hdr wrb */
415         cnt++;
416         if (lancer_chip(adapter) || !(cnt & 1)) {
417                 *dummy = false;
418         } else {
419                 /* add a dummy to make it an even num */
420                 cnt++;
421                 *dummy = true;
422         }
423         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
424         return cnt;
425 }
426
427 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
428 {
429         wrb->frag_pa_hi = upper_32_bits(addr);
430         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
431         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
432 }
433
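/* Fill the per-packet header WRB: checksum/LSO offload flags, vlan tag
 * (with the priority remapped if needed), total length and WRB count */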
434 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
435                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
436 {
437         u8 vlan_prio = 0;
438         u16 vlan_tag = 0;
439
440         memset(hdr, 0, sizeof(*hdr));
441
442         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
443
444         if (skb_is_gso(skb)) {
445                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
446                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
447                         hdr, skb_shinfo(skb)->gso_size);
448                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
449                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
450                 if (lancer_chip(adapter) && adapter->sli_family  ==
451                                                         LANCER_A0_SLI_FAMILY) {
452                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
453                         if (is_tcp_pkt(skb))
454                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
455                                                                 tcpcs, hdr, 1);
456                         else if (is_udp_pkt(skb))
457                                 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
458                                                                 udpcs, hdr, 1);
459                 }
460         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
461                 if (is_tcp_pkt(skb))
462                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
463                 else if (is_udp_pkt(skb))
464                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
465         }
466
467         if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
468                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
469                 vlan_tag = vlan_tx_tag_get(skb);
470                 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
471                 /* If vlan priority provided by OS is NOT in available bmap */
472                 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
473                         vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
474                                         adapter->recommended_prio;
475                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
476         }
477
478         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
479         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
480         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
481         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
482 }
483
484 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
485                 bool unmap_single)
486 {
487         dma_addr_t dma;
488
489         be_dws_le_to_cpu(wrb, sizeof(*wrb));
490
491         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
492         if (wrb->frag_len) {
493                 if (unmap_single)
494                         dma_unmap_single(dev, dma, wrb->frag_len,
495                                          DMA_TO_DEVICE);
496                 else
497                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
498         }
499 }
500
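/* DMA-map the skb head and frags and write one WRB per fragment (plus an
 * optional dummy WRB), then fill the header WRB. Returns the number of bytes
 * queued, or 0 after unwinding on a mapping error */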
501 static int make_tx_wrbs(struct be_adapter *adapter,
502                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
503 {
504         dma_addr_t busaddr;
505         int i, copied = 0;
506         struct device *dev = &adapter->pdev->dev;
507         struct sk_buff *first_skb = skb;
508         struct be_queue_info *txq = &adapter->tx_obj.q;
509         struct be_eth_wrb *wrb;
510         struct be_eth_hdr_wrb *hdr;
511         bool map_single = false;
512         u16 map_head;
513
514         hdr = queue_head_node(txq);
515         queue_head_inc(txq);
516         map_head = txq->head;
517
518         if (skb->len > skb->data_len) {
519                 int len = skb_headlen(skb);
520                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
521                 if (dma_mapping_error(dev, busaddr))
522                         goto dma_err;
523                 map_single = true;
524                 wrb = queue_head_node(txq);
525                 wrb_fill(wrb, busaddr, len);
526                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
527                 queue_head_inc(txq);
528                 copied += len;
529         }
530
531         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
532                 struct skb_frag_struct *frag =
533                         &skb_shinfo(skb)->frags[i];
534                 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
535                                        frag->size, DMA_TO_DEVICE);
536                 if (dma_mapping_error(dev, busaddr))
537                         goto dma_err;
538                 wrb = queue_head_node(txq);
539                 wrb_fill(wrb, busaddr, frag->size);
540                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
541                 queue_head_inc(txq);
542                 copied += frag->size;
543         }
544
545         if (dummy_wrb) {
546                 wrb = queue_head_node(txq);
547                 wrb_fill(wrb, 0, 0);
548                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
549                 queue_head_inc(txq);
550         }
551
552         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
553         be_dws_cpu_to_le(hdr, sizeof(*hdr));
554
555         return copied;
556 dma_err:
557         txq->head = map_head;
558         while (copied) {
559                 wrb = queue_head_node(txq);
560                 unmap_tx_frag(dev, wrb, map_single);
561                 map_single = false;
562                 copied -= wrb->frag_len;
563                 queue_head_inc(txq);
564         }
565         return 0;
566 }
567
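/* ndo_start_xmit: build the WRBs for this skb, stop the queue if it cannot
 * hold another worst-case request, then ring the TX doorbell */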
568 static netdev_tx_t be_xmit(struct sk_buff *skb,
569                         struct net_device *netdev)
570 {
571         struct be_adapter *adapter = netdev_priv(netdev);
572         struct be_tx_obj *tx_obj = &adapter->tx_obj;
573         struct be_queue_info *txq = &tx_obj->q;
574         u32 wrb_cnt = 0, copied = 0;
575         u32 start = txq->head;
576         bool dummy_wrb, stopped = false;
577
578         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
579
580         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
581         if (copied) {
582                 /* record the sent skb in the sent_skb table */
583                 BUG_ON(tx_obj->sent_skb_list[start]);
584                 tx_obj->sent_skb_list[start] = skb;
585
586                 /* Ensure txq has space for the next skb; else stop the queue
587                  * *BEFORE* ringing the tx doorbell, so that we serialize the
588                  * tx compls of the current transmit which'll wake up the queue
589                  */
590                 atomic_add(wrb_cnt, &txq->used);
591                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
592                                                                 txq->len) {
593                         netif_stop_queue(netdev);
594                         stopped = true;
595                 }
596
597                 be_txq_notify(adapter, txq->id, wrb_cnt);
598
599                 be_tx_stats_update(adapter, wrb_cnt, copied,
600                                 skb_shinfo(skb)->gso_segs, stopped);
601         } else {
602                 txq->head = start;
603                 dev_kfree_skb_any(skb);
604         }
605         return NETDEV_TX_OK;
606 }
607
608 static int be_change_mtu(struct net_device *netdev, int new_mtu)
609 {
610         struct be_adapter *adapter = netdev_priv(netdev);
611         if (new_mtu < BE_MIN_MTU ||
612                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
613                                         (ETH_HLEN + ETH_FCS_LEN))) {
614                 dev_info(&adapter->pdev->dev,
615                         "MTU must be between %d and %d bytes\n",
616                         BE_MIN_MTU,
617                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
618                 return -EINVAL;
619         }
620         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
621                         netdev->mtu, new_mtu);
622         netdev->mtu = new_mtu;
623         return 0;
624 }
625
626 /*
627  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
628  * If the user configures more, place BE in vlan promiscuous mode.
629  */
630 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
631 {
632         u16 vtag[BE_NUM_VLANS_SUPPORTED];
633         u16 ntags = 0, i;
634         int status = 0;
635         u32 if_handle;
636
637         if (vf) {
638                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
639                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
640                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
641         }
642
643         if (adapter->vlans_added <= adapter->max_vlans)  {
644                 /* Construct VLAN Table to give to HW */
645                 for (i = 0; i < VLAN_N_VID; i++) {
646                         if (adapter->vlan_tag[i]) {
647                                 vtag[ntags] = cpu_to_le16(i);
648                                 ntags++;
649                         }
650                 }
651                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
652                                         vtag, ntags, 1, 0);
653         } else {
654                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
655                                         NULL, 0, 1, 1);
656         }
657
658         return status;
659 }
660
661 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
662 {
663         struct be_adapter *adapter = netdev_priv(netdev);
664
665         adapter->vlan_grp = grp;
666 }
667
668 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
669 {
670         struct be_adapter *adapter = netdev_priv(netdev);
671
672         adapter->vlans_added++;
673         if (!be_physfn(adapter))
674                 return;
675
676         adapter->vlan_tag[vid] = 1;
677         if (adapter->vlans_added <= (adapter->max_vlans + 1))
678                 be_vid_config(adapter, false, 0);
679 }
680
681 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
682 {
683         struct be_adapter *adapter = netdev_priv(netdev);
684
685         adapter->vlans_added--;
686         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
687
688         if (!be_physfn(adapter))
689                 return;
690
691         adapter->vlan_tag[vid] = 0;
692         if (adapter->vlans_added <= adapter->max_vlans)
693                 be_vid_config(adapter, false, 0);
694 }
695
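/* Program the RX filter: promiscuous, multicast-promiscuous (when allmulti is
 * set or the list is too long), or the exact multicast list */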
696 static void be_set_multicast_list(struct net_device *netdev)
697 {
698         struct be_adapter *adapter = netdev_priv(netdev);
699
700         if (netdev->flags & IFF_PROMISC) {
701                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
702                 adapter->promiscuous = true;
703                 goto done;
704         }
705
706         /* BE was previously in promiscuous mode; disable it */
707         if (adapter->promiscuous) {
708                 adapter->promiscuous = false;
709                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
710         }
711
712         /* Enable multicast promisc if num configured exceeds what we support */
713         if (netdev->flags & IFF_ALLMULTI ||
714             netdev_mc_count(netdev) > BE_MAX_MC) {
715                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
716                                 &adapter->mc_cmd_mem);
717                 goto done;
718         }
719
720         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
721                 &adapter->mc_cmd_mem);
722 done:
723         return;
724 }
725
726 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
727 {
728         struct be_adapter *adapter = netdev_priv(netdev);
729         int status;
730
731         if (!adapter->sriov_enabled)
732                 return -EPERM;
733
734         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
735                 return -EINVAL;
736
737         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
738                 status = be_cmd_pmac_del(adapter,
739                                         adapter->vf_cfg[vf].vf_if_handle,
740                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
741
742         status = be_cmd_pmac_add(adapter, mac,
743                                 adapter->vf_cfg[vf].vf_if_handle,
744                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
745
746         if (status)
747                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748                                 mac, vf);
749         else
750                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
751
752         return status;
753 }
754
755 static int be_get_vf_config(struct net_device *netdev, int vf,
756                         struct ifla_vf_info *vi)
757 {
758         struct be_adapter *adapter = netdev_priv(netdev);
759
760         if (!adapter->sriov_enabled)
761                 return -EPERM;
762
763         if (vf >= num_vfs)
764                 return -EINVAL;
765
766         vi->vf = vf;
767         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
768         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
769         vi->qos = 0;
770         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
771
772         return 0;
773 }
774
775 static int be_set_vf_vlan(struct net_device *netdev,
776                         int vf, u16 vlan, u8 qos)
777 {
778         struct be_adapter *adapter = netdev_priv(netdev);
779         int status = 0;
780
781         if (!adapter->sriov_enabled)
782                 return -EPERM;
783
784         if ((vf >= num_vfs) || (vlan > 4095))
785                 return -EINVAL;
786
787         if (vlan) {
788                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
789                 adapter->vlans_added++;
790         } else {
791                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
792                 adapter->vlans_added--;
793         }
794
795         status = be_vid_config(adapter, true, vf);
796
797         if (status)
798                 dev_info(&adapter->pdev->dev,
799                                 "VLAN %d config on VF %d failed\n", vlan, vf);
800         return status;
801 }
802
803 static int be_set_vf_tx_rate(struct net_device *netdev,
804                         int vf, int rate)
805 {
806         struct be_adapter *adapter = netdev_priv(netdev);
807         int status = 0;
808
809         if (!adapter->sriov_enabled)
810                 return -EPERM;
811
812         if ((vf >= num_vfs) || (rate < 0))
813                 return -EINVAL;
814
815         if (rate > 10000)
816                 rate = 10000;
817
818         adapter->vf_cfg[vf].vf_tx_rate = rate;
819         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
820
821         if (status)
822                 dev_info(&adapter->pdev->dev,
823                                 "tx rate %d on VF %d failed\n", rate, vf);
824         return status;
825 }
826
827 static void be_rx_rate_update(struct be_rx_obj *rxo)
828 {
829         struct be_rx_stats *stats = &rxo->stats;
830         ulong now = jiffies;
831
832         /* Wrapped around */
833         if (time_before(now, stats->rx_jiffies)) {
834                 stats->rx_jiffies = now;
835                 return;
836         }
837
838         /* Update the rate once in two seconds */
839         if ((now - stats->rx_jiffies) < 2 * HZ)
840                 return;
841
842         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
843                                 now - stats->rx_jiffies);
844         stats->rx_jiffies = now;
845         stats->rx_bytes_prev = stats->rx_bytes;
846 }
847
848 static void be_rx_stats_update(struct be_rx_obj *rxo,
849                 struct be_rx_compl_info *rxcp)
850 {
851         struct be_rx_stats *stats = &rxo->stats;
852
853         stats->rx_compl++;
854         stats->rx_frags += rxcp->num_rcvd;
855         stats->rx_bytes += rxcp->pkt_size;
856         stats->rx_pkts++;
857         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
858                 stats->rx_mcast_pkts++;
859         if (rxcp->err)
860                 stats->rxcp_err++;
861 }
862
863 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
864 {
865         /* L4 checksum is not reliable for non TCP/UDP packets.
866          * Also ignore ipcksm for ipv6 pkts */
867         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
868                                 (rxcp->ip_csum || rxcp->ipv6);
869 }
870
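/* Return the page_info of the posted rx frag at frag_idx; the backing page is
 * DMA-unmapped when its last frag is being consumed */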
871 static struct be_rx_page_info *
872 get_rx_page_info(struct be_adapter *adapter,
873                 struct be_rx_obj *rxo,
874                 u16 frag_idx)
875 {
876         struct be_rx_page_info *rx_page_info;
877         struct be_queue_info *rxq = &rxo->q;
878
879         rx_page_info = &rxo->page_info_tbl[frag_idx];
880         BUG_ON(!rx_page_info->page);
881
882         if (rx_page_info->last_page_user) {
883                 dma_unmap_page(&adapter->pdev->dev,
884                                dma_unmap_addr(rx_page_info, bus),
885                                adapter->big_page_size, DMA_FROM_DEVICE);
886                 rx_page_info->last_page_user = false;
887         }
888
889         atomic_dec(&rxq->used);
890         return rx_page_info;
891 }
892
893 /* Throw away the data in the Rx completion */
894 static void be_rx_compl_discard(struct be_adapter *adapter,
895                 struct be_rx_obj *rxo,
896                 struct be_rx_compl_info *rxcp)
897 {
898         struct be_queue_info *rxq = &rxo->q;
899         struct be_rx_page_info *page_info;
900         u16 i, num_rcvd = rxcp->num_rcvd;
901
902         for (i = 0; i < num_rcvd; i++) {
903                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
904                 put_page(page_info->page);
905                 memset(page_info, 0, sizeof(*page_info));
906                 index_inc(&rxcp->rxq_idx, rxq->len);
907         }
908 }
909
910 /*
911  * skb_fill_rx_data forms a complete skb for an ether frame
912  * indicated by rxcp.
913  */
914 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
915                         struct sk_buff *skb, struct be_rx_compl_info *rxcp)
916 {
917         struct be_queue_info *rxq = &rxo->q;
918         struct be_rx_page_info *page_info;
919         u16 i, j;
920         u16 hdr_len, curr_frag_len, remaining;
921         u8 *start;
922
923         page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
924         start = page_address(page_info->page) + page_info->page_offset;
925         prefetch(start);
926
927         /* Copy data in the first descriptor of this completion */
928         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
929
930         /* Copy the header portion into skb_data */
931         hdr_len = min(BE_HDR_LEN, curr_frag_len);
932         memcpy(skb->data, start, hdr_len);
933         skb->len = curr_frag_len;
934         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
935                 /* Complete packet has now been moved to data */
936                 put_page(page_info->page);
937                 skb->data_len = 0;
938                 skb->tail += curr_frag_len;
939         } else {
940                 skb_shinfo(skb)->nr_frags = 1;
941                 skb_shinfo(skb)->frags[0].page = page_info->page;
942                 skb_shinfo(skb)->frags[0].page_offset =
943                                         page_info->page_offset + hdr_len;
944                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
945                 skb->data_len = curr_frag_len - hdr_len;
946                 skb->tail += hdr_len;
947         }
948         page_info->page = NULL;
949
950         if (rxcp->pkt_size <= rx_frag_size) {
951                 BUG_ON(rxcp->num_rcvd != 1);
952                 return;
953         }
954
955         /* More frags present for this completion */
956         index_inc(&rxcp->rxq_idx, rxq->len);
957         remaining = rxcp->pkt_size - curr_frag_len;
958         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
959                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
960                 curr_frag_len = min(remaining, rx_frag_size);
961
962                 /* Coalesce all frags from the same physical page in one slot */
963                 if (page_info->page_offset == 0) {
964                         /* Fresh page */
965                         j++;
966                         skb_shinfo(skb)->frags[j].page = page_info->page;
967                         skb_shinfo(skb)->frags[j].page_offset =
968                                                         page_info->page_offset;
969                         skb_shinfo(skb)->frags[j].size = 0;
970                         skb_shinfo(skb)->nr_frags++;
971                 } else {
972                         put_page(page_info->page);
973                 }
974
975                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
976                 skb->len += curr_frag_len;
977                 skb->data_len += curr_frag_len;
978
979                 remaining -= curr_frag_len;
980                 index_inc(&rxcp->rxq_idx, rxq->len);
981                 page_info->page = NULL;
982         }
983         BUG_ON(j > MAX_SKB_FRAGS);
984 }
985
986 /* Process the RX completion indicated by rxcp when GRO is disabled */
987 static void be_rx_compl_process(struct be_adapter *adapter,
988                         struct be_rx_obj *rxo,
989                         struct be_rx_compl_info *rxcp)
990 {
991         struct net_device *netdev = adapter->netdev;
992         struct sk_buff *skb;
993
994         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
995         if (unlikely(!skb)) {
996                 if (net_ratelimit())
997                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
998                 be_rx_compl_discard(adapter, rxo, rxcp);
999                 return;
1000         }
1001
1002         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1003
1004         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1005                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1006         else
1007                 skb_checksum_none_assert(skb);
1008
1009         skb->truesize = skb->len + sizeof(struct sk_buff);
1010         skb->protocol = eth_type_trans(skb, netdev);
1011         if (adapter->netdev->features & NETIF_F_RXHASH)
1012                 skb->rxhash = rxcp->rss_hash;
1013
1015         if (unlikely(rxcp->vlanf)) {
1016                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1017                         kfree_skb(skb);
1018                         return;
1019                 }
1020                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1021         } else {
1022                 netif_receive_skb(skb);
1023         }
1024 }
1025
1026 /* Process the RX completion indicated by rxcp when GRO is enabled */
1027 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1028                 struct be_rx_obj *rxo,
1029                 struct be_rx_compl_info *rxcp)
1030 {
1031         struct be_rx_page_info *page_info;
1032         struct sk_buff *skb = NULL;
1033         struct be_queue_info *rxq = &rxo->q;
1034         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1035         u16 remaining, curr_frag_len;
1036         u16 i, j;
1037
1038         skb = napi_get_frags(&eq_obj->napi);
1039         if (!skb) {
1040                 be_rx_compl_discard(adapter, rxo, rxcp);
1041                 return;
1042         }
1043
1044         remaining = rxcp->pkt_size;
1045         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1046                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1047
1048                 curr_frag_len = min(remaining, rx_frag_size);
1049
1050                 /* Coalesce all frags from the same physical page in one slot */
1051                 if (i == 0 || page_info->page_offset == 0) {
1052                         /* First frag or Fresh page */
1053                         j++;
1054                         skb_shinfo(skb)->frags[j].page = page_info->page;
1055                         skb_shinfo(skb)->frags[j].page_offset =
1056                                                         page_info->page_offset;
1057                         skb_shinfo(skb)->frags[j].size = 0;
1058                 } else {
1059                         put_page(page_info->page);
1060                 }
1061                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1062
1063                 remaining -= curr_frag_len;
1064                 index_inc(&rxcp->rxq_idx, rxq->len);
1065                 memset(page_info, 0, sizeof(*page_info));
1066         }
1067         BUG_ON(j > MAX_SKB_FRAGS);
1068
1069         skb_shinfo(skb)->nr_frags = j + 1;
1070         skb->len = rxcp->pkt_size;
1071         skb->data_len = rxcp->pkt_size;
1072         skb->truesize += rxcp->pkt_size;
1073         skb->ip_summed = CHECKSUM_UNNECESSARY;
1074         if (adapter->netdev->features & NETIF_F_RXHASH)
1075                 skb->rxhash = rxcp->rss_hash;
1076
1077         if (likely(!rxcp->vlanf))
1078                 napi_gro_frags(&eq_obj->napi);
1079         else
1080                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1081 }
1082
1083 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1084                                 struct be_eth_rx_compl *compl,
1085                                 struct be_rx_compl_info *rxcp)
1086 {
1087         rxcp->pkt_size =
1088                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1089         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1090         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1091         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1092         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1093         rxcp->ip_csum =
1094                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1095         rxcp->l4_csum =
1096                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1097         rxcp->ipv6 =
1098                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1099         rxcp->rxq_idx =
1100                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1101         rxcp->num_rcvd =
1102                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1103         rxcp->pkt_type =
1104                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1105         rxcp->rss_hash =
1106                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1107         if (rxcp->vlanf) {
1108                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1109                                 compl);
1110                 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1111                                 compl);
1112         }
1113 }
1114
1115 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1116                                 struct be_eth_rx_compl *compl,
1117                                 struct be_rx_compl_info *rxcp)
1118 {
1119         rxcp->pkt_size =
1120                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1121         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1122         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1123         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1124         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1125         rxcp->ip_csum =
1126                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1127         rxcp->l4_csum =
1128                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1129         rxcp->ipv6 =
1130                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1131         rxcp->rxq_idx =
1132                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1133         rxcp->num_rcvd =
1134                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1135         rxcp->pkt_type =
1136                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1137         rxcp->rss_hash =
1138                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1139         if (rxcp->vlanf) {
1140                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1141                                 compl);
1142                 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1143                                 compl);
1144         }
1145 }
1146
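/* Return the next valid rx completion parsed into rxo->rxcp (v0 or v1 format),
 * or NULL if none is pending; the CQ entry is consumed and invalidated */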
1147 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1148 {
1149         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1150         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1151         struct be_adapter *adapter = rxo->adapter;
1152
1153         /* For checking the valid bit it is Ok to use either definition as the
1154          * valid bit is at the same position in both v0 and v1 Rx compl */
1155         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1156                 return NULL;
1157
1158         rmb();
1159         be_dws_le_to_cpu(compl, sizeof(*compl));
1160
1161         if (adapter->be3_native)
1162                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1163         else
1164                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1165
1166         if (rxcp->vlanf) {
1167                 /* vlanf could be wrongly set in some cards.
1168                  * ignore if vtm is not set */
1169                 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1170                         rxcp->vlanf = 0;
1171
1172                 if (!lancer_chip(adapter))
1173                         rxcp->vid = swab16(rxcp->vid);
1174
1175                 if ((adapter->pvid == rxcp->vid) &&
1176                         !adapter->vlan_tag[rxcp->vid])
1177                         rxcp->vlanf = 0;
1178         }
1179
1180         /* As the compl has been parsed, reset it; we won't touch it again */
1181         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1182
1183         queue_tail_inc(&rxo->cq);
1184         return rxcp;
1185 }
1186
1187 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1188 {
1189         u32 order = get_order(size);
1190
1191         if (order > 0)
1192                 gfp |= __GFP_COMP;
1193         return  alloc_pages(gfp, order);
1194 }
1195
1196 /*
1197  * Allocate a page, split it to fragments of size rx_frag_size and post as
1198  * receive buffers to BE
1199  */
1200 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1201 {
1202         struct be_adapter *adapter = rxo->adapter;
1203         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1204         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1205         struct be_queue_info *rxq = &rxo->q;
1206         struct page *pagep = NULL;
1207         struct be_eth_rx_d *rxd;
1208         u64 page_dmaaddr = 0, frag_dmaaddr;
1209         u32 posted, page_offset = 0;
1210
1211         page_info = &rxo->page_info_tbl[rxq->head];
1212         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1213                 if (!pagep) {
1214                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1215                         if (unlikely(!pagep)) {
1216                                 rxo->stats.rx_post_fail++;
1217                                 break;
1218                         }
1219                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1220                                                     0, adapter->big_page_size,
1221                                                     DMA_FROM_DEVICE);
1222                         page_info->page_offset = 0;
1223                 } else {
1224                         get_page(pagep);
1225                         page_info->page_offset = page_offset + rx_frag_size;
1226                 }
1227                 page_offset = page_info->page_offset;
1228                 page_info->page = pagep;
1229                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1230                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1231
1232                 rxd = queue_head_node(rxq);
1233                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1234                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1235
1236                 /* Any space left in the current big page for another frag? */
1237                 if ((page_offset + rx_frag_size + rx_frag_size) >
1238                                         adapter->big_page_size) {
1239                         pagep = NULL;
1240                         page_info->last_page_user = true;
1241                 }
1242
1243                 prev_page_info = page_info;
1244                 queue_head_inc(rxq);
1245                 page_info = &page_info_tbl[rxq->head];
1246         }
1247         if (pagep)
1248                 prev_page_info->last_page_user = true;
1249
1250         if (posted) {
1251                 atomic_add(posted, &rxq->used);
1252                 be_rxq_notify(adapter, rxq->id, posted);
1253         } else if (atomic_read(&rxq->used) == 0) {
1254                 /* Let be_worker replenish when memory is available */
1255                 rxo->rx_post_starved = true;
1256         }
1257 }
1258
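/* Return the next valid tx completion, or NULL if none is pending;
 * the CQ entry is consumed and invalidated */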
1259 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1260 {
1261         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1262
1263         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1264                 return NULL;
1265
1266         rmb();
1267         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1268
1269         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1270
1271         queue_tail_inc(tx_cq);
1272         return txcp;
1273 }
1274
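/* Unmap the WRBs of the skb whose completion points at last_index,
 * reclaim the ring entries and free the skb */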
1275 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1276 {
1277         struct be_queue_info *txq = &adapter->tx_obj.q;
1278         struct be_eth_wrb *wrb;
1279         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1280         struct sk_buff *sent_skb;
1281         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1282         bool unmap_skb_hdr = true;
1283
1284         sent_skb = sent_skbs[txq->tail];
1285         BUG_ON(!sent_skb);
1286         sent_skbs[txq->tail] = NULL;
1287
1288         /* skip header wrb */
1289         queue_tail_inc(txq);
1290
1291         do {
1292                 cur_index = txq->tail;
1293                 wrb = queue_tail_node(txq);
1294                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1295                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1296                 unmap_skb_hdr = false;
1297
1298                 num_wrbs++;
1299                 queue_tail_inc(txq);
1300         } while (cur_index != last_index);
1301
1302         atomic_sub(num_wrbs, &txq->used);
1303
1304         kfree_skb(sent_skb);
1305 }
1306
1307 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1308 {
1309         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1310
1311         if (!eqe->evt)
1312                 return NULL;
1313
1314         rmb();
1315         eqe->evt = le32_to_cpu(eqe->evt);
1316         queue_tail_inc(&eq_obj->q);
1317         return eqe;
1318 }
1319
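/* Drain all pending entries from the event queue, re-arm it and schedule
 * NAPI if any events were seen */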
1320 static int event_handle(struct be_adapter *adapter,
1321                         struct be_eq_obj *eq_obj)
1322 {
1323         struct be_eq_entry *eqe;
1324         u16 num = 0;
1325
1326         while ((eqe = event_get(eq_obj)) != NULL) {
1327                 eqe->evt = 0;
1328                 num++;
1329         }
1330
1331         /* Deal with any spurious interrupts that come
1332          * without events
1333          */
1334         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1335         if (num)
1336                 napi_schedule(&eq_obj->napi);
1337
1338         return num;
1339 }
1340
1341 /* Just read and notify events without processing them.
1342  * Used at the time of destroying event queues */
1343 static void be_eq_clean(struct be_adapter *adapter,
1344                         struct be_eq_obj *eq_obj)
1345 {
1346         struct be_eq_entry *eqe;
1347         u16 num = 0;
1348
1349         while ((eqe = event_get(eq_obj)) != NULL) {
1350                 eqe->evt = 0;
1351                 num++;
1352         }
1353
1354         if (num)
1355                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1356 }
1357
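/* Discard any pending rx completions and free rx buffers still posted
 * to the ring */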
1358 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1359 {
1360         struct be_rx_page_info *page_info;
1361         struct be_queue_info *rxq = &rxo->q;
1362         struct be_queue_info *rx_cq = &rxo->cq;
1363         struct be_rx_compl_info *rxcp;
1364         u16 tail;
1365
1366         /* First cleanup pending rx completions */
1367         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1368                 be_rx_compl_discard(adapter, rxo, rxcp);
1369                 be_cq_notify(adapter, rx_cq->id, false, 1);
1370         }
1371
1372         /* Then free posted rx buffers that were not used */
1373         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1374         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1375                 page_info = get_rx_page_info(adapter, rxo, tail);
1376                 put_page(page_info->page);
1377                 memset(page_info, 0, sizeof(*page_info));
1378         }
1379         BUG_ON(atomic_read(&rxq->used));
1380 }
1381
1382 static void be_tx_compl_clean(struct be_adapter *adapter)
1383 {
1384         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1385         struct be_queue_info *txq = &adapter->tx_obj.q;
1386         struct be_eth_tx_compl *txcp;
1387         u16 end_idx, cmpl = 0, timeo = 0;
1388         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1389         struct sk_buff *sent_skb;
1390         bool dummy_wrb;
1391
1392         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1393         do {
1394                 while ((txcp = be_tx_compl_get(tx_cq))) {
1395                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1396                                         wrb_index, txcp);
1397                         be_tx_compl_process(adapter, end_idx);
1398                         cmpl++;
1399                 }
1400                 if (cmpl) {
1401                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1402                         cmpl = 0;
1403                 }
1404
1405                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1406                         break;
1407
1408                 mdelay(1);
1409         } while (true);
1410
1411         if (atomic_read(&txq->used))
1412                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1413                         atomic_read(&txq->used));
1414
1415         /* free posted tx for which compls will never arrive */
1416         while (atomic_read(&txq->used)) {
1417                 sent_skb = sent_skbs[txq->tail];
1418                 end_idx = txq->tail;
1419                 index_adv(&end_idx,
1420                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1421                         txq->len);
1422                 be_tx_compl_process(adapter, end_idx);
1423         }
1424 }
1425
1426 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1427 {
1428         struct be_queue_info *q;
1429
1430         q = &adapter->mcc_obj.q;
1431         if (q->created)
1432                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1433         be_queue_free(adapter, q);
1434
1435         q = &adapter->mcc_obj.cq;
1436         if (q->created)
1437                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1438         be_queue_free(adapter, q);
1439 }
1440
1441 /* Must be called only after TX qs are created as MCC shares TX EQ */
1442 static int be_mcc_queues_create(struct be_adapter *adapter)
1443 {
1444         struct be_queue_info *q, *cq;
1445
1446         /* Alloc MCC compl queue */
1447         cq = &adapter->mcc_obj.cq;
1448         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1449                         sizeof(struct be_mcc_compl)))
1450                 goto err;
1451
1452         /* Ask BE to create MCC compl queue; share TX's eq */
1453         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1454                 goto mcc_cq_free;
1455
1456         /* Alloc MCC queue */
1457         q = &adapter->mcc_obj.q;
1458         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1459                 goto mcc_cq_destroy;
1460
1461         /* Ask BE to create MCC queue */
1462         if (be_cmd_mccq_create(adapter, q, cq))
1463                 goto mcc_q_free;
1464
1465         return 0;
1466
1467 mcc_q_free:
1468         be_queue_free(adapter, q);
1469 mcc_cq_destroy:
1470         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1471 mcc_cq_free:
1472         be_queue_free(adapter, cq);
1473 err:
1474         return -1;
1475 }
1476
1477 static void be_tx_queues_destroy(struct be_adapter *adapter)
1478 {
1479         struct be_queue_info *q;
1480
1481         q = &adapter->tx_obj.q;
1482         if (q->created)
1483                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1484         be_queue_free(adapter, q);
1485
1486         q = &adapter->tx_obj.cq;
1487         if (q->created)
1488                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1489         be_queue_free(adapter, q);
1490
1491         /* Clear any residual events */
1492         be_eq_clean(adapter, &adapter->tx_eq);
1493
1494         q = &adapter->tx_eq.q;
1495         if (q->created)
1496                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1497         be_queue_free(adapter, q);
1498 }
1499
1500 static int be_tx_queues_create(struct be_adapter *adapter)
1501 {
1502         struct be_queue_info *eq, *q, *cq;
1503
1504         adapter->tx_eq.max_eqd = 0;
1505         adapter->tx_eq.min_eqd = 0;
1506         adapter->tx_eq.cur_eqd = 96;
1507         adapter->tx_eq.enable_aic = false;
1508         /* Alloc Tx Event queue */
1509         eq = &adapter->tx_eq.q;
1510         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1511                 return -1;
1512
1513         /* Ask BE to create Tx Event queue */
1514         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1515                 goto tx_eq_free;
1516
1517         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1518
1519
1520         /* Alloc TX eth compl queue */
1521         cq = &adapter->tx_obj.cq;
1522         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1523                         sizeof(struct be_eth_tx_compl)))
1524                 goto tx_eq_destroy;
1525
1526         /* Ask BE to create Tx eth compl queue */
1527         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1528                 goto tx_cq_free;
1529
1530         /* Alloc TX eth queue */
1531         q = &adapter->tx_obj.q;
1532         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1533                 goto tx_cq_destroy;
1534
1535         /* Ask BE to create Tx eth queue */
1536         if (be_cmd_txq_create(adapter, q, cq))
1537                 goto tx_q_free;
1538         return 0;
1539
1540 tx_q_free:
1541         be_queue_free(adapter, q);
1542 tx_cq_destroy:
1543         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1544 tx_cq_free:
1545         be_queue_free(adapter, cq);
1546 tx_eq_destroy:
1547         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1548 tx_eq_free:
1549         be_queue_free(adapter, eq);
1550         return -1;
1551 }
1552
1553 static void be_rx_queues_destroy(struct be_adapter *adapter)
1554 {
1555         struct be_queue_info *q;
1556         struct be_rx_obj *rxo;
1557         int i;
1558
1559         for_all_rx_queues(adapter, rxo, i) {
1560                 q = &rxo->q;
1561                 if (q->created) {
1562                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1563                         /* After the rxq is invalidated, wait for a grace time
1564                          * of 1ms for all dma to end and the flush compl to
1565                          * arrive
1566                          */
1567                         mdelay(1);
1568                         be_rx_q_clean(adapter, rxo);
1569                 }
1570                 be_queue_free(adapter, q);
1571
1572                 q = &rxo->cq;
1573                 if (q->created)
1574                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1575                 be_queue_free(adapter, q);
1576
1577                 /* Clear any residual events */
1578                 q = &rxo->rx_eq.q;
1579                 if (q->created) {
1580                         be_eq_clean(adapter, &rxo->rx_eq);
1581                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1582                 }
1583                 be_queue_free(adapter, q);
1584         }
1585 }
1586
1587 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1588 {
1589         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1590                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1591                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1592         } else {
1593                 dev_warn(&adapter->pdev->dev,
1594                         "No support for multiple RX queues\n");
1595                 return 1;
1596         }
1597 }
1598
1599 static int be_rx_queues_create(struct be_adapter *adapter)
1600 {
1601         struct be_queue_info *eq, *q, *cq;
1602         struct be_rx_obj *rxo;
1603         int rc, i;
1604
1605         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1606                                 msix_enabled(adapter) ?
1607                                         adapter->num_msix_vec - 1 : 1);
1608         if (adapter->num_rx_qs != MAX_RX_QS)
1609                 dev_warn(&adapter->pdev->dev,
1610                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1611
1612         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1613         for_all_rx_queues(adapter, rxo, i) {
1614                 rxo->adapter = adapter;
1615                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1616                 rxo->rx_eq.enable_aic = true;
1617
1618                 /* EQ */
1619                 eq = &rxo->rx_eq.q;
1620                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1621                                         sizeof(struct be_eq_entry));
1622                 if (rc)
1623                         goto err;
1624
1625                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1626                 if (rc)
1627                         goto err;
1628
1629                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1630
1631                 /* CQ */
1632                 cq = &rxo->cq;
1633                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1634                                 sizeof(struct be_eth_rx_compl));
1635                 if (rc)
1636                         goto err;
1637
1638                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1639                 if (rc)
1640                         goto err;
1641                 /* Rx Q */
1642                 q = &rxo->q;
1643                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1644                                 sizeof(struct be_eth_rx_d));
1645                 if (rc)
1646                         goto err;
1647
1648                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1649                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1650                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1651                 if (rc)
1652                         goto err;
1653         }
1654
1655         if (be_multi_rxq(adapter)) {
1656                 u8 rsstable[MAX_RSS_QS];
1657
1658                 for_all_rss_queues(adapter, rxo, i)
1659                         rsstable[i] = rxo->rss_id;
1660
1661                 rc = be_cmd_rss_config(adapter, rsstable,
1662                         adapter->num_rx_qs - 1);
1663                 if (rc)
1664                         goto err;
1665         }
1666
1667         return 0;
1668 err:
1669         be_rx_queues_destroy(adapter);
1670         return -1;
1671 }
1672
1673 static bool event_peek(struct be_eq_obj *eq_obj)
1674 {
1675         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1676         if (!eqe->evt)
1677                 return false;
1678         else
1679                 return true;
1680 }
1681
1682 static irqreturn_t be_intx(int irq, void *dev)
1683 {
1684         struct be_adapter *adapter = dev;
1685         struct be_rx_obj *rxo;
1686         int isr, i, tx = 0, rx = 0;
1687
1688         if (lancer_chip(adapter)) {
1689                 if (event_peek(&adapter->tx_eq))
1690                         tx = event_handle(adapter, &adapter->tx_eq);
1691                 for_all_rx_queues(adapter, rxo, i) {
1692                         if (event_peek(&rxo->rx_eq))
1693                                 rx |= event_handle(adapter, &rxo->rx_eq);
1694                 }
1695
1696                 if (!(tx || rx))
1697                         return IRQ_NONE;
1698
1699         } else {
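                /* Legacy INTx on BE2/BE3: read this function's interrupt
                 * status and service only the event queues whose bits are set.
                 */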
1700                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1701                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1702                 if (!isr)
1703                         return IRQ_NONE;
1704
1705                 if ((1 << adapter->tx_eq.eq_idx & isr))
1706                         event_handle(adapter, &adapter->tx_eq);
1707
1708                 for_all_rx_queues(adapter, rxo, i) {
1709                         if ((1 << rxo->rx_eq.eq_idx & isr))
1710                                 event_handle(adapter, &rxo->rx_eq);
1711                 }
1712         }
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 static irqreturn_t be_msix_rx(int irq, void *dev)
1718 {
1719         struct be_rx_obj *rxo = dev;
1720         struct be_adapter *adapter = rxo->adapter;
1721
1722         event_handle(adapter, &rxo->rx_eq);
1723
1724         return IRQ_HANDLED;
1725 }
1726
1727 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1728 {
1729         struct be_adapter *adapter = dev;
1730
1731         event_handle(adapter, &adapter->tx_eq);
1732
1733         return IRQ_HANDLED;
1734 }
1735
1736 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1737 {
1738         return rxcp->tcpf && !rxcp->err;
1739 }
1740
1741 static int be_poll_rx(struct napi_struct *napi, int budget)
1742 {
1743         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1744         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1745         struct be_adapter *adapter = rxo->adapter;
1746         struct be_queue_info *rx_cq = &rxo->cq;
1747         struct be_rx_compl_info *rxcp;
1748         u32 work_done;
1749
1750         rxo->stats.rx_polls++;
1751         for (work_done = 0; work_done < budget; work_done++) {
1752                 rxcp = be_rx_compl_get(rxo);
1753                 if (!rxcp)
1754                         break;
1755
1756                 /* Ignore flush completions */
1757                 if (rxcp->num_rcvd) {
1758                         if (do_gro(rxcp))
1759                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1760                         else
1761                                 be_rx_compl_process(adapter, rxo, rxcp);
1762                 }
1763                 be_rx_stats_update(rxo, rxcp);
1764         }
1765
1766         /* Refill the queue */
1767         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1768                 be_post_rx_frags(rxo, GFP_ATOMIC);
1769
1770         /* All consumed */
1771         if (work_done < budget) {
1772                 napi_complete(napi);
1773                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1774         } else {
1775                 /* More to be consumed; continue with interrupts disabled */
1776                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1777         }
1778         return work_done;
1779 }
1780
1781 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1782  * For TX/MCC we don't honour budget; consume everything
1783  */
1784 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1785 {
1786         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1787         struct be_adapter *adapter =
1788                 container_of(tx_eq, struct be_adapter, tx_eq);
1789         struct be_queue_info *txq = &adapter->tx_obj.q;
1790         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1791         struct be_eth_tx_compl *txcp;
1792         int tx_compl = 0, mcc_compl, status = 0;
1793         u16 end_idx;
1794
1795         while ((txcp = be_tx_compl_get(tx_cq))) {
1796                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1797                                 wrb_index, txcp);
1798                 be_tx_compl_process(adapter, end_idx);
1799                 tx_compl++;
1800         }
1801
1802         mcc_compl = be_process_mcc(adapter, &status);
1803
1804         napi_complete(napi);
1805
1806         if (mcc_compl) {
1807                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1808                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1809         }
1810
1811         if (tx_compl) {
1812                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1813
1814                 /* As Tx wrbs have been freed up, wake up netdev queue if
1815                  * it was stopped due to lack of tx wrbs.
1816                  */
1817                 if (netif_queue_stopped(adapter->netdev) &&
1818                         atomic_read(&txq->used) < txq->len / 2) {
1819                         netif_wake_queue(adapter->netdev);
1820                 }
1821
1822                 tx_stats(adapter)->be_tx_events++;
1823                 tx_stats(adapter)->be_tx_compl += tx_compl;
1824         }
1825
1826         return 1;
1827 }
1828
1829 void be_detect_dump_ue(struct be_adapter *adapter)
1830 {
1831         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1832         u32 i;
1833
1834         pci_read_config_dword(adapter->pdev,
1835                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1836         pci_read_config_dword(adapter->pdev,
1837                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1838         pci_read_config_dword(adapter->pdev,
1839                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1840         pci_read_config_dword(adapter->pdev,
1841                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1842
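        /* Report only the unrecoverable-error bits that are not masked off */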
1843         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1844         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1845
1846         if (ue_status_lo || ue_status_hi) {
1847                 adapter->ue_detected = true;
1848                 adapter->eeh_err = true;
1849                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1850         }
1851
1852         if (ue_status_lo) {
1853                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1854                         if (ue_status_lo & 1)
1855                                 dev_err(&adapter->pdev->dev,
1856                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1857                 }
1858         }
1859         if (ue_status_hi) {
1860                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1861                         if (ue_status_hi & 1)
1862                                 dev_err(&adapter->pdev->dev,
1863                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1864                 }
1865         }
1866
1867 }
1868
1869 static void be_worker(struct work_struct *work)
1870 {
1871         struct be_adapter *adapter =
1872                 container_of(work, struct be_adapter, work.work);
1873         struct be_rx_obj *rxo;
1874         int i;
1875
1876         if (!adapter->ue_detected && !lancer_chip(adapter))
1877                 be_detect_dump_ue(adapter);
1878
1879         /* when interrupts are not yet enabled, just reap any pending
1880          * mcc completions */
1881         if (!netif_running(adapter->netdev)) {
1882                 int mcc_compl, status = 0;
1883
1884                 mcc_compl = be_process_mcc(adapter, &status);
1885
1886                 if (mcc_compl) {
1887                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1888                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1889                 }
1890
1891                 goto reschedule;
1892         }
1893
1894         if (!adapter->stats_cmd_sent)
1895                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1896
1897         be_tx_rate_update(adapter);
1898
1899         for_all_rx_queues(adapter, rxo, i) {
1900                 be_rx_rate_update(rxo);
1901                 be_rx_eqd_update(adapter, rxo);
1902
1903                 if (rxo->rx_post_starved) {
1904                         rxo->rx_post_starved = false;
1905                         be_post_rx_frags(rxo, GFP_KERNEL);
1906                 }
1907         }
1908
1909 reschedule:
1910         adapter->work_counter++;
1911         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1912 }
1913
1914 static void be_msix_disable(struct be_adapter *adapter)
1915 {
1916         if (msix_enabled(adapter)) {
1917                 pci_disable_msix(adapter->pdev);
1918                 adapter->num_msix_vec = 0;
1919         }
1920 }
1921
1922 static void be_msix_enable(struct be_adapter *adapter)
1923 {
1924 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1925         int i, status, num_vec;
1926
1927         num_vec = be_num_rxqs_want(adapter) + 1;
1928
1929         for (i = 0; i < num_vec; i++)
1930                 adapter->msix_entries[i].entry = i;
1931
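        /* pci_enable_msix() returns 0 on success, a negative errno on failure,
         * or, when fewer than num_vec vectors are available, the number that
         * could have been allocated; in that case retry with the smaller
         * count, provided it covers the Rx + Tx minimum.
         */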
1932         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1933         if (status == 0) {
1934                 goto done;
1935         } else if (status >= BE_MIN_MSIX_VECTORS) {
1936                 num_vec = status;
1937                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1938                                 num_vec) == 0)
1939                         goto done;
1940         }
1941         return;
1942 done:
1943         adapter->num_msix_vec = num_vec;
1944         return;
1945 }
1946
1947 static void be_sriov_enable(struct be_adapter *adapter)
1948 {
1949         be_check_sriov_fn_type(adapter);
1950 #ifdef CONFIG_PCI_IOV
1951         if (be_physfn(adapter) && num_vfs) {
1952                 int status, pos;
1953                 u16 nvfs;
1954
1955                 pos = pci_find_ext_capability(adapter->pdev,
1956                                                 PCI_EXT_CAP_ID_SRIOV);
1957                 pci_read_config_word(adapter->pdev,
1958                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1959
1960                 if (num_vfs > nvfs) {
1961                         dev_info(&adapter->pdev->dev,
1962                                         "Device supports %d VFs and not %d\n",
1963                                         nvfs, num_vfs);
1964                         num_vfs = nvfs;
1965                 }
1966
1967                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1968                 adapter->sriov_enabled = status ? false : true;
1969         }
1970 #endif
1971 }
1972
1973 static void be_sriov_disable(struct be_adapter *adapter)
1974 {
1975 #ifdef CONFIG_PCI_IOV
1976         if (adapter->sriov_enabled) {
1977                 pci_disable_sriov(adapter->pdev);
1978                 adapter->sriov_enabled = false;
1979         }
1980 #endif
1981 }
1982
1983 static inline int be_msix_vec_get(struct be_adapter *adapter,
1984                                         struct be_eq_obj *eq_obj)
1985 {
1986         return adapter->msix_entries[eq_obj->eq_idx].vector;
1987 }
1988
1989 static int be_request_irq(struct be_adapter *adapter,
1990                 struct be_eq_obj *eq_obj,
1991                 void *handler, char *desc, void *context)
1992 {
1993         struct net_device *netdev = adapter->netdev;
1994         int vec;
1995
1996         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1997         vec = be_msix_vec_get(adapter, eq_obj);
1998         return request_irq(vec, handler, 0, eq_obj->desc, context);
1999 }
2000
2001 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2002                         void *context)
2003 {
2004         int vec = be_msix_vec_get(adapter, eq_obj);
2005         free_irq(vec, context);
2006 }
2007
2008 static int be_msix_register(struct be_adapter *adapter)
2009 {
2010         struct be_rx_obj *rxo;
2011         int status, i;
2012         char qname[10];
2013
2014         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2015                                 adapter);
2016         if (status)
2017                 goto err;
2018
2019         for_all_rx_queues(adapter, rxo, i) {
2020                 sprintf(qname, "rxq%d", i);
2021                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2022                                 qname, rxo);
2023                 if (status)
2024                         goto err_msix;
2025         }
2026
2027         return 0;
2028
2029 err_msix:
2030         be_free_irq(adapter, &adapter->tx_eq, adapter);
2031
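        /* Free the RX IRQs registered so far, walking back from the last one */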
2032         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2033                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2034
2035 err:
2036         dev_warn(&adapter->pdev->dev,
2037                 "MSIX Request IRQ failed - err %d\n", status);
2038         be_msix_disable(adapter);
2039         return status;
2040 }
2041
2042 static int be_irq_register(struct be_adapter *adapter)
2043 {
2044         struct net_device *netdev = adapter->netdev;
2045         int status;
2046
2047         if (msix_enabled(adapter)) {
2048                 status = be_msix_register(adapter);
2049                 if (status == 0)
2050                         goto done;
2051                 /* INTx is not supported for VF */
2052                 if (!be_physfn(adapter))
2053                         return status;
2054         }
2055
2056         /* INTx */
2057         netdev->irq = adapter->pdev->irq;
2058         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2059                         adapter);
2060         if (status) {
2061                 dev_err(&adapter->pdev->dev,
2062                         "INTx request IRQ failed - err %d\n", status);
2063                 return status;
2064         }
2065 done:
2066         adapter->isr_registered = true;
2067         return 0;
2068 }
2069
2070 static void be_irq_unregister(struct be_adapter *adapter)
2071 {
2072         struct net_device *netdev = adapter->netdev;
2073         struct be_rx_obj *rxo;
2074         int i;
2075
2076         if (!adapter->isr_registered)
2077                 return;
2078
2079         /* INTx */
2080         if (!msix_enabled(adapter)) {
2081                 free_irq(netdev->irq, adapter);
2082                 goto done;
2083         }
2084
2085         /* MSIx */
2086         be_free_irq(adapter, &adapter->tx_eq, adapter);
2087
2088         for_all_rx_queues(adapter, rxo, i)
2089                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2090
2091 done:
2092         adapter->isr_registered = false;
2093 }
2094
2095 static int be_close(struct net_device *netdev)
2096 {
2097         struct be_adapter *adapter = netdev_priv(netdev);
2098         struct be_rx_obj *rxo;
2099         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2100         int vec, i;
2101
2102         be_async_mcc_disable(adapter);
2103
2104         netif_carrier_off(netdev);
2105         adapter->link_up = false;
2106
2107         if (!lancer_chip(adapter))
2108                 be_intr_set(adapter, false);
2109
2110         for_all_rx_queues(adapter, rxo, i)
2111                 napi_disable(&rxo->rx_eq.napi);
2112
2113         napi_disable(&tx_eq->napi);
2114
2115         if (lancer_chip(adapter)) {
2116                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2117                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2118                 for_all_rx_queues(adapter, rxo, i)
2119                          be_cq_notify(adapter, rxo->cq.id, false, 0);
2120         }
2121
2122         if (msix_enabled(adapter)) {
2123                 vec = be_msix_vec_get(adapter, tx_eq);
2124                 synchronize_irq(vec);
2125
2126                 for_all_rx_queues(adapter, rxo, i) {
2127                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2128                         synchronize_irq(vec);
2129                 }
2130         } else {
2131                 synchronize_irq(netdev->irq);
2132         }
2133         be_irq_unregister(adapter);
2134
2135         /* Wait for all pending tx completions to arrive so that
2136          * all tx skbs are freed.
2137          */
2138         be_tx_compl_clean(adapter);
2139
2140         return 0;
2141 }
2142
2143 static int be_open(struct net_device *netdev)
2144 {
2145         struct be_adapter *adapter = netdev_priv(netdev);
2146         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2147         struct be_rx_obj *rxo;
2148         bool link_up;
2149         int status, i;
2150         u8 mac_speed;
2151         u16 link_speed;
2152
2153         for_all_rx_queues(adapter, rxo, i) {
2154                 be_post_rx_frags(rxo, GFP_KERNEL);
2155                 napi_enable(&rxo->rx_eq.napi);
2156         }
2157         napi_enable(&tx_eq->napi);
2158
2159         be_irq_register(adapter);
2160
2161         if (!lancer_chip(adapter))
2162                 be_intr_set(adapter, true);
2163
2164         /* The evt queues are created in unarmed state; arm them */
2165         for_all_rx_queues(adapter, rxo, i) {
2166                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2167                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2168         }
2169         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2170
2171         /* Now that interrupts are on we can process async mcc */
2172         be_async_mcc_enable(adapter);
2173
2174         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2175                         &link_speed, 0);
2176         if (status)
2177                 goto err;
2178         be_link_status_update(adapter, link_up);
2179
2180         if (be_physfn(adapter)) {
2181                 status = be_vid_config(adapter, false, 0);
2182                 if (status)
2183                         goto err;
2184
2185                 status = be_cmd_set_flow_control(adapter,
2186                                 adapter->tx_fc, adapter->rx_fc);
2187                 if (status)
2188                         goto err;
2189         }
2190
2191         return 0;
2192 err:
2193         be_close(adapter->netdev);
2194         return -EIO;
2195 }
2196
2197 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2198 {
2199         struct be_dma_mem cmd;
2200         int status = 0;
2201         u8 mac[ETH_ALEN];
2202
2203         memset(mac, 0, ETH_ALEN);
2204
2205         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2206         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2207                                     GFP_KERNEL);
2208         if (cmd.va == NULL)
2209                 return -1;
2210         memset(cmd.va, 0, cmd.size);
2211
2212         if (enable) {
2213                 status = pci_write_config_dword(adapter->pdev,
2214                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2215                 if (status) {
2216                         dev_err(&adapter->pdev->dev,
2217                                 "Could not enable Wake-on-LAN\n");
2218                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2219                                           cmd.dma);
2220                         return status;
2221                 }
2222                 status = be_cmd_enable_magic_wol(adapter,
2223                                 adapter->netdev->dev_addr, &cmd);
2224                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2225                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2226         } else {
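                /* Program an all-zero MAC to clear the magic-packet filter */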
2227                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2228                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2229                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2230         }
2231
2232         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2233         return status;
2234 }
2235
2236 /*
2237  * Generate a seed MAC address from the PF MAC Address using jhash.
2238  * MAC addresses for VFs are assigned incrementally starting from the seed.
2239  * These addresses are programmed in the ASIC by the PF and the VF driver
2240  * queries for the MAC address during its probe.
2241  */
2242 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2243 {
2244         u32 vf = 0;
2245         int status = 0;
2246         u8 mac[ETH_ALEN];
2247
2248         be_vf_eth_addr_generate(adapter, mac);
2249
2250         for (vf = 0; vf < num_vfs; vf++) {
2251                 status = be_cmd_pmac_add(adapter, mac,
2252                                         adapter->vf_cfg[vf].vf_if_handle,
2253                                         &adapter->vf_cfg[vf].vf_pmac_id,
2254                                         vf + 1);
2255                 if (status)
2256                         dev_err(&adapter->pdev->dev,
2257                                 "Mac address add failed for VF %d\n", vf);
2258                 else
2259                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2260
2261                 mac[5] += 1;
2262         }
2263         return status;
2264 }
2265
2266 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2267 {
2268         u32 vf;
2269
2270         for (vf = 0; vf < num_vfs; vf++) {
2271                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2272                         be_cmd_pmac_del(adapter,
2273                                         adapter->vf_cfg[vf].vf_if_handle,
2274                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2275         }
2276 }
2277
2278 static int be_setup(struct be_adapter *adapter)
2279 {
2280         struct net_device *netdev = adapter->netdev;
2281         u32 cap_flags, en_flags, vf = 0;
2282         int status;
2283         u8 mac[ETH_ALEN];
2284
2285         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2286                                 BE_IF_FLAGS_BROADCAST |
2287                                 BE_IF_FLAGS_MULTICAST;
2288
2289         if (be_physfn(adapter)) {
2290                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2291                                 BE_IF_FLAGS_PROMISCUOUS |
2292                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2293                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2294
2295                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2296                         cap_flags |= BE_IF_FLAGS_RSS;
2297                         en_flags |= BE_IF_FLAGS_RSS;
2298                 }
2299         }
2300
2301         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2302                         netdev->dev_addr, false/* pmac_invalid */,
2303                         &adapter->if_handle, &adapter->pmac_id, 0);
2304         if (status != 0)
2305                 goto do_none;
2306
2307         if (be_physfn(adapter)) {
2308                 if (adapter->sriov_enabled) {
2309                         while (vf < num_vfs) {
2310                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2311                                                         BE_IF_FLAGS_BROADCAST;
2312                                 status = be_cmd_if_create(adapter, cap_flags,
2313                                         en_flags, mac, true,
2314                                         &adapter->vf_cfg[vf].vf_if_handle,
2315                                         NULL, vf+1);
2316                                 if (status) {
2317                                         dev_err(&adapter->pdev->dev,
2318                                         "Interface Create failed for VF %d\n",
2319                                         vf);
2320                                         goto if_destroy;
2321                                 }
2322                                 adapter->vf_cfg[vf].vf_pmac_id =
2323                                                         BE_INVALID_PMAC_ID;
2324                                 vf++;
2325                         }
2326                 }
2327         } else {
2328                 status = be_cmd_mac_addr_query(adapter, mac,
2329                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2330                 if (!status) {
2331                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2332                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2333                 }
2334         }
2335
2336         status = be_tx_queues_create(adapter);
2337         if (status != 0)
2338                 goto if_destroy;
2339
2340         status = be_rx_queues_create(adapter);
2341         if (status != 0)
2342                 goto tx_qs_destroy;
2343
2344         status = be_mcc_queues_create(adapter);
2345         if (status != 0)
2346                 goto rx_qs_destroy;
2347
2348         adapter->link_speed = -1;
2349
2350         return 0;
2351
2352 rx_qs_destroy:
2353         be_rx_queues_destroy(adapter);
2354 tx_qs_destroy:
2355         be_tx_queues_destroy(adapter);
2356 if_destroy:
2357         if (be_physfn(adapter) && adapter->sriov_enabled)
2358                 for (vf = 0; vf < num_vfs; vf++)
2359                         if (adapter->vf_cfg[vf].vf_if_handle)
2360                                 be_cmd_if_destroy(adapter,
2361                                         adapter->vf_cfg[vf].vf_if_handle,
2362                                         vf + 1);
2363         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2364 do_none:
2365         return status;
2366 }
2367
2368 static int be_clear(struct be_adapter *adapter)
2369 {
2370         int vf;
2371
2372         if (be_physfn(adapter) && adapter->sriov_enabled)
2373                 be_vf_eth_addr_rem(adapter);
2374
2375         be_mcc_queues_destroy(adapter);
2376         be_rx_queues_destroy(adapter);
2377         be_tx_queues_destroy(adapter);
2378         adapter->eq_next_idx = 0;
2379
2380         if (be_physfn(adapter) && adapter->sriov_enabled)
2381                 for (vf = 0; vf < num_vfs; vf++)
2382                         if (adapter->vf_cfg[vf].vf_if_handle)
2383                                 be_cmd_if_destroy(adapter,
2384                                         adapter->vf_cfg[vf].vf_if_handle,
2385                                         vf + 1);
2386
2387         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2388
2389         /* tell fw we're done with firing cmds */
2390         be_cmd_fw_clean(adapter);
2391         return 0;
2392 }
2393
2394
2395 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2396 static bool be_flash_redboot(struct be_adapter *adapter,
2397                         const u8 *p, u32 img_start, int image_size,
2398                         int hdr_size)
2399 {
2400         u32 crc_offset;
2401         u8 flashed_crc[4];
2402         int status;
2403
2404         crc_offset = hdr_size + img_start + image_size - 4;
2405
2406         p += crc_offset;
2407
2408         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2409                         (image_size - 4));
2410         if (status) {
2411                 dev_err(&adapter->pdev->dev,
2412                 "could not get crc from flash, not flashing redboot\n");
2413                 return false;
2414         }
2415
2416         /* update redboot only if crc does not match */
2417         if (!memcmp(flashed_crc, p, 4))
2418                 return false;
2419         else
2420                 return true;
2421 }
2422
2423 static int be_flash_data(struct be_adapter *adapter,
2424                         const struct firmware *fw,
2425                         struct be_dma_mem *flash_cmd, int num_of_images)
2426
2427 {
2428         int status = 0, i, filehdr_size = 0;
2429         u32 total_bytes = 0, flash_op;
2430         int num_bytes;
2431         const u8 *p = fw->data;
2432         struct be_cmd_write_flashrom *req = flash_cmd->va;
2433         const struct flash_comp *pflashcomp;
2434         int num_comp;
2435
2436         static const struct flash_comp gen3_flash_types[9] = {
2437                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2438                         FLASH_IMAGE_MAX_SIZE_g3},
2439                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2440                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2441                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2442                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2443                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2444                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2445                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2446                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2447                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2448                         FLASH_IMAGE_MAX_SIZE_g3},
2449                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2450                         FLASH_IMAGE_MAX_SIZE_g3},
2451                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2452                         FLASH_IMAGE_MAX_SIZE_g3},
2453                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2454                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2455         };
2456         static const struct flash_comp gen2_flash_types[8] = {
2457                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2458                         FLASH_IMAGE_MAX_SIZE_g2},
2459                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2460                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2461                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2462                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2463                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2464                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2465                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2466                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2467                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2468                         FLASH_IMAGE_MAX_SIZE_g2},
2469                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2470                         FLASH_IMAGE_MAX_SIZE_g2},
2471                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2472                          FLASH_IMAGE_MAX_SIZE_g2}
2473         };
2474
2475         if (adapter->generation == BE_GEN3) {
2476                 pflashcomp = gen3_flash_types;
2477                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2478                 num_comp = ARRAY_SIZE(gen3_flash_types);
2479         } else {
2480                 pflashcomp = gen2_flash_types;
2481                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2482                 num_comp = ARRAY_SIZE(gen2_flash_types);
2483         }
2484         for (i = 0; i < num_comp; i++) {
2485                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2486                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2487                         continue;
2488                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2489                         (!be_flash_redboot(adapter, fw->data,
2490                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2491                         (num_of_images * sizeof(struct image_hdr)))))
2492                         continue;
2493                 p = fw->data;
2494                 p += filehdr_size + pflashcomp[i].offset
2495                         + (num_of_images * sizeof(struct image_hdr));
2496                 if (p + pflashcomp[i].size > fw->data + fw->size)
2497                         return -1;
2498                 total_bytes = pflashcomp[i].size;
2499                 while (total_bytes) {
2500                         if (total_bytes > 32*1024)
2501                                 num_bytes = 32*1024;
2502                         else
2503                                 num_bytes = total_bytes;
2504                         total_bytes -= num_bytes;
2505
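                        /* Intermediate 32KB chunks are only staged (SAVE op);
                         * the final chunk triggers the actual flash write.
                         */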
2506                         if (!total_bytes)
2507                                 flash_op = FLASHROM_OPER_FLASH;
2508                         else
2509                                 flash_op = FLASHROM_OPER_SAVE;
2510                         memcpy(req->params.data_buf, p, num_bytes);
2511                         p += num_bytes;
2512                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2513                                 pflashcomp[i].optype, flash_op, num_bytes);
2514                         if (status) {
2515                                 dev_err(&adapter->pdev->dev,
2516                                         "cmd to write to flash rom failed.\n");
2517                                 return -1;
2518                         }
2519                         yield();
2520                 }
2521         }
2522         return 0;
2523 }
2524
2525 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2526 {
2527         if (fhdr == NULL)
2528                 return 0;
2529         if (fhdr->build[0] == '3')
2530                 return BE_GEN3;
2531         else if (fhdr->build[0] == '2')
2532                 return BE_GEN2;
2533         else
2534                 return 0;
2535 }
2536
2537 int be_load_fw(struct be_adapter *adapter, u8 *func)
2538 {
2539         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2540         const struct firmware *fw;
2541         struct flash_file_hdr_g2 *fhdr;
2542         struct flash_file_hdr_g3 *fhdr3;
2543         struct image_hdr *img_hdr_ptr = NULL;
2544         struct be_dma_mem flash_cmd;
2545         int status, i = 0, num_imgs = 0;
2546         const u8 *p;
2547
2548         if (!netif_running(adapter->netdev)) {
2549                 dev_err(&adapter->pdev->dev,
2550                         "Firmware load not allowed (interface is down)\n");
2551                 return -EPERM;
2552         }
2553
2554         strcpy(fw_file, func);
2555
2556         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2557         if (status)
2558                 goto fw_exit;
2559
2560         p = fw->data;
2561         fhdr = (struct flash_file_hdr_g2 *) p;
2562         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2563
2564         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2565         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2566                                           &flash_cmd.dma, GFP_KERNEL);
2567         if (!flash_cmd.va) {
2568                 status = -ENOMEM;
2569                 dev_err(&adapter->pdev->dev,
2570                         "Memory allocation failure while flashing\n");
2571                 goto fw_exit;
2572         }
2573
2574         if ((adapter->generation == BE_GEN3) &&
2575                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2576                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2577                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2578                 for (i = 0; i < num_imgs; i++) {
2579                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2580                                         (sizeof(struct flash_file_hdr_g3) +
2581                                          i * sizeof(struct image_hdr)));
2582                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2583                                 status = be_flash_data(adapter, fw, &flash_cmd,
2584                                                         num_imgs);
2585                 }
2586         } else if ((adapter->generation == BE_GEN2) &&
2587                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2588                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2589         } else {
2590                 dev_err(&adapter->pdev->dev,
2591                         "UFI and Interface are not compatible for flashing\n");
2592                 status = -1;
2593         }
2594
2595         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2596                           flash_cmd.dma);
2597         if (status) {
2598                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2599                 goto fw_exit;
2600         }
2601
2602         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2603
2604 fw_exit:
2605         release_firmware(fw);
2606         return status;
2607 }
2608
2609 static struct net_device_ops be_netdev_ops = {
2610         .ndo_open               = be_open,
2611         .ndo_stop               = be_close,
2612         .ndo_start_xmit         = be_xmit,
2613         .ndo_set_rx_mode        = be_set_multicast_list,
2614         .ndo_set_mac_address    = be_mac_addr_set,
2615         .ndo_change_mtu         = be_change_mtu,
2616         .ndo_validate_addr      = eth_validate_addr,
2617         .ndo_vlan_rx_register   = be_vlan_register,
2618         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2619         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2620         .ndo_set_vf_mac         = be_set_vf_mac,
2621         .ndo_set_vf_vlan        = be_set_vf_vlan,
2622         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2623         .ndo_get_vf_config      = be_get_vf_config
2624 };
2625
2626 static void be_netdev_init(struct net_device *netdev)
2627 {
2628         struct be_adapter *adapter = netdev_priv(netdev);
2629         struct be_rx_obj *rxo;
2630         int i;
2631
2632         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2633                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2634                 NETIF_F_HW_VLAN_TX;
2635         if (be_multi_rxq(adapter))
2636                 netdev->hw_features |= NETIF_F_RXHASH;
2637
2638         netdev->features |= netdev->hw_features |
2639                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2640
2641         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2642                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2643
2644         if (lancer_chip(adapter))
2645                 netdev->vlan_features |= NETIF_F_TSO6;
2646
2647         netdev->flags |= IFF_MULTICAST;
2648
2649         /* Default settings for Rx and Tx flow control */
2650         adapter->rx_fc = true;
2651         adapter->tx_fc = true;
2652
2653         netif_set_gso_max_size(netdev, 65535);
2654
2655         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2656
2657         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2658
2659         for_all_rx_queues(adapter, rxo, i)
2660                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2661                                 BE_NAPI_WEIGHT);
2662
2663         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2664                 BE_NAPI_WEIGHT);
2665 }
2666
2667 static void be_unmap_pci_bars(struct be_adapter *adapter)
2668 {
2669         if (adapter->csr)
2670                 iounmap(adapter->csr);
2671         if (adapter->db)
2672                 iounmap(adapter->db);
2673         if (adapter->pcicfg && be_physfn(adapter))
2674                 iounmap(adapter->pcicfg);
2675 }
2676
2677 static int be_map_pci_bars(struct be_adapter *adapter)
2678 {
2679         u8 __iomem *addr;
2680         int pcicfg_reg, db_reg;
2681
2682         if (lancer_chip(adapter)) {
2683                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2684                         pci_resource_len(adapter->pdev, 0));
2685                 if (addr == NULL)
2686                         return -ENOMEM;
2687                 adapter->db = addr;
2688                 return 0;
2689         }
2690
2691         if (be_physfn(adapter)) {
2692                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2693                                 pci_resource_len(adapter->pdev, 2));
2694                 if (addr == NULL)
2695                         return -ENOMEM;
2696                 adapter->csr = addr;
2697         }
2698
2699         if (adapter->generation == BE_GEN2) {
2700                 pcicfg_reg = 1;
2701                 db_reg = 4;
2702         } else {
2703                 pcicfg_reg = 0;
2704                 if (be_physfn(adapter))
2705                         db_reg = 4;
2706                 else
2707                         db_reg = 0;
2708         }
2709         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2710                                 pci_resource_len(adapter->pdev, db_reg));
2711         if (addr == NULL)
2712                 goto pci_map_err;
2713         adapter->db = addr;
2714
2715         if (be_physfn(adapter)) {
2716                 addr = ioremap_nocache(
2717                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2718                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2719                 if (addr == NULL)
2720                         goto pci_map_err;
2721                 adapter->pcicfg = addr;
2722         } else
2723                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2724
2725         return 0;
2726 pci_map_err:
2727         be_unmap_pci_bars(adapter);
2728         return -ENOMEM;
2729 }
2730
2731
2732 static void be_ctrl_cleanup(struct be_adapter *adapter)
2733 {
2734         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2735
2736         be_unmap_pci_bars(adapter);
2737
2738         if (mem->va)
2739                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2740                                   mem->dma);
2741
2742         mem = &adapter->mc_cmd_mem;
2743         if (mem->va)
2744                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2745                                   mem->dma);
2746 }
2747
2748 static int be_ctrl_init(struct be_adapter *adapter)
2749 {
2750         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2751         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2752         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2753         int status;
2754
2755         status = be_map_pci_bars(adapter);
2756         if (status)
2757                 goto done;
2758
2759         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2760         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2761                                                 mbox_mem_alloc->size,
2762                                                 &mbox_mem_alloc->dma,
2763                                                 GFP_KERNEL);
2764         if (!mbox_mem_alloc->va) {
2765                 status = -ENOMEM;
2766                 goto unmap_pci_bars;
2767         }
2768
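        /* Carve a 16-byte aligned mailbox out of the over-allocated buffer */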
2769         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2770         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2771         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2772         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2773
2774         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2775         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2776                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2777                                             GFP_KERNEL);
2778         if (mc_cmd_mem->va == NULL) {
2779                 status = -ENOMEM;
2780                 goto free_mbox;
2781         }
2782         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2783
2784         mutex_init(&adapter->mbox_lock);
2785         spin_lock_init(&adapter->mcc_lock);
2786         spin_lock_init(&adapter->mcc_cq_lock);
2787
2788         init_completion(&adapter->flash_compl);
2789         pci_save_state(adapter->pdev);
2790         return 0;
2791
2792 free_mbox:
2793         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2794                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2795
2796 unmap_pci_bars:
2797         be_unmap_pci_bars(adapter);
2798
2799 done:
2800         return status;
2801 }
2802
2803 static void be_stats_cleanup(struct be_adapter *adapter)
2804 {
2805         struct be_dma_mem *cmd = &adapter->stats_cmd;
2806
2807         if (cmd->va)
2808                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2809                                   cmd->va, cmd->dma);
2810 }
2811
2812 static int be_stats_init(struct be_adapter *adapter)
2813 {
2814         struct be_dma_mem *cmd = &adapter->stats_cmd;
2815
2816         cmd->size = sizeof(struct be_cmd_req_get_stats);
2817         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2818                                      GFP_KERNEL);
2819         if (cmd->va == NULL)
2820                 return -1;
2821         memset(cmd->va, 0, cmd->size);
2822         return 0;
2823 }
2824
2825 static void __devexit be_remove(struct pci_dev *pdev)
2826 {
2827         struct be_adapter *adapter = pci_get_drvdata(pdev);
2828
2829         if (!adapter)
2830                 return;
2831
2832         cancel_delayed_work_sync(&adapter->work);
2833
2834         unregister_netdev(adapter->netdev);
2835
2836         be_clear(adapter);
2837
2838         be_stats_cleanup(adapter);
2839
2840         be_ctrl_cleanup(adapter);
2841
2842         kfree(adapter->vf_cfg);
2843         be_sriov_disable(adapter);
2844
2845         be_msix_disable(adapter);
2846
2847         pci_set_drvdata(pdev, NULL);
2848         pci_release_regions(pdev);
2849         pci_disable_device(pdev);
2850
2851         free_netdev(adapter->netdev);
2852 }
2853
2854 static int be_get_config(struct be_adapter *adapter)
2855 {
2856         int status;
2857         u8 mac[ETH_ALEN];
2858
2859         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2860         if (status)
2861                 return status;
2862
2863         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2864                         &adapter->function_mode, &adapter->function_caps);
2865         if (status)
2866                 return status;
2867
2868         memset(mac, 0, ETH_ALEN);
2869
2870         if (be_physfn(adapter)) {
2871                 status = be_cmd_mac_addr_query(adapter, mac,
2872                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2873
2874                 if (status)
2875                         return status;
2876
2877                 if (!is_valid_ether_addr(mac))
2878                         return -EADDRNOTAVAIL;
2879
2880                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2881                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2882         }
2883
2884         if (adapter->function_mode & 0x400)
2885                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2886         else
2887                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2888
2889         status = be_cmd_get_cntl_attributes(adapter);
2890         if (status)
2891                 return status;
2892
2893         be_cmd_check_native_mode(adapter);
2894         return 0;
2895 }
2896
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                if (num_vfs > 0) {
                        dev_err(&pdev->dev, "VFs not supported\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

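/*
 * Poll the SLIPORT_STATUS register until firmware reports ready.
 * With SLIPORT_READY_TIMEOUT iterations of 20 ms this waits roughly
 * ten seconds before giving up.
 */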
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(20);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

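/*
 * Wait for the Lancer firmware to become ready.  If the port reports an
 * error together with "reset needed", initiate a port reset through the
 * SLIPORT_CONTROL register and wait again; any remaining error state is
 * treated as a fatal, non-recoverable condition.
 */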
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if the adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        return status;
}

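/*
 * PCI probe: bring up the PCI device, allocate the net_device/adapter,
 * initialize mailbox/control structures, sync with firmware, set up
 * queues and interrupts, and finally register the netdev.  Errors unwind
 * through the labels at the bottom in the reverse order of setup.
 */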
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);
        if (adapter->sriov_enabled) {
                adapter->vf_cfg = kcalloc(num_vfs,
                        sizeof(struct be_vf_cfg), GFP_KERNEL);

                if (!adapter->vf_cfg) {
                        status = -ENOMEM;
                        goto free_netdev;
                }
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_vf_cfg;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                u8 mac_speed;
                bool link_up;
                u16 vf, lnk_speed;

                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;

                for (vf = 0; vf < num_vfs; vf++) {
                        status = be_cmd_link_status_query(adapter, &link_up,
                                        &mac_speed, &lnk_speed, vf + 1);
                        if (!status)
                                adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
                        else
                                goto unreg_netdev;
                }
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_vf_cfg:
        kfree(adapter->vf_cfg);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

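/*
 * Legacy PCI power management: stop the worker, quiesce the interface,
 * optionally arm wake-on-LAN, tear down queues and MSI-X, then let the
 * PCI core place the device into the requested low-power state.
 */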
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

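/*
 * Undo be_suspend(): re-enable the PCI device, restore its state, bring
 * the controller and queues back up, reattach the netdev and restart the
 * periodic worker.
 */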
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * A function-level reset (FLR) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

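/*
 * EEH: a PCI channel error was detected.  Mark the adapter as being in
 * error, quiesce the interface and tear down the queues; ask the PCI
 * core for a slot reset unless the failure is permanent.
 */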
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

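/*
 * EEH slot reset: re-enable and restore the PCI function, then poll
 * firmware POST to confirm the card has come back.
 */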
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

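/*
 * EEH resume: after a successful slot reset, redo firmware init and
 * queue setup and reattach the netdev.
 */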
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

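/*
 * Module init: constrain rx_frag_size to one of the supported values
 * (2048, 4096 or 8192 bytes), falling back to 2048, then register the
 * PCI driver.
 */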
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);