drivers/net/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
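/* Editor's note: a minimal, hypothetical load-time example of passing the
 * parameters declared above (the be2net module name is assumed from the
 * benet directory; the values are illustrative only):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2 multi_rxq=1
 */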
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47 /* UE Status Low CSR */
48 static char *ue_status_low_desc[] = {
49         "CEV",
50         "CTX",
51         "DBUF",
52         "ERX",
53         "Host",
54         "MPU",
55         "NDMA",
56         "PTC ",
57         "RDMA ",
58         "RXF ",
59         "RXIPS ",
60         "RXULP0 ",
61         "RXULP1 ",
62         "RXULP2 ",
63         "TIM ",
64         "TPOST ",
65         "TPRE ",
66         "TXIPS ",
67         "TXULP0 ",
68         "TXULP1 ",
69         "UC ",
70         "WDMA ",
71         "TXULP2 ",
72         "HOST1 ",
73         "P0_OB_LINK ",
74         "P1_OB_LINK ",
75         "HOST_GPIO ",
76         "MBOX ",
77         "AXGMAC0",
78         "AXGMAC1",
79         "JTAG",
80         "MPU_INTPEND"
81 };
82 /* UE Status High CSR */
83 static char *ue_status_hi_desc[] = {
84         "LPCMEMHOST",
85         "MGMT_MAC",
86         "PCS0ONLINE",
87         "MPU_IRAM",
88         "PCS1ONLINE",
89         "PCTL0",
90         "PCTL1",
91         "PMEM",
92         "RR",
93         "TXPB",
94         "RXPP",
95         "XAUI",
96         "TXP",
97         "ARM",
98         "IPC",
99         "HOST2",
100         "HOST3",
101         "HOST4",
102         "HOST5",
103         "HOST6",
104         "HOST7",
105         "HOST8",
106         "HOST9",
107         "NETC"
108         "Unknown",
109         "Unknown",
110         "Unknown",
111         "Unknown",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown"
116 };
117
118 static inline bool be_multi_rxq(struct be_adapter *adapter)
119 {
120         return (adapter->num_rx_qs > 1);
121 }
122
123 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
124 {
125         struct be_dma_mem *mem = &q->dma_mem;
126         if (mem->va)
127                 pci_free_consistent(adapter->pdev, mem->size,
128                         mem->va, mem->dma);
129 }
130
131 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
132                 u16 len, u16 entry_size)
133 {
134         struct be_dma_mem *mem = &q->dma_mem;
135
136         memset(q, 0, sizeof(*q));
137         q->len = len;
138         q->entry_size = entry_size;
139         mem->size = len * entry_size;
140         mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
141         if (!mem->va)
142                 return -1;
143         memset(mem->va, 0, mem->size);
144         return 0;
145 }
146
147 static void be_intr_set(struct be_adapter *adapter, bool enable)
148 {
149         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
150         u32 reg = ioread32(addr);
151         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
153         if (adapter->eeh_err)
154                 return;
155
156         if (!enabled && enable)
157                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158         else if (enabled && !enable)
159                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else
161                 return;
162
163         iowrite32(reg, addr);
164 }
165
166 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
167 {
168         u32 val = 0;
169         val |= qid & DB_RQ_RING_ID_MASK;
170         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
171
172         wmb();
173         iowrite32(val, adapter->db + DB_RQ_OFFSET);
174 }
175
176 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178         u32 val = 0;
179         val |= qid & DB_TXULP_RING_ID_MASK;
180         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
181
182         wmb();
183         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
184 }
185
186 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
187                 bool arm, bool clear_int, u16 num_popped)
188 {
189         u32 val = 0;
190         val |= qid & DB_EQ_RING_ID_MASK;
191
192         if (adapter->eeh_err)
193                 return;
194
195         if (arm)
196                 val |= 1 << DB_EQ_REARM_SHIFT;
197         if (clear_int)
198                 val |= 1 << DB_EQ_CLR_SHIFT;
199         val |= 1 << DB_EQ_EVNT_SHIFT;
200         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
201         iowrite32(val, adapter->db + DB_EQ_OFFSET);
202 }
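
/* Editor's note: a sketch of the doorbell word built above (exact shift and
 * mask values live in the driver headers): with arm=true, clear_int=false
 * and num_popped=5, val carries the event-queue id in its low bits, the
 * rearm and event bits set, the clear-interrupt bit clear, and 5 in the
 * popped-count field, all posted in a single 32-bit write to DB_EQ_OFFSET.
 */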
203
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205 {
206         u32 val = 0;
207         val |= qid & DB_CQ_RING_ID_MASK;
208
209         if (adapter->eeh_err)
210                 return;
211
212         if (arm)
213                 val |= 1 << DB_CQ_REARM_SHIFT;
214         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_CQ_OFFSET);
216 }
217
218 static int be_mac_addr_set(struct net_device *netdev, void *p)
219 {
220         struct be_adapter *adapter = netdev_priv(netdev);
221         struct sockaddr *addr = p;
222         int status = 0;
223
224         if (!is_valid_ether_addr(addr->sa_data))
225                 return -EADDRNOTAVAIL;
226
227         /* MAC addr configuration will be done in hardware for VFs
228          * by their corresponding PFs. Just copy to netdev addr here
229          */
230         if (!be_physfn(adapter))
231                 goto netdev_addr;
232
233         status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
234         if (status)
235                 return status;
236
237         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238                         adapter->if_handle, &adapter->pmac_id);
239 netdev_addr:
240         if (!status)
241                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243         return status;
244 }
245
246 void netdev_stats_update(struct be_adapter *adapter)
247 {
248         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
249         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
250         struct be_port_rxf_stats *port_stats =
251                         &rxf_stats->port[adapter->port_num];
252         struct net_device_stats *dev_stats = &adapter->netdev->stats;
253         struct be_erx_stats *erx_stats = &hw_stats->erx;
254         struct be_rx_obj *rxo;
255         int i;
256
257         memset(dev_stats, 0, sizeof(*dev_stats));
258         for_all_rx_queues(adapter, rxo, i) {
259                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
260                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
261                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
262                 /* no space in Linux buffers: best possible approximation */
263                 dev_stats->rx_dropped +=
264                         erx_stats->rx_drops_no_fragments[rxo->q.id];
265         }
266
267         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
268         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
269
270         /* bad pkts received */
271         dev_stats->rx_errors = port_stats->rx_crc_errors +
272                 port_stats->rx_alignment_symbol_errors +
273                 port_stats->rx_in_range_errors +
274                 port_stats->rx_out_range_errors +
275                 port_stats->rx_frame_too_long +
276                 port_stats->rx_dropped_too_small +
277                 port_stats->rx_dropped_too_short +
278                 port_stats->rx_dropped_header_too_small +
279                 port_stats->rx_dropped_tcp_length +
280                 port_stats->rx_dropped_runt +
281                 port_stats->rx_tcp_checksum_errs +
282                 port_stats->rx_ip_checksum_errs +
283                 port_stats->rx_udp_checksum_errs;
284
285         /* detailed rx errors */
286         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
287                 port_stats->rx_out_range_errors +
288                 port_stats->rx_frame_too_long;
289
290         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
291
292         /* frame alignment errors */
293         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
294
295         /* receiver fifo overrun */
296         /* drops_no_pbuf is not per i/f, it's per BE card */
297         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
298                                         port_stats->rx_input_fifo_overflow +
299                                         rxf_stats->rx_drops_no_pbuf;
300 }
301
302 void be_link_status_update(struct be_adapter *adapter, bool link_up)
303 {
304         struct net_device *netdev = adapter->netdev;
305
306         /* If link came up or went down */
307         if (adapter->link_up != link_up) {
308                 adapter->link_speed = -1;
309                 if (link_up) {
310                         netif_start_queue(netdev);
311                         netif_carrier_on(netdev);
312                         printk(KERN_INFO "%s: Link up\n", netdev->name);
313                 } else {
314                         netif_stop_queue(netdev);
315                         netif_carrier_off(netdev);
316                         printk(KERN_INFO "%s: Link down\n", netdev->name);
317                 }
318                 adapter->link_up = link_up;
319         }
320 }
321
322 /* Update the EQ delay in BE based on the RX frags consumed / sec */
323 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
324 {
325         struct be_eq_obj *rx_eq = &rxo->rx_eq;
326         struct be_rx_stats *stats = &rxo->stats;
327         ulong now = jiffies;
328         u32 eqd;
329
330         if (!rx_eq->enable_aic)
331                 return;
332
333         /* Wrapped around */
334         if (time_before(now, stats->rx_fps_jiffies)) {
335                 stats->rx_fps_jiffies = now;
336                 return;
337         }
338
339         /* Update once a second */
340         if ((now - stats->rx_fps_jiffies) < HZ)
341                 return;
342
343         stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
344                         ((now - stats->rx_fps_jiffies) / HZ);
345
346         stats->rx_fps_jiffies = now;
347         stats->prev_rx_frags = stats->rx_frags;
348         eqd = stats->rx_fps / 110000;
349         eqd = eqd << 3;
350         if (eqd > rx_eq->max_eqd)
351                 eqd = rx_eq->max_eqd;
352         if (eqd < rx_eq->min_eqd)
353                 eqd = rx_eq->min_eqd;
354         if (eqd < 10)
355                 eqd = 0;
356         if (eqd != rx_eq->cur_eqd)
357                 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
358
359         rx_eq->cur_eqd = eqd;
360 }
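
/* Editor's note: a worked example of the adaptive EQD math above, using
 * illustrative numbers. At rx_fps = 1,375,000 frags/sec:
 *   eqd = 1375000 / 110000 = 12 (integer divide), then 12 << 3 = 96,
 * which is then clamped to [min_eqd, max_eqd]; any result below 10 is
 * forced to 0 (no delay) so light loads are not penalized.
 */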
361
362 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363 {
364         u64 rate = bytes;
365
366         do_div(rate, ticks / HZ);
367         rate <<= 3;                     /* bytes/sec -> bits/sec */
368         do_div(rate, 1000000ul);        /* Mbits/sec */
369
370         return rate;
371 }
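
/* Editor's note: a worked example for be_calc_rate() with illustrative
 * numbers: bytes = 250,000,000 over ticks = 2*HZ gives
 *   250000000 / 2 = 125,000,000 bytes/sec, << 3 = 1,000,000,000 bits/sec,
 *   / 1000000 = 1000 Mbits/sec.
 */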
372
373 static void be_tx_rate_update(struct be_adapter *adapter)
374 {
375         struct be_tx_stats *stats = tx_stats(adapter);
376         ulong now = jiffies;
377
378         /* Wrapped around? */
379         if (time_before(now, stats->be_tx_jiffies)) {
380                 stats->be_tx_jiffies = now;
381                 return;
382         }
383
384         /* Update tx rate once in two seconds */
385         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387                                                   - stats->be_tx_bytes_prev,
388                                                  now - stats->be_tx_jiffies);
389                 stats->be_tx_jiffies = now;
390                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391         }
392 }
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397         struct be_tx_stats *stats = tx_stats(adapter);
398         stats->be_tx_reqs++;
399         stats->be_tx_wrbs += wrb_cnt;
400         stats->be_tx_bytes += copied;
401         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402         if (stopped)
403                 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
408 {
409         int cnt = (skb->len > skb->data_len);
410
411         cnt += skb_shinfo(skb)->nr_frags;
412
413         /* to account for hdr wrb */
414         cnt++;
415         if (cnt & 1) {
416                 /* add a dummy to make it an even num */
417                 cnt++;
418                 *dummy = true;
419         } else
420                 *dummy = false;
421         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422         return cnt;
423 }
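
/* Editor's note: two worked examples for wrb_cnt_for_skb(). An skb with
 * linear data and two page frags needs 1 + 2 = 3 data WRBs plus 1 header
 * WRB = 4 (even), so *dummy = false. With one page frag the total is 3
 * (odd), so a dummy WRB is added to make it 4 and *dummy = true.
 */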
424
425 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
426 {
427         wrb->frag_pa_hi = upper_32_bits(addr);
428         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
429         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
430 }
431
432 static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
433                 bool vlan, u32 wrb_cnt, u32 len)
434 {
435         memset(hdr, 0, sizeof(*hdr));
436
437         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
438
439         if (skb_is_gso(skb)) {
440                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
441                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
442                         hdr, skb_shinfo(skb)->gso_size);
443                 if (skb_is_gso_v6(skb))
444                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
445         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
446                 if (is_tcp_pkt(skb))
447                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
448                 else if (is_udp_pkt(skb))
449                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
450         }
451
452         if (vlan && vlan_tx_tag_present(skb)) {
453                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
454                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
455                         hdr, vlan_tx_tag_get(skb));
456         }
457
458         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
459         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
460         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
461         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
462 }
463
464 static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
465                 bool unmap_single)
466 {
467         dma_addr_t dma;
468
469         be_dws_le_to_cpu(wrb, sizeof(*wrb));
470
471         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
472         if (wrb->frag_len) {
473                 if (unmap_single)
474                         pci_unmap_single(pdev, dma, wrb->frag_len,
475                                 PCI_DMA_TODEVICE);
476                 else
477                         pci_unmap_page(pdev, dma, wrb->frag_len,
478                                 PCI_DMA_TODEVICE);
479         }
480 }
481
482 static int make_tx_wrbs(struct be_adapter *adapter,
483                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
484 {
485         dma_addr_t busaddr;
486         int i, copied = 0;
487         struct pci_dev *pdev = adapter->pdev;
488         struct sk_buff *first_skb = skb;
489         struct be_queue_info *txq = &adapter->tx_obj.q;
490         struct be_eth_wrb *wrb;
491         struct be_eth_hdr_wrb *hdr;
492         bool map_single = false;
493         u16 map_head;
494
495         hdr = queue_head_node(txq);
496         queue_head_inc(txq);
497         map_head = txq->head;
498
499         if (skb->len > skb->data_len) {
500                 int len = skb_headlen(skb);
501                 busaddr = pci_map_single(pdev, skb->data, len,
502                                          PCI_DMA_TODEVICE);
503                 if (pci_dma_mapping_error(pdev, busaddr))
504                         goto dma_err;
505                 map_single = true;
506                 wrb = queue_head_node(txq);
507                 wrb_fill(wrb, busaddr, len);
508                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
509                 queue_head_inc(txq);
510                 copied += len;
511         }
512
513         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
514                 struct skb_frag_struct *frag =
515                         &skb_shinfo(skb)->frags[i];
516                 busaddr = pci_map_page(pdev, frag->page,
517                                        frag->page_offset,
518                                        frag->size, PCI_DMA_TODEVICE);
519                 if (pci_dma_mapping_error(pdev, busaddr))
520                         goto dma_err;
521                 wrb = queue_head_node(txq);
522                 wrb_fill(wrb, busaddr, frag->size);
523                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
524                 queue_head_inc(txq);
525                 copied += frag->size;
526         }
527
528         if (dummy_wrb) {
529                 wrb = queue_head_node(txq);
530                 wrb_fill(wrb, 0, 0);
531                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532                 queue_head_inc(txq);
533         }
534
535         wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
536                 wrb_cnt, copied);
537         be_dws_cpu_to_le(hdr, sizeof(*hdr));
538
539         return copied;
540 dma_err:
541         txq->head = map_head;
542         while (copied) {
543                 wrb = queue_head_node(txq);
544                 unmap_tx_frag(pdev, wrb, map_single);
545                 map_single = false;
546                 copied -= wrb->frag_len;
547                 queue_head_inc(txq);
548         }
549         return 0;
550 }
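
/* Editor's note on the dma_err path above: the queue head is rewound to
 * map_head (just past the header WRB) and each WRB filled so far is walked
 * again to unmap its DMA mapping; only the first mapping can be a
 * pci_map_single() of the linear data, hence map_single is cleared after
 * the first iteration.
 */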
551
552 static netdev_tx_t be_xmit(struct sk_buff *skb,
553                         struct net_device *netdev)
554 {
555         struct be_adapter *adapter = netdev_priv(netdev);
556         struct be_tx_obj *tx_obj = &adapter->tx_obj;
557         struct be_queue_info *txq = &tx_obj->q;
558         u32 wrb_cnt = 0, copied = 0;
559         u32 start = txq->head;
560         bool dummy_wrb, stopped = false;
561
562         wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
563
564         copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
565         if (copied) {
566                 /* record the sent skb in the sent_skb table */
567                 BUG_ON(tx_obj->sent_skb_list[start]);
568                 tx_obj->sent_skb_list[start] = skb;
569
570                 /* Ensure txq has space for the next skb; else stop the queue
571                  * *BEFORE* ringing the tx doorbell, so that we serialize the
572                  * tx compls of the current transmit which'll wake up the queue
573                  */
574                 atomic_add(wrb_cnt, &txq->used);
575                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
576                                                                 txq->len) {
577                         netif_stop_queue(netdev);
578                         stopped = true;
579                 }
580
581                 be_txq_notify(adapter, txq->id, wrb_cnt);
582
583                 be_tx_stats_update(adapter, wrb_cnt, copied,
584                                 skb_shinfo(skb)->gso_segs, stopped);
585         } else {
586                 txq->head = start;
587                 dev_kfree_skb_any(skb);
588         }
589         return NETDEV_TX_OK;
590 }
591
592 static int be_change_mtu(struct net_device *netdev, int new_mtu)
593 {
594         struct be_adapter *adapter = netdev_priv(netdev);
595         if (new_mtu < BE_MIN_MTU ||
596                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
597                                         (ETH_HLEN + ETH_FCS_LEN))) {
598                 dev_info(&adapter->pdev->dev,
599                         "MTU must be between %d and %d bytes\n",
600                         BE_MIN_MTU,
601                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
602                 return -EINVAL;
603         }
604         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
605                         netdev->mtu, new_mtu);
606         netdev->mtu = new_mtu;
607         return 0;
608 }
609
610 /*
611  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
612  * If the user configures more, place BE in vlan promiscuous mode.
613  */
614 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
615 {
616         u16 vtag[BE_NUM_VLANS_SUPPORTED];
617         u16 ntags = 0, i;
618         int status = 0;
619         u32 if_handle;
620
621         if (vf) {
622                 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
623                 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
624                 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
625         }
626
627         if (adapter->vlans_added <= adapter->max_vlans)  {
628                 /* Construct VLAN Table to give to HW */
629                 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
630                         if (adapter->vlan_tag[i]) {
631                                 vtag[ntags] = cpu_to_le16(i);
632                                 ntags++;
633                         }
634                 }
635                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
636                                         vtag, ntags, 1, 0);
637         } else {
638                 status = be_cmd_vlan_config(adapter, adapter->if_handle,
639                                         NULL, 0, 1, 1);
640         }
641
642         return status;
643 }
644
645 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
646 {
647         struct be_adapter *adapter = netdev_priv(netdev);
648
649         adapter->vlan_grp = grp;
650 }
651
652 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
653 {
654         struct be_adapter *adapter = netdev_priv(netdev);
655
656         adapter->vlans_added++;
657         if (!be_physfn(adapter))
658                 return;
659
660         adapter->vlan_tag[vid] = 1;
661         if (adapter->vlans_added <= (adapter->max_vlans + 1))
662                 be_vid_config(adapter, false, 0);
663 }
664
665 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
666 {
667         struct be_adapter *adapter = netdev_priv(netdev);
668
669         adapter->vlans_added--;
670         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
671
672         if (!be_physfn(adapter))
673                 return;
674
675         adapter->vlan_tag[vid] = 0;
676         if (adapter->vlans_added <= adapter->max_vlans)
677                 be_vid_config(adapter, false, 0);
678 }
679
680 static void be_set_multicast_list(struct net_device *netdev)
681 {
682         struct be_adapter *adapter = netdev_priv(netdev);
683
684         if (netdev->flags & IFF_PROMISC) {
685                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
686                 adapter->promiscuous = true;
687                 goto done;
688         }
689
690         /* BE was previously in promiscuous mode; disable it */
691         if (adapter->promiscuous) {
692                 adapter->promiscuous = false;
693                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
694         }
695
696         /* Enable multicast promisc if num configured exceeds what we support */
697         if (netdev->flags & IFF_ALLMULTI ||
698             netdev_mc_count(netdev) > BE_MAX_MC) {
699                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
700                                 &adapter->mc_cmd_mem);
701                 goto done;
702         }
703
704         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
705                 &adapter->mc_cmd_mem);
706 done:
707         return;
708 }
709
710 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
711 {
712         struct be_adapter *adapter = netdev_priv(netdev);
713         int status;
714
715         if (!adapter->sriov_enabled)
716                 return -EPERM;
717
718         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
719                 return -EINVAL;
720
721         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
722                 status = be_cmd_pmac_del(adapter,
723                                         adapter->vf_cfg[vf].vf_if_handle,
724                                         adapter->vf_cfg[vf].vf_pmac_id);
725
726         status = be_cmd_pmac_add(adapter, mac,
727                                 adapter->vf_cfg[vf].vf_if_handle,
728                                 &adapter->vf_cfg[vf].vf_pmac_id);
729
730         if (status)
731                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
732                                 mac, vf);
733         else
734                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
735
736         return status;
737 }
738
739 static int be_get_vf_config(struct net_device *netdev, int vf,
740                         struct ifla_vf_info *vi)
741 {
742         struct be_adapter *adapter = netdev_priv(netdev);
743
744         if (!adapter->sriov_enabled)
745                 return -EPERM;
746
747         if (vf >= num_vfs)
748                 return -EINVAL;
749
750         vi->vf = vf;
751         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
752         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
753         vi->qos = 0;
754         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
755
756         return 0;
757 }
758
759 static int be_set_vf_vlan(struct net_device *netdev,
760                         int vf, u16 vlan, u8 qos)
761 {
762         struct be_adapter *adapter = netdev_priv(netdev);
763         int status = 0;
764
765         if (!adapter->sriov_enabled)
766                 return -EPERM;
767
768         if ((vf >= num_vfs) || (vlan > 4095))
769                 return -EINVAL;
770
771         if (vlan) {
772                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
773                 adapter->vlans_added++;
774         } else {
775                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
776                 adapter->vlans_added--;
777         }
778
779         status = be_vid_config(adapter, true, vf);
780
781         if (status)
782                 dev_info(&adapter->pdev->dev,
783                                 "VLAN %d config on VF %d failed\n", vlan, vf);
784         return status;
785 }
786
787 static int be_set_vf_tx_rate(struct net_device *netdev,
788                         int vf, int rate)
789 {
790         struct be_adapter *adapter = netdev_priv(netdev);
791         int status = 0;
792
793         if (!adapter->sriov_enabled)
794                 return -EPERM;
795
796         if ((vf >= num_vfs) || (rate < 0))
797                 return -EINVAL;
798
799         if (rate > 10000)
800                 rate = 10000;
801
802         adapter->vf_cfg[vf].vf_tx_rate = rate;
803         status = be_cmd_set_qos(adapter, rate / 10, vf);
804
805         if (status)
806                 dev_info(&adapter->pdev->dev,
807                                 "tx rate %d on VF %d failed\n", rate, vf);
808         return status;
809 }
810
811 static void be_rx_rate_update(struct be_rx_obj *rxo)
812 {
813         struct be_rx_stats *stats = &rxo->stats;
814         ulong now = jiffies;
815
816         /* Wrapped around */
817         if (time_before(now, stats->rx_jiffies)) {
818                 stats->rx_jiffies = now;
819                 return;
820         }
821
822         /* Update the rate once in two seconds */
823         if ((now - stats->rx_jiffies) < 2 * HZ)
824                 return;
825
826         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
827                                 now - stats->rx_jiffies);
828         stats->rx_jiffies = now;
829         stats->rx_bytes_prev = stats->rx_bytes;
830 }
831
832 static void be_rx_stats_update(struct be_rx_obj *rxo,
833                 u32 pktsize, u16 numfrags, u8 pkt_type)
834 {
835         struct be_rx_stats *stats = &rxo->stats;
836
837         stats->rx_compl++;
838         stats->rx_frags += numfrags;
839         stats->rx_bytes += pktsize;
840         stats->rx_pkts++;
841         if (pkt_type == BE_MULTICAST_PACKET)
842                 stats->rx_mcast_pkts++;
843 }
844
845 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
846 {
847         u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
848
849         l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
850         ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
851         ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
852         if (ip_version) {
853                 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
854                 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
855         }
856         ipv6_chk = (ip_version && (tcpf || udpf));
857
858         return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
859 }
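
/* Editor's note: do_pkt_csum() returns true when the checksum must still be
 * verified in software. The hardware result is trusted (false) only when
 * rx checksum offload is enabled (cso) and the completion reports good L4
 * and IP checksums for a recognized TCP or UDP packet.
 */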
860
861 static struct be_rx_page_info *
862 get_rx_page_info(struct be_adapter *adapter,
863                 struct be_rx_obj *rxo,
864                 u16 frag_idx)
865 {
866         struct be_rx_page_info *rx_page_info;
867         struct be_queue_info *rxq = &rxo->q;
868
869         rx_page_info = &rxo->page_info_tbl[frag_idx];
870         BUG_ON(!rx_page_info->page);
871
872         if (rx_page_info->last_page_user) {
873                 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
874                         adapter->big_page_size, PCI_DMA_FROMDEVICE);
875                 rx_page_info->last_page_user = false;
876         }
877
878         atomic_dec(&rxq->used);
879         return rx_page_info;
880 }
881
882 /* Throw away the data in the Rx completion */
883 static void be_rx_compl_discard(struct be_adapter *adapter,
884                 struct be_rx_obj *rxo,
885                 struct be_eth_rx_compl *rxcp)
886 {
887         struct be_queue_info *rxq = &rxo->q;
888         struct be_rx_page_info *page_info;
889         u16 rxq_idx, i, num_rcvd;
890
891         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
892         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
893
894         for (i = 0; i < num_rcvd; i++) {
895                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
896                 put_page(page_info->page);
897                 memset(page_info, 0, sizeof(*page_info));
898                 index_inc(&rxq_idx, rxq->len);
899         }
900 }
901
902 /*
903  * skb_fill_rx_data forms a complete skb for an ether frame
904  * indicated by rxcp.
905  */
906 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
907                         struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
908                         u16 num_rcvd)
909 {
910         struct be_queue_info *rxq = &rxo->q;
911         struct be_rx_page_info *page_info;
912         u16 rxq_idx, i, j;
913         u32 pktsize, hdr_len, curr_frag_len, size;
914         u8 *start;
915         u8 pkt_type;
916
917         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
918         pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
919         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
920
921         page_info = get_rx_page_info(adapter, rxo, rxq_idx);
922
923         start = page_address(page_info->page) + page_info->page_offset;
924         prefetch(start);
925
926         /* Copy data in the first descriptor of this completion */
927         curr_frag_len = min(pktsize, rx_frag_size);
928
929         /* Copy the header portion into skb_data */
930         hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
931         memcpy(skb->data, start, hdr_len);
932         skb->len = curr_frag_len;
933         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
934                 /* Complete packet has now been moved to data */
935                 put_page(page_info->page);
936                 skb->data_len = 0;
937                 skb->tail += curr_frag_len;
938         } else {
939                 skb_shinfo(skb)->nr_frags = 1;
940                 skb_shinfo(skb)->frags[0].page = page_info->page;
941                 skb_shinfo(skb)->frags[0].page_offset =
942                                         page_info->page_offset + hdr_len;
943                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
944                 skb->data_len = curr_frag_len - hdr_len;
945                 skb->tail += hdr_len;
946         }
947         page_info->page = NULL;
948
949         if (pktsize <= rx_frag_size) {
950                 BUG_ON(num_rcvd != 1);
951                 goto done;
952         }
953
954         /* More frags present for this completion */
955         size = pktsize;
956         for (i = 1, j = 0; i < num_rcvd; i++) {
957                 size -= curr_frag_len;
958                 index_inc(&rxq_idx, rxq->len);
959                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
960
961                 curr_frag_len = min(size, rx_frag_size);
962
963                 /* Coalesce all frags from the same physical page in one slot */
964                 if (page_info->page_offset == 0) {
965                         /* Fresh page */
966                         j++;
967                         skb_shinfo(skb)->frags[j].page = page_info->page;
968                         skb_shinfo(skb)->frags[j].page_offset =
969                                                         page_info->page_offset;
970                         skb_shinfo(skb)->frags[j].size = 0;
971                         skb_shinfo(skb)->nr_frags++;
972                 } else {
973                         put_page(page_info->page);
974                 }
975
976                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
977                 skb->len += curr_frag_len;
978                 skb->data_len += curr_frag_len;
979
980                 page_info->page = NULL;
981         }
982         BUG_ON(j > MAX_SKB_FRAGS);
983
984 done:
985         be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
986 }
987
988 /* Process the RX completion indicated by rxcp when GRO is disabled */
989 static void be_rx_compl_process(struct be_adapter *adapter,
990                         struct be_rx_obj *rxo,
991                         struct be_eth_rx_compl *rxcp)
992 {
993         struct sk_buff *skb;
994         u32 vlanf, vid;
995         u16 num_rcvd;
996         u8 vtm;
997
998         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
999         /* Is it a flush compl that has no data? */
1000         if (unlikely(num_rcvd == 0))
1001                 return;
1002
1003         skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1004         if (unlikely(!skb)) {
1005                 if (net_ratelimit())
1006                         dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1007                 be_rx_compl_discard(adapter, rxo, rxcp);
1008                 return;
1009         }
1010
1011         skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1012
1013         if (do_pkt_csum(rxcp, adapter->rx_csum))
1014                 skb_checksum_none_assert(skb);
1015         else
1016                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1017
1018         skb->truesize = skb->len + sizeof(struct sk_buff);
1019         skb->protocol = eth_type_trans(skb, adapter->netdev);
1020
1021         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1022         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1023
1024         /* vlanf could be wrongly set in some cards.
1025          * ignore if vtm is not set */
1026         if ((adapter->function_mode & 0x400) && !vtm)
1027                 vlanf = 0;
1028
1029         if (unlikely(vlanf)) {
1030                 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1031                         kfree_skb(skb);
1032                         return;
1033                 }
1034                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1035                 vid = swab16(vid);
1036                 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1037         } else {
1038                 netif_receive_skb(skb);
1039         }
1040 }
1041
1042 /* Process the RX completion indicated by rxcp when GRO is enabled */
1043 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1044                 struct be_rx_obj *rxo,
1045                 struct be_eth_rx_compl *rxcp)
1046 {
1047         struct be_rx_page_info *page_info;
1048         struct sk_buff *skb = NULL;
1049         struct be_queue_info *rxq = &rxo->q;
1050         struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1051         u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1052         u16 i, rxq_idx = 0, vid, j;
1053         u8 vtm;
1054         u8 pkt_type;
1055
1056         num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1057         /* Is it a flush compl that has no data? */
1058         if (unlikely(num_rcvd == 0))
1059                 return;
1060
1061         pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1062         vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1063         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1064         vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1065         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1066
1067         /* vlanf could be wrongly set in some cards.
1068          * ignore if vtm is not set */
1069         if ((adapter->function_mode & 0x400) && !vtm)
1070                 vlanf = 0;
1071
1072         skb = napi_get_frags(&eq_obj->napi);
1073         if (!skb) {
1074                 be_rx_compl_discard(adapter, rxo, rxcp);
1075                 return;
1076         }
1077
1078         remaining = pkt_size;
1079         for (i = 0, j = -1; i < num_rcvd; i++) {
1080                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1081
1082                 curr_frag_len = min(remaining, rx_frag_size);
1083
1084                 /* Coalesce all frags from the same physical page in one slot */
1085                 if (i == 0 || page_info->page_offset == 0) {
1086                         /* First frag or Fresh page */
1087                         j++;
1088                         skb_shinfo(skb)->frags[j].page = page_info->page;
1089                         skb_shinfo(skb)->frags[j].page_offset =
1090                                                         page_info->page_offset;
1091                         skb_shinfo(skb)->frags[j].size = 0;
1092                 } else {
1093                         put_page(page_info->page);
1094                 }
1095                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1096
1097                 remaining -= curr_frag_len;
1098                 index_inc(&rxq_idx, rxq->len);
1099                 memset(page_info, 0, sizeof(*page_info));
1100         }
1101         BUG_ON(j > MAX_SKB_FRAGS);
1102
1103         skb_shinfo(skb)->nr_frags = j + 1;
1104         skb->len = pkt_size;
1105         skb->data_len = pkt_size;
1106         skb->truesize += pkt_size;
1107         skb->ip_summed = CHECKSUM_UNNECESSARY;
1108
1109         if (likely(!vlanf)) {
1110                 napi_gro_frags(&eq_obj->napi);
1111         } else {
1112                 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1113                 vid = swab16(vid);
1114
1115                 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1116                         return;
1117
1118                 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1119         }
1120
1121         be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1122 }
1123
1124 static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1125 {
1126         struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1127
1128         if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1129                 return NULL;
1130
1131         rmb();
1132         be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1133
1134         queue_tail_inc(&rxo->cq);
1135         return rxcp;
1136 }
1137
1138 /* To reset the valid bit, we need to reset the whole word as
1139  * when walking the queue the valid entries are little-endian
1140  * and invalid entries are host endian
1141  */
1142 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1143 {
1144         rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1145 }
1146
1147 static inline struct page *be_alloc_pages(u32 size)
1148 {
1149         gfp_t alloc_flags = GFP_ATOMIC;
1150         u32 order = get_order(size);
1151         if (order > 0)
1152                 alloc_flags |= __GFP_COMP;
1153         return  alloc_pages(alloc_flags, order);
1154 }
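
/* Editor's note: __GFP_COMP above requests a compound page for order > 0
 * allocations, so the get_page()/put_page() refcounting done per rx frag
 * elsewhere in this file operates on the compound head and keeps the whole
 * multi-page buffer alive until its last fragment is consumed.
 */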
1155
1156 /*
1157  * Allocate a page, split it to fragments of size rx_frag_size and post as
1158  * receive buffers to BE
1159  */
1160 static void be_post_rx_frags(struct be_rx_obj *rxo)
1161 {
1162         struct be_adapter *adapter = rxo->adapter;
1163         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1164         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1165         struct be_queue_info *rxq = &rxo->q;
1166         struct page *pagep = NULL;
1167         struct be_eth_rx_d *rxd;
1168         u64 page_dmaaddr = 0, frag_dmaaddr;
1169         u32 posted, page_offset = 0;
1170
1171         page_info = &rxo->page_info_tbl[rxq->head];
1172         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1173                 if (!pagep) {
1174                         pagep = be_alloc_pages(adapter->big_page_size);
1175                         if (unlikely(!pagep)) {
1176                                 rxo->stats.rx_post_fail++;
1177                                 break;
1178                         }
1179                         page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
1180                                                 adapter->big_page_size,
1181                                                 PCI_DMA_FROMDEVICE);
1182                         page_info->page_offset = 0;
1183                 } else {
1184                         get_page(pagep);
1185                         page_info->page_offset = page_offset + rx_frag_size;
1186                 }
1187                 page_offset = page_info->page_offset;
1188                 page_info->page = pagep;
1189                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1190                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1191
1192                 rxd = queue_head_node(rxq);
1193                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1194                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1195
1196                 /* Any space left in the current big page for another frag? */
1197                 if ((page_offset + rx_frag_size + rx_frag_size) >
1198                                         adapter->big_page_size) {
1199                         pagep = NULL;
1200                         page_info->last_page_user = true;
1201                 }
1202
1203                 prev_page_info = page_info;
1204                 queue_head_inc(rxq);
1205                 page_info = &page_info_tbl[rxq->head];
1206         }
1207         if (pagep)
1208                 prev_page_info->last_page_user = true;
1209
1210         if (posted) {
1211                 atomic_add(posted, &rxq->used);
1212                 be_rxq_notify(adapter, rxq->id, posted);
1213         } else if (atomic_read(&rxq->used) == 0) {
1214                 /* Let be_worker replenish when memory is available */
1215                 rxo->rx_post_starved = true;
1216         }
1217 }
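
/* Editor's note: a worked example of the page-splitting scheme above with
 * illustrative sizes. With PAGE_SIZE = 4096 and rx_frag_size = 2048,
 * big_page_size is 4096 and each page yields two rx frags: the first frag
 * maps the page, the second takes a get_page() reference, and the frag
 * that exhausts the page is marked last_page_user so only it triggers the
 * pci_unmap_page() in get_rx_page_info().
 */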
1218
1219 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1220 {
1221         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1222
1223         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1224                 return NULL;
1225
1226         rmb();
1227         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1228
1229         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1230
1231         queue_tail_inc(tx_cq);
1232         return txcp;
1233 }
1234
1235 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1236 {
1237         struct be_queue_info *txq = &adapter->tx_obj.q;
1238         struct be_eth_wrb *wrb;
1239         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1240         struct sk_buff *sent_skb;
1241         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1242         bool unmap_skb_hdr = true;
1243
1244         sent_skb = sent_skbs[txq->tail];
1245         BUG_ON(!sent_skb);
1246         sent_skbs[txq->tail] = NULL;
1247
1248         /* skip header wrb */
1249         queue_tail_inc(txq);
1250
1251         do {
1252                 cur_index = txq->tail;
1253                 wrb = queue_tail_node(txq);
1254                 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
1255                                         skb_headlen(sent_skb)));
1256                 unmap_skb_hdr = false;
1257
1258                 num_wrbs++;
1259                 queue_tail_inc(txq);
1260         } while (cur_index != last_index);
1261
1262         atomic_sub(num_wrbs, &txq->used);
1263
1264         kfree_skb(sent_skb);
1265 }
1266
1267 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1268 {
1269         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1270
1271         if (!eqe->evt)
1272                 return NULL;
1273
1274         rmb();
1275         eqe->evt = le32_to_cpu(eqe->evt);
1276         queue_tail_inc(&eq_obj->q);
1277         return eqe;
1278 }
1279
1280 static int event_handle(struct be_adapter *adapter,
1281                         struct be_eq_obj *eq_obj)
1282 {
1283         struct be_eq_entry *eqe;
1284         u16 num = 0;
1285
1286         while ((eqe = event_get(eq_obj)) != NULL) {
1287                 eqe->evt = 0;
1288                 num++;
1289         }
1290
1291         /* Deal with any spurious interrupts that come
1292          * without events
1293          */
1294         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1295         if (num)
1296                 napi_schedule(&eq_obj->napi);
1297
1298         return num;
1299 }
1300
1301 /* Just read and notify events without processing them.
1302  * Used at the time of destroying event queues */
1303 static void be_eq_clean(struct be_adapter *adapter,
1304                         struct be_eq_obj *eq_obj)
1305 {
1306         struct be_eq_entry *eqe;
1307         u16 num = 0;
1308
1309         while ((eqe = event_get(eq_obj)) != NULL) {
1310                 eqe->evt = 0;
1311                 num++;
1312         }
1313
1314         if (num)
1315                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1316 }
1317
1318 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1319 {
1320         struct be_rx_page_info *page_info;
1321         struct be_queue_info *rxq = &rxo->q;
1322         struct be_queue_info *rx_cq = &rxo->cq;
1323         struct be_eth_rx_compl *rxcp;
1324         u16 tail;
1325
1326         /* First cleanup pending rx completions */
1327         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1328                 be_rx_compl_discard(adapter, rxo, rxcp);
1329                 be_rx_compl_reset(rxcp);
1330                 be_cq_notify(adapter, rx_cq->id, true, 1);
1331         }
1332
1333         /* Then free posted rx buffer that were not used */
1334         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1335         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1336                 page_info = get_rx_page_info(adapter, rxo, tail);
1337                 put_page(page_info->page);
1338                 memset(page_info, 0, sizeof(*page_info));
1339         }
1340         BUG_ON(atomic_read(&rxq->used));
1341 }
1342
1343 static void be_tx_compl_clean(struct be_adapter *adapter)
1344 {
1345         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1346         struct be_queue_info *txq = &adapter->tx_obj.q;
1347         struct be_eth_tx_compl *txcp;
1348         u16 end_idx, cmpl = 0, timeo = 0;
1349         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1350         struct sk_buff *sent_skb;
1351         bool dummy_wrb;
1352
1353         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1354         do {
1355                 while ((txcp = be_tx_compl_get(tx_cq))) {
1356                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1357                                         wrb_index, txcp);
1358                         be_tx_compl_process(adapter, end_idx);
1359                         cmpl++;
1360                 }
1361                 if (cmpl) {
1362                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1363                         cmpl = 0;
1364                 }
1365
1366                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1367                         break;
1368
1369                 mdelay(1);
1370         } while (true);
1371
1372         if (atomic_read(&txq->used))
1373                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1374                         atomic_read(&txq->used));
1375
1376         /* free posted tx for which compls will never arrive */
1377         while (atomic_read(&txq->used)) {
1378                 sent_skb = sent_skbs[txq->tail];
1379                 end_idx = txq->tail;
1380                 index_adv(&end_idx,
1381                         wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1382                 be_tx_compl_process(adapter, end_idx);
1383         }
1384 }
1385
1386 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1387 {
1388         struct be_queue_info *q;
1389
1390         q = &adapter->mcc_obj.q;
1391         if (q->created)
1392                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1393         be_queue_free(adapter, q);
1394
1395         q = &adapter->mcc_obj.cq;
1396         if (q->created)
1397                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1398         be_queue_free(adapter, q);
1399 }
1400
1401 /* Must be called only after TX qs are created as MCC shares TX EQ */
1402 static int be_mcc_queues_create(struct be_adapter *adapter)
1403 {
1404         struct be_queue_info *q, *cq;
1405
1406         /* Alloc MCC compl queue */
1407         cq = &adapter->mcc_obj.cq;
1408         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1409                         sizeof(struct be_mcc_compl)))
1410                 goto err;
1411
1412         /* Ask BE to create MCC compl queue; share TX's eq */
1413         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1414                 goto mcc_cq_free;
1415
1416         /* Alloc MCC queue */
1417         q = &adapter->mcc_obj.q;
1418         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1419                 goto mcc_cq_destroy;
1420
1421         /* Ask BE to create MCC queue */
1422         if (be_cmd_mccq_create(adapter, q, cq))
1423                 goto mcc_q_free;
1424
1425         return 0;
1426
1427 mcc_q_free:
1428         be_queue_free(adapter, q);
1429 mcc_cq_destroy:
1430         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1431 mcc_cq_free:
1432         be_queue_free(adapter, cq);
1433 err:
1434         return -1;
1435 }
1436
1437 static void be_tx_queues_destroy(struct be_adapter *adapter)
1438 {
1439         struct be_queue_info *q;
1440
1441         q = &adapter->tx_obj.q;
1442         if (q->created)
1443                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1444         be_queue_free(adapter, q);
1445
1446         q = &adapter->tx_obj.cq;
1447         if (q->created)
1448                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1449         be_queue_free(adapter, q);
1450
1451         /* Clear any residual events */
1452         be_eq_clean(adapter, &adapter->tx_eq);
1453
1454         q = &adapter->tx_eq.q;
1455         if (q->created)
1456                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1457         be_queue_free(adapter, q);
1458 }
1459
1460 static int be_tx_queues_create(struct be_adapter *adapter)
1461 {
1462         struct be_queue_info *eq, *q, *cq;
1463
1464         adapter->tx_eq.max_eqd = 0;
1465         adapter->tx_eq.min_eqd = 0;
1466         adapter->tx_eq.cur_eqd = 96;
1467         adapter->tx_eq.enable_aic = false;
1468         /* Alloc Tx Event queue */
1469         eq = &adapter->tx_eq.q;
1470         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1471                 return -1;
1472
1473         /* Ask BE to create Tx Event queue */
1474         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1475                 goto tx_eq_free;
1476         adapter->base_eq_id = adapter->tx_eq.q.id;
1477
1478         /* Alloc TX eth compl queue */
1479         cq = &adapter->tx_obj.cq;
1480         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1481                         sizeof(struct be_eth_tx_compl)))
1482                 goto tx_eq_destroy;
1483
1484         /* Ask BE to create Tx eth compl queue */
1485         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1486                 goto tx_cq_free;
1487
1488         /* Alloc TX eth queue */
1489         q = &adapter->tx_obj.q;
1490         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1491                 goto tx_cq_destroy;
1492
1493         /* Ask BE to create Tx eth queue */
1494         if (be_cmd_txq_create(adapter, q, cq))
1495                 goto tx_q_free;
1496         return 0;
1497
1498 tx_q_free:
1499         be_queue_free(adapter, q);
1500 tx_cq_destroy:
1501         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1502 tx_cq_free:
1503         be_queue_free(adapter, cq);
1504 tx_eq_destroy:
1505         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1506 tx_eq_free:
1507         be_queue_free(adapter, eq);
1508         return -1;
1509 }
1510
1511 static void be_rx_queues_destroy(struct be_adapter *adapter)
1512 {
1513         struct be_queue_info *q;
1514         struct be_rx_obj *rxo;
1515         int i;
1516
1517         for_all_rx_queues(adapter, rxo, i) {
1518                 q = &rxo->q;
1519                 if (q->created) {
1520                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1521                         /* After the rxq is invalidated, wait for a grace time
1522                          * of 1ms for all dma to end and the flush compl to
1523                          * arrive
1524                          */
1525                         mdelay(1);
1526                         be_rx_q_clean(adapter, rxo);
1527                 }
1528                 be_queue_free(adapter, q);
1529
1530                 q = &rxo->cq;
1531                 if (q->created)
1532                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1533                 be_queue_free(adapter, q);
1534
1535                 /* Clear any residual events */
1536                 q = &rxo->rx_eq.q;
1537                 if (q->created) {
1538                         be_eq_clean(adapter, &rxo->rx_eq);
1539                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1540                 }
1541                 be_queue_free(adapter, q);
1542         }
1543 }
1544
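/* Creates an EQ, CQ and RX queue per rx_obj. The first RX queue is the
 * default non-RSS queue; when multiple RX queues are enabled, the rest
 * are entered into the RSS indirection table via be_cmd_rss_config().
 */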
1545 static int be_rx_queues_create(struct be_adapter *adapter)
1546 {
1547         struct be_queue_info *eq, *q, *cq;
1548         struct be_rx_obj *rxo;
1549         int rc, i;
1550
1551         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1552         for_all_rx_queues(adapter, rxo, i) {
1553                 rxo->adapter = adapter;
1554                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1555                 rxo->rx_eq.enable_aic = true;
1556
1557                 /* EQ */
1558                 eq = &rxo->rx_eq.q;
1559                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1560                                         sizeof(struct be_eq_entry));
1561                 if (rc)
1562                         goto err;
1563
1564                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1565                 if (rc)
1566                         goto err;
1567
1568                 /* CQ */
1569                 cq = &rxo->cq;
1570                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1571                                 sizeof(struct be_eth_rx_compl));
1572                 if (rc)
1573                         goto err;
1574
1575                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1576                 if (rc)
1577                         goto err;
1578
1579                 /* Rx Q */
1580                 q = &rxo->q;
1581                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1582                                 sizeof(struct be_eth_rx_d));
1583                 if (rc)
1584                         goto err;
1585
1586                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1587                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1588                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1589                 if (rc)
1590                         goto err;
1591         }
1592
1593         if (be_multi_rxq(adapter)) {
1594                 u8 rsstable[MAX_RSS_QS];
1595
1596                 for_all_rss_queues(adapter, rxo, i)
1597                         rsstable[i] = rxo->rss_id;
1598
1599                 rc = be_cmd_rss_config(adapter, rsstable,
1600                         adapter->num_rx_qs - 1);
1601                 if (rc)
1602                         goto err;
1603         }
1604
1605         return 0;
1606 err:
1607         be_rx_queues_destroy(adapter);
1608         return -1;
1609 }
1610
1611 /* There are 8 evt ids per func. Returns the evt id's bit number */
1612 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1613 {
1614         return eq_id - adapter->base_eq_id;
1615 }
1616
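/* Legacy INTx handler: reads the CEV ISR register to find out which
 * event queues (the shared TX/MCC EQ or one of the RX EQs) raised the
 * interrupt and dispatches each of them to event_handle().
 */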
1617 static irqreturn_t be_intx(int irq, void *dev)
1618 {
1619         struct be_adapter *adapter = dev;
1620         struct be_rx_obj *rxo;
1621         int isr, i;
1622
1623         isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1624                 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1625         if (!isr)
1626                 return IRQ_NONE;
1627
1628         if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id)) & isr)
1629                 event_handle(adapter, &adapter->tx_eq);
1630
1631         for_all_rx_queues(adapter, rxo, i) {
1632                 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id)) & isr)
1633                         event_handle(adapter, &rxo->rx_eq);
1634         }
1635
1636         return IRQ_HANDLED;
1637 }
1638
1639 static irqreturn_t be_msix_rx(int irq, void *dev)
1640 {
1641         struct be_rx_obj *rxo = dev;
1642         struct be_adapter *adapter = rxo->adapter;
1643
1644         event_handle(adapter, &rxo->rx_eq);
1645
1646         return IRQ_HANDLED;
1647 }
1648
1649 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1650 {
1651         struct be_adapter *adapter = dev;
1652
1653         event_handle(adapter, &adapter->tx_eq);
1654
1655         return IRQ_HANDLED;
1656 }
1657
1658 static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1659                         struct be_eth_rx_compl *rxcp)
1660 {
1661         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1662         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1663
1664         if (err)
1665                 rxo->stats.rxcp_err++;
1666
1667         return tcp_frame && !err;
1668 }
1669
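/* NAPI poll handler for an RX queue: processes up to "budget" Rx
 * completions (via GRO when the frame qualifies), refills the RX ring
 * when it runs low, and re-arms the CQ only once all work is consumed.
 */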
1670 int be_poll_rx(struct napi_struct *napi, int budget)
1671 {
1672         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1673         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1674         struct be_adapter *adapter = rxo->adapter;
1675         struct be_queue_info *rx_cq = &rxo->cq;
1676         struct be_eth_rx_compl *rxcp;
1677         u32 work_done;
1678
1679         rxo->stats.rx_polls++;
1680         for (work_done = 0; work_done < budget; work_done++) {
1681                 rxcp = be_rx_compl_get(rxo);
1682                 if (!rxcp)
1683                         break;
1684
1685                 if (do_gro(adapter, rxo, rxcp))
1686                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1687                 else
1688                         be_rx_compl_process(adapter, rxo, rxcp);
1689
1690                 be_rx_compl_reset(rxcp);
1691         }
1692
1693         /* Refill the queue */
1694         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1695                 be_post_rx_frags(rxo);
1696
1697         /* All consumed */
1698         if (work_done < budget) {
1699                 napi_complete(napi);
1700                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1701         } else {
1702                 /* More to be consumed; continue with interrupts disabled */
1703                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1704         }
1705         return work_done;
1706 }
1707
1708 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1709  * For TX/MCC we don't honour budget; consume everything.
1710  */
1711 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1712 {
1713         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1714         struct be_adapter *adapter =
1715                 container_of(tx_eq, struct be_adapter, tx_eq);
1716         struct be_queue_info *txq = &adapter->tx_obj.q;
1717         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1718         struct be_eth_tx_compl *txcp;
1719         int tx_compl = 0, mcc_compl, status = 0;
1720         u16 end_idx;
1721
1722         while ((txcp = be_tx_compl_get(tx_cq))) {
1723                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1724                                 wrb_index, txcp);
1725                 be_tx_compl_process(adapter, end_idx);
1726                 tx_compl++;
1727         }
1728
1729         mcc_compl = be_process_mcc(adapter, &status);
1730
1731         napi_complete(napi);
1732
1733         if (mcc_compl) {
1734                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1735                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1736         }
1737
1738         if (tx_compl) {
1739                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1740
1741                 /* As Tx wrbs have been freed up, wake up netdev queue if
1742                  * it was stopped due to lack of tx wrbs.
1743                  */
1744                 if (netif_queue_stopped(adapter->netdev) &&
1745                         atomic_read(&txq->used) < txq->len / 2) {
1746                         netif_wake_queue(adapter->netdev);
1747                 }
1748
1749                 tx_stats(adapter)->be_tx_events++;
1750                 tx_stats(adapter)->be_tx_compl += tx_compl;
1751         }
1752
1753         return 1;
1754 }
1755
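/* Reads the unrecoverable-error (UE) status registers, applies the
 * corresponding mask registers and logs every unmasked bit using the
 * ue_status_low_desc/ue_status_hi_desc tables at the top of this file.
 */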
1756 void be_detect_dump_ue(struct be_adapter *adapter)
1757 {
1758         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1759         u32 i;
1760
1761         pci_read_config_dword(adapter->pdev,
1762                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1763         pci_read_config_dword(adapter->pdev,
1764                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1765         pci_read_config_dword(adapter->pdev,
1766                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1767         pci_read_config_dword(adapter->pdev,
1768                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1769
1770         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1771         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1772
1773         if (ue_status_lo || ue_status_hi) {
1774                 adapter->ue_detected = true;
1775                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1776         }
1777
1778         if (ue_status_lo) {
1779                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1780                         if (ue_status_lo & 1)
1781                                 dev_err(&adapter->pdev->dev,
1782                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1783                 }
1784         }
1785         if (ue_status_hi) {
1786                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1787                         if (ue_status_hi & 1)
1788                                 dev_err(&adapter->pdev->dev,
1789                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1790                 }
1791         }
1793 }
1794
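/* Periodic (1 sec) housekeeping: issues a HW stats request unless one is
 * already outstanding, updates TX/RX rates and the adaptive EQ delay,
 * replenishes starved RX rings and, until one is found, checks for UEs.
 */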
1795 static void be_worker(struct work_struct *work)
1796 {
1797         struct be_adapter *adapter =
1798                 container_of(work, struct be_adapter, work.work);
1799         struct be_rx_obj *rxo;
1800         int i;
1801
1802         if (!adapter->stats_ioctl_sent)
1803                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1804
1805         be_tx_rate_update(adapter);
1806
1807         for_all_rx_queues(adapter, rxo, i) {
1808                 be_rx_rate_update(rxo);
1809                 be_rx_eqd_update(adapter, rxo);
1810
1811                 if (rxo->rx_post_starved) {
1812                         rxo->rx_post_starved = false;
1813                         be_post_rx_frags(rxo);
1814                 }
1815         }
1816
1817         if (!adapter->ue_detected)
1818                 be_detect_dump_ue(adapter);
1819
1820         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1821 }
1822
1823 static void be_msix_disable(struct be_adapter *adapter)
1824 {
1825         if (adapter->msix_enabled) {
1826                 pci_disable_msix(adapter->pdev);
1827                 adapter->msix_enabled = false;
1828         }
1829 }
1830
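/* Multiple RX queues are used only when the multi_rxq module parameter
 * allows it, the function reports RSS capability, SR-IOV is disabled and
 * function_mode bit 0x400 is clear.
 */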
1831 static int be_num_rxqs_get(struct be_adapter *adapter)
1832 {
1833         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1834                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1835                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1836         } else {
1837                 dev_warn(&adapter->pdev->dev,
1838                         "No support for multiple RX queues\n");
1839                 return 1;
1840         }
1841 }
1842
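/* Requests one MSI-X vector per RX queue plus one shared TX/MCC vector.
 * If fewer vectors are available, retries with what the platform offers
 * and shrinks the number of RX queues to match.
 */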
1843 static void be_msix_enable(struct be_adapter *adapter)
1844 {
1845 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1846         int i, status;
1847
1848         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1849
1850         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1851                 adapter->msix_entries[i].entry = i;
1852
1853         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1854                         adapter->num_rx_qs + 1);
1855         if (status == 0) {
1856                 goto done;
1857         } else if (status >= BE_MIN_MSIX_VECTORS) {
1858                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1859                                 status) == 0) {
1860                         adapter->num_rx_qs = status - 1;
1861                         dev_warn(&adapter->pdev->dev,
1862                                 "Could alloc only %d MSIx vectors. "
1863                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1864                         goto done;
1865                 }
1866         }
1867         return;
1868 done:
1869         adapter->msix_enabled = true;
1870 }
1871
1872 static void be_sriov_enable(struct be_adapter *adapter)
1873 {
1874         be_check_sriov_fn_type(adapter);
1875 #ifdef CONFIG_PCI_IOV
1876         if (be_physfn(adapter) && num_vfs) {
1877                 int status;
1878
1879                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1880                 adapter->sriov_enabled = status ? false : true;
1881         }
1882 #endif
1883 }
1884
1885 static void be_sriov_disable(struct be_adapter *adapter)
1886 {
1887 #ifdef CONFIG_PCI_IOV
1888         if (adapter->sriov_enabled) {
1889                 pci_disable_sriov(adapter->pdev);
1890                 adapter->sriov_enabled = false;
1891         }
1892 #endif
1893 }
1894
1895 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1896 {
1897         return adapter->msix_entries[
1898                         be_evt_bit_get(adapter, eq_id)].vector;
1899 }
1900
1901 static int be_request_irq(struct be_adapter *adapter,
1902                 struct be_eq_obj *eq_obj,
1903                 void *handler, char *desc, void *context)
1904 {
1905         struct net_device *netdev = adapter->netdev;
1906         int vec;
1907
1908         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1909         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1910         return request_irq(vec, handler, 0, eq_obj->desc, context);
1911 }
1912
1913 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1914                         void *context)
1915 {
1916         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1917         free_irq(vec, context);
1918 }
1919
1920 static int be_msix_register(struct be_adapter *adapter)
1921 {
1922         struct be_rx_obj *rxo;
1923         int status, i;
1924         char qname[10];
1925
1926         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1927                                 adapter);
1928         if (status)
1929                 goto err;
1930
1931         for_all_rx_queues(adapter, rxo, i) {
1932                 sprintf(qname, "rxq%d", i);
1933                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1934                                 qname, rxo);
1935                 if (status)
1936                         goto err_msix;
1937         }
1938
1939         return 0;
1940
1941 err_msix:
1942         be_free_irq(adapter, &adapter->tx_eq, adapter);
1943
1944         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1945                 be_free_irq(adapter, &rxo->rx_eq, rxo);
1946
1947 err:
1948         dev_warn(&adapter->pdev->dev,
1949                 "MSIX Request IRQ failed - err %d\n", status);
1950         pci_disable_msix(adapter->pdev);
1951         adapter->msix_enabled = false;
1952         return status;
1953 }
1954
1955 static int be_irq_register(struct be_adapter *adapter)
1956 {
1957         struct net_device *netdev = adapter->netdev;
1958         int status;
1959
1960         if (adapter->msix_enabled) {
1961                 status = be_msix_register(adapter);
1962                 if (status == 0)
1963                         goto done;
1964                 /* INTx is not supported for VF */
1965                 if (!be_physfn(adapter))
1966                         return status;
1967         }
1968
1969         /* INTx */
1970         netdev->irq = adapter->pdev->irq;
1971         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1972                         adapter);
1973         if (status) {
1974                 dev_err(&adapter->pdev->dev,
1975                         "INTx request IRQ failed - err %d\n", status);
1976                 return status;
1977         }
1978 done:
1979         adapter->isr_registered = true;
1980         return 0;
1981 }
1982
1983 static void be_irq_unregister(struct be_adapter *adapter)
1984 {
1985         struct net_device *netdev = adapter->netdev;
1986         struct be_rx_obj *rxo;
1987         int i;
1988
1989         if (!adapter->isr_registered)
1990                 return;
1991
1992         /* INTx */
1993         if (!adapter->msix_enabled) {
1994                 free_irq(netdev->irq, adapter);
1995                 goto done;
1996         }
1997
1998         /* MSIx */
1999         be_free_irq(adapter, &adapter->tx_eq, adapter);
2000
2001         for_all_rx_queues(adapter, rxo, i)
2002                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2003
2004 done:
2005         adapter->isr_registered = false;
2006 }
2007
2008 static int be_close(struct net_device *netdev)
2009 {
2010         struct be_adapter *adapter = netdev_priv(netdev);
2011         struct be_rx_obj *rxo;
2012         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2013         int vec, i;
2014
2015         cancel_delayed_work_sync(&adapter->work);
2016
2017         be_async_mcc_disable(adapter);
2018
2019         netif_stop_queue(netdev);
2020         netif_carrier_off(netdev);
2021         adapter->link_up = false;
2022
2023         be_intr_set(adapter, false);
2024
2025         if (adapter->msix_enabled) {
2026                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
2027                 synchronize_irq(vec);
2028
2029                 for_all_rx_queues(adapter, rxo, i) {
2030                         vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
2031                         synchronize_irq(vec);
2032                 }
2033         } else {
2034                 synchronize_irq(netdev->irq);
2035         }
2036         be_irq_unregister(adapter);
2037
2038         for_all_rx_queues(adapter, rxo, i)
2039                 napi_disable(&rxo->rx_eq.napi);
2040
2041         napi_disable(&tx_eq->napi);
2042
2043         /* Wait for all pending tx completions to arrive so that
2044          * all tx skbs are freed.
2045          */
2046         be_tx_compl_clean(adapter);
2047
2048         return 0;
2049 }
2050
2051 static int be_open(struct net_device *netdev)
2052 {
2053         struct be_adapter *adapter = netdev_priv(netdev);
2054         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2055         struct be_rx_obj *rxo;
2056         bool link_up;
2057         int status, i;
2058         u8 mac_speed;
2059         u16 link_speed;
2060
2061         for_all_rx_queues(adapter, rxo, i) {
2062                 be_post_rx_frags(rxo);
2063                 napi_enable(&rxo->rx_eq.napi);
2064         }
2065         napi_enable(&tx_eq->napi);
2066
2067         be_irq_register(adapter);
2068
2069         be_intr_set(adapter, true);
2070
2071         /* The evt queues are created in unarmed state; arm them */
2072         for_all_rx_queues(adapter, rxo, i) {
2073                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2074                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2075         }
2076         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2077
2078         /* Now that interrupts are on we can process async mcc */
2079         be_async_mcc_enable(adapter);
2080
2081         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2082
2083         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2084                         &link_speed);
2085         if (status)
2086                 goto err;
2087         be_link_status_update(adapter, link_up);
2088
2089         if (be_physfn(adapter)) {
2090                 status = be_vid_config(adapter, false, 0);
2091                 if (status)
2092                         goto err;
2093
2094                 status = be_cmd_set_flow_control(adapter,
2095                                 adapter->tx_fc, adapter->rx_fc);
2096                 if (status)
2097                         goto err;
2098         }
2099
2100         return 0;
2101 err:
2102         be_close(adapter->netdev);
2103         return -EIO;
2104 }
2105
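/* Enables or disables magic-packet Wake-on-LAN: enabling programs the
 * netdev MAC address as the magic pattern, disabling programs a zeroed
 * MAC, and the PCI wake state is updated to match.
 */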
2106 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2107 {
2108         struct be_dma_mem cmd;
2109         int status = 0;
2110         u8 mac[ETH_ALEN];
2111
2112         memset(mac, 0, ETH_ALEN);
2113
2114         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2115         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2116         if (cmd.va == NULL)
2117                 return -ENOMEM;
2118         memset(cmd.va, 0, cmd.size);
2119
2120         if (enable) {
2121                 status = pci_write_config_dword(adapter->pdev,
2122                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2123                 if (status) {
2124                         dev_err(&adapter->pdev->dev,
2125                                 "Could not enable Wake-on-LAN\n");
2126                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2127                                         cmd.dma);
2128                         return status;
2129                 }
2130                 status = be_cmd_enable_magic_wol(adapter,
2131                                 adapter->netdev->dev_addr, &cmd);
2132                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2133                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2134         } else {
2135                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2136                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2137                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2138         }
2139
2140         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2141         return status;
2142 }
2143
2144 /*
2145  * Generate a seed MAC address from the PF MAC Address using jhash.
2146  * MAC addresses for VFs are assigned incrementally starting from the seed.
2147  * These addresses are programmed in the ASIC by the PF and the VF driver
2148  * queries for the MAC address during its probe.
2149  */
2150 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2151 {
2152         u32 vf = 0;
2153         int status = 0;
2154         u8 mac[ETH_ALEN];
2155
2156         be_vf_eth_addr_generate(adapter, mac);
2157
2158         for (vf = 0; vf < num_vfs; vf++) {
2159                 status = be_cmd_pmac_add(adapter, mac,
2160                                         adapter->vf_cfg[vf].vf_if_handle,
2161                                         &adapter->vf_cfg[vf].vf_pmac_id);
2162                 if (status)
2163                         dev_err(&adapter->pdev->dev,
2164                                 "MAC address add failed for VF %d\n", vf);
2165                 else
2166                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2167
2168                 mac[5] += 1;
2169         }
2170         return status;
2171 }
2172
2173 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2174 {
2175         u32 vf;
2176
2177         for (vf = 0; vf < num_vfs; vf++) {
2178                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2179                         be_cmd_pmac_del(adapter,
2180                                         adapter->vf_cfg[vf].vf_if_handle,
2181                                         adapter->vf_cfg[vf].vf_pmac_id);
2182         }
2183 }
2184
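/* One-time adapter setup: creates the interface (with RSS and
 * promiscuous capabilities on the PF), the per-VF interfaces when SR-IOV
 * is enabled, and then the TX, RX and MCC queues. On any failure the
 * goto chain unwinds in reverse order of creation.
 */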
2185 static int be_setup(struct be_adapter *adapter)
2186 {
2187         struct net_device *netdev = adapter->netdev;
2188         u32 cap_flags, en_flags, vf = 0;
2189         int status;
2190         u8 mac[ETH_ALEN];
2191
2192         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2193
2194         if (be_physfn(adapter)) {
2195                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2196                                 BE_IF_FLAGS_PROMISCUOUS |
2197                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2198                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2199
2200                 if (be_multi_rxq(adapter)) {
2201                         cap_flags |= BE_IF_FLAGS_RSS;
2202                         en_flags |= BE_IF_FLAGS_RSS;
2203                 }
2204         }
2205
2206         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2207                         netdev->dev_addr, false/* pmac_invalid */,
2208                         &adapter->if_handle, &adapter->pmac_id, 0);
2209         if (status != 0)
2210                 goto do_none;
2211
2212         if (be_physfn(adapter)) {
2213                 while (vf < num_vfs) {
2214                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2215                                         | BE_IF_FLAGS_BROADCAST;
2216                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2217                                         mac, true,
2218                                         &adapter->vf_cfg[vf].vf_if_handle,
2219                                         NULL, vf+1);
2220                         if (status) {
2221                                 dev_err(&adapter->pdev->dev,
2222                                 "Interface Create failed for VF %d\n", vf);
2223                                 goto if_destroy;
2224                         }
2225                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2226                         vf++;
2227                 }
2228         } else {
2229                 status = be_cmd_mac_addr_query(adapter, mac,
2230                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2231                 if (!status) {
2232                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2233                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2234                 }
2235         }
2236
2237         status = be_tx_queues_create(adapter);
2238         if (status != 0)
2239                 goto if_destroy;
2240
2241         status = be_rx_queues_create(adapter);
2242         if (status != 0)
2243                 goto tx_qs_destroy;
2244
2245         status = be_mcc_queues_create(adapter);
2246         if (status != 0)
2247                 goto rx_qs_destroy;
2248
2249         if (be_physfn(adapter)) {
2250                 status = be_vf_eth_addr_config(adapter);
2251                 if (status)
2252                         goto mcc_q_destroy;
2253         }
2254
2255         adapter->link_speed = -1;
2256
2257         return 0;
2258
2259 mcc_q_destroy:
2260         if (be_physfn(adapter))
2261                 be_vf_eth_addr_rem(adapter);
2262         be_mcc_queues_destroy(adapter);
2263 rx_qs_destroy:
2264         be_rx_queues_destroy(adapter);
2265 tx_qs_destroy:
2266         be_tx_queues_destroy(adapter);
2267 if_destroy:
2268         for (vf = 0; vf < num_vfs; vf++)
2269                 if (adapter->vf_cfg[vf].vf_if_handle)
2270                         be_cmd_if_destroy(adapter,
2271                                         adapter->vf_cfg[vf].vf_if_handle);
2272         be_cmd_if_destroy(adapter, adapter->if_handle);
2273 do_none:
2274         return status;
2275 }
2276
2277 static int be_clear(struct be_adapter *adapter)
2278 {
2279         if (be_physfn(adapter))
2280                 be_vf_eth_addr_rem(adapter);
2281
2282         be_mcc_queues_destroy(adapter);
2283         be_rx_queues_destroy(adapter);
2284         be_tx_queues_destroy(adapter);
2285
2286         be_cmd_if_destroy(adapter, adapter->if_handle);
2287
2288         /* tell fw we're done with firing cmds */
2289         be_cmd_fw_clean(adapter);
2290         return 0;
2291 }
2292
2294 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2295 char flash_cookie[2][16] =      {"*** SE FLAS",
2296                                 "H DIRECTORY *** "};
2297
2298 static bool be_flash_redboot(struct be_adapter *adapter,
2299                         const u8 *p, u32 img_start, int image_size,
2300                         int hdr_size)
2301 {
2302         u32 crc_offset;
2303         u8 flashed_crc[4];
2304         int status;
2305
2306         crc_offset = hdr_size + img_start + image_size - 4;
2307
2308         p += crc_offset;
2309
2310         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2311                         (image_size - 4));
2312         if (status) {
2313                 dev_err(&adapter->pdev->dev,
2314                 "could not get crc from flash, not flashing redboot\n");
2315                 return false;
2316         }
2317
2318         /* update redboot only if crc does not match */
2319         return memcmp(flashed_crc, p, 4) != 0;
2323 }
2324
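/* Walks the per-generation flash component table and writes each image
 * section of the firmware file to flash in 32KB chunks; the final chunk
 * of a section is issued with FLASHROM_OPER_FLASH to commit it, earlier
 * chunks with FLASHROM_OPER_SAVE.
 */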
2325 static int be_flash_data(struct be_adapter *adapter,
2326                         const struct firmware *fw,
2327                         struct be_dma_mem *flash_cmd, int num_of_images)
2329 {
2330         int status = 0, i, filehdr_size = 0;
2331         u32 total_bytes = 0, flash_op;
2332         int num_bytes;
2333         const u8 *p = fw->data;
2334         struct be_cmd_write_flashrom *req = flash_cmd->va;
2335         struct flash_comp *pflashcomp;
2336         int num_comp;
2337
2338         struct flash_comp gen3_flash_types[9] = {
2339                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2340                         FLASH_IMAGE_MAX_SIZE_g3},
2341                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2342                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2343                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2344                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2345                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2346                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2347                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2348                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2349                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2350                         FLASH_IMAGE_MAX_SIZE_g3},
2351                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2352                         FLASH_IMAGE_MAX_SIZE_g3},
2353                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2354                         FLASH_IMAGE_MAX_SIZE_g3},
2355                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2356                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2357         };
2358         struct flash_comp gen2_flash_types[8] = {
2359                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2360                         FLASH_IMAGE_MAX_SIZE_g2},
2361                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2362                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2363                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2364                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2365                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2366                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2367                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2368                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2369                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2370                         FLASH_IMAGE_MAX_SIZE_g2},
2371                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2372                         FLASH_IMAGE_MAX_SIZE_g2},
2373                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2374                          FLASH_IMAGE_MAX_SIZE_g2}
2375         };
2376
2377         if (adapter->generation == BE_GEN3) {
2378                 pflashcomp = gen3_flash_types;
2379                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2380                 num_comp = 9;
2381         } else {
2382                 pflashcomp = gen2_flash_types;
2383                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2384                 num_comp = 8;
2385         }
2386         for (i = 0; i < num_comp; i++) {
2387                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2388                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2389                         continue;
2390                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2391                         (!be_flash_redboot(adapter, fw->data,
2392                          pflashcomp[i].offset, pflashcomp[i].size,
2393                          filehdr_size)))
2394                         continue;
2395                 p = fw->data;
2396                 p += filehdr_size + pflashcomp[i].offset
2397                         + (num_of_images * sizeof(struct image_hdr));
2398                 if (p + pflashcomp[i].size > fw->data + fw->size)
2399                         return -1;
2400                 total_bytes = pflashcomp[i].size;
2401                 while (total_bytes) {
2402                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
2406                         total_bytes -= num_bytes;
2407
2408                         if (!total_bytes)
2409                                 flash_op = FLASHROM_OPER_FLASH;
2410                         else
2411                                 flash_op = FLASHROM_OPER_SAVE;
2412                         memcpy(req->params.data_buf, p, num_bytes);
2413                         p += num_bytes;
2414                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2415                                 pflashcomp[i].optype, flash_op, num_bytes);
2416                         if (status) {
2417                                 dev_err(&adapter->pdev->dev,
2418                                         "cmd to write to flash rom failed.\n");
2419                                 return -1;
2420                         }
2421                         yield();
2422                 }
2423         }
2424         return 0;
2425 }
2426
2427 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2428 {
2429         if (fhdr == NULL)
2430                 return 0;
2431         if (fhdr->build[0] == '3')
2432                 return BE_GEN3;
2433         else if (fhdr->build[0] == '2')
2434                 return BE_GEN2;
2435         else
2436                 return 0;
2437 }
2438
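/* Entry point for ethtool flashing: loads the UFI file named by "func",
 * verifies that its generation matches the adapter's and flashes the
 * contained images via be_flash_data().
 */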
2439 int be_load_fw(struct be_adapter *adapter, u8 *func)
2440 {
2441         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2442         const struct firmware *fw;
2443         struct flash_file_hdr_g2 *fhdr;
2444         struct flash_file_hdr_g3 *fhdr3;
2445         struct image_hdr *img_hdr_ptr = NULL;
2446         struct be_dma_mem flash_cmd;
2447         int status, i = 0, num_imgs = 0;
2448         const u8 *p;
2449
2450         strcpy(fw_file, func);
2451
2452         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2453         if (status)
2454                 goto fw_exit;
2455
2456         p = fw->data;
2457         fhdr = (struct flash_file_hdr_g2 *) p;
2458         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2459
2460         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2461         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2462                                         &flash_cmd.dma);
2463         if (!flash_cmd.va) {
2464                 status = -ENOMEM;
2465                 dev_err(&adapter->pdev->dev,
2466                         "Memory allocation failure while flashing\n");
2467                 goto fw_exit;
2468         }
2469
2470         if ((adapter->generation == BE_GEN3) &&
2471                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2472                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2473                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2474                 for (i = 0; i < num_imgs; i++) {
2475                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2476                                         (sizeof(struct flash_file_hdr_g3) +
2477                                          i * sizeof(struct image_hdr)));
2478                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2479                                 status = be_flash_data(adapter, fw, &flash_cmd,
2480                                                         num_imgs);
2481                 }
2482         } else if ((adapter->generation == BE_GEN2) &&
2483                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2484                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2485         } else {
2486                 dev_err(&adapter->pdev->dev,
2487                         "UFI and Interface are not compatible for flashing\n");
2488                 status = -1;
2489         }
2490
2491         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2492                                 flash_cmd.dma);
2493         if (status) {
2494                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2495                 goto fw_exit;
2496         }
2497
2498         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2499
2500 fw_exit:
2501         release_firmware(fw);
2502         return status;
2503 }
2504
2505 static const struct net_device_ops be_netdev_ops = {
2506         .ndo_open               = be_open,
2507         .ndo_stop               = be_close,
2508         .ndo_start_xmit         = be_xmit,
2509         .ndo_set_rx_mode        = be_set_multicast_list,
2510         .ndo_set_mac_address    = be_mac_addr_set,
2511         .ndo_change_mtu         = be_change_mtu,
2512         .ndo_validate_addr      = eth_validate_addr,
2513         .ndo_vlan_rx_register   = be_vlan_register,
2514         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2515         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2516         .ndo_set_vf_mac         = be_set_vf_mac,
2517         .ndo_set_vf_vlan        = be_set_vf_vlan,
2518         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2519         .ndo_get_vf_config      = be_get_vf_config
2520 };
2521
2522 static void be_netdev_init(struct net_device *netdev)
2523 {
2524         struct be_adapter *adapter = netdev_priv(netdev);
2525         struct be_rx_obj *rxo;
2526         int i;
2527
2528         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2529                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2530                 NETIF_F_GRO | NETIF_F_TSO6;
2531
2532         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2533
2534         netdev->flags |= IFF_MULTICAST;
2535
2536         adapter->rx_csum = true;
2537
2538         /* Default settings for Rx and Tx flow control */
2539         adapter->rx_fc = true;
2540         adapter->tx_fc = true;
2541
2542         netif_set_gso_max_size(netdev, 65535);
2543
2544         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2545
2546         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2547
2548         for_all_rx_queues(adapter, rxo, i)
2549                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2550                                 BE_NAPI_WEIGHT);
2551
2552         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2553                 BE_NAPI_WEIGHT);
2554
2555         netif_carrier_off(netdev);
2556         netif_stop_queue(netdev);
2557 }
2558
2559 static void be_unmap_pci_bars(struct be_adapter *adapter)
2560 {
2561         if (adapter->csr)
2562                 iounmap(adapter->csr);
2563         if (adapter->db)
2564                 iounmap(adapter->db);
2565         if (adapter->pcicfg && be_physfn(adapter))
2566                 iounmap(adapter->pcicfg);
2567 }
2568
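/* Maps the CSR, doorbell and PCI-config BARs. The BAR numbers differ
 * between BE2 (gen2) and BE3 (gen3) and between PF and VF; a VF reaches
 * its config space at an offset within the doorbell BAR instead.
 */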
2569 static int be_map_pci_bars(struct be_adapter *adapter)
2570 {
2571         u8 __iomem *addr;
2572         int pcicfg_reg, db_reg;
2573
2574         if (be_physfn(adapter)) {
2575                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2576                                 pci_resource_len(adapter->pdev, 2));
2577                 if (addr == NULL)
2578                         return -ENOMEM;
2579                 adapter->csr = addr;
2580         }
2581
2582         if (adapter->generation == BE_GEN2) {
2583                 pcicfg_reg = 1;
2584                 db_reg = 4;
2585         } else {
2586                 pcicfg_reg = 0;
2587                 if (be_physfn(adapter))
2588                         db_reg = 4;
2589                 else
2590                         db_reg = 0;
2591         }
2592         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2593                                 pci_resource_len(adapter->pdev, db_reg));
2594         if (addr == NULL)
2595                 goto pci_map_err;
2596         adapter->db = addr;
2597
2598         if (be_physfn(adapter)) {
2599                 addr = ioremap_nocache(
2600                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2601                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2602                 if (addr == NULL)
2603                         goto pci_map_err;
2604                 adapter->pcicfg = addr;
2605         } else
2606                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2607
2608         return 0;
2609 pci_map_err:
2610         be_unmap_pci_bars(adapter);
2611         return -ENOMEM;
2612 }
2613
2615 static void be_ctrl_cleanup(struct be_adapter *adapter)
2616 {
2617         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2618
2619         be_unmap_pci_bars(adapter);
2620
2621         if (mem->va)
2622                 pci_free_consistent(adapter->pdev, mem->size,
2623                         mem->va, mem->dma);
2624
2625         mem = &adapter->mc_cmd_mem;
2626         if (mem->va)
2627                 pci_free_consistent(adapter->pdev, mem->size,
2628                         mem->va, mem->dma);
2629 }
2630
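/* Maps the PCI BARs and allocates the DMA memory used for mailbox and
 * multicast commands; the mailbox must be 16-byte aligned, so a slightly
 * larger buffer is allocated and aligned with PTR_ALIGN().
 */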
2631 static int be_ctrl_init(struct be_adapter *adapter)
2632 {
2633         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2634         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2635         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2636         int status;
2637
2638         status = be_map_pci_bars(adapter);
2639         if (status)
2640                 goto done;
2641
2642         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2643         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2644                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2645         if (!mbox_mem_alloc->va) {
2646                 status = -ENOMEM;
2647                 goto unmap_pci_bars;
2648         }
2649
2650         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2651         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2652         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2653         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2654
2655         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2656         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2657                         &mc_cmd_mem->dma);
2658         if (mc_cmd_mem->va == NULL) {
2659                 status = -ENOMEM;
2660                 goto free_mbox;
2661         }
2662         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2663
2664         spin_lock_init(&adapter->mbox_lock);
2665         spin_lock_init(&adapter->mcc_lock);
2666         spin_lock_init(&adapter->mcc_cq_lock);
2667
2668         init_completion(&adapter->flash_compl);
2669         pci_save_state(adapter->pdev);
2670         return 0;
2671
2672 free_mbox:
2673         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2674                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2675
2676 unmap_pci_bars:
2677         be_unmap_pci_bars(adapter);
2678
2679 done:
2680         return status;
2681 }
2682
2683 static void be_stats_cleanup(struct be_adapter *adapter)
2684 {
2685         struct be_dma_mem *cmd = &adapter->stats_cmd;
2686
2687         if (cmd->va)
2688                 pci_free_consistent(adapter->pdev, cmd->size,
2689                         cmd->va, cmd->dma);
2690 }
2691
2692 static int be_stats_init(struct be_adapter *adapter)
2693 {
2694         struct be_dma_mem *cmd = &adapter->stats_cmd;
2695
2696         cmd->size = sizeof(struct be_cmd_req_get_stats);
2697         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2698         if (cmd->va == NULL)
2699                 return -1;
2700         memset(cmd->va, 0, cmd->size);
2701         return 0;
2702 }
2703
2704 static void __devexit be_remove(struct pci_dev *pdev)
2705 {
2706         struct be_adapter *adapter = pci_get_drvdata(pdev);
2707
2708         if (!adapter)
2709                 return;
2710
2711         unregister_netdev(adapter->netdev);
2712
2713         be_clear(adapter);
2714
2715         be_stats_cleanup(adapter);
2716
2717         be_ctrl_cleanup(adapter);
2718
2719         be_sriov_disable(adapter);
2720
2721         be_msix_disable(adapter);
2722
2723         pci_set_drvdata(pdev, NULL);
2724         pci_release_regions(pdev);
2725         pci_disable_device(pdev);
2726
2727         free_netdev(adapter->netdev);
2728 }
2729
2730 static int be_get_config(struct be_adapter *adapter)
2731 {
2732         int status;
2733         u8 mac[ETH_ALEN];
2734
2735         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2736         if (status)
2737                 return status;
2738
2739         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2740                         &adapter->function_mode, &adapter->function_caps);
2741         if (status)
2742                 return status;
2743
2744         memset(mac, 0, ETH_ALEN);
2745
2746         if (be_physfn(adapter)) {
2747                 status = be_cmd_mac_addr_query(adapter, mac,
2748                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2749
2750                 if (status)
2751                         return status;
2752
2753                 if (!is_valid_ether_addr(mac))
2754                         return -EADDRNOTAVAIL;
2755
2756                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2757                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2758         }
2759
2760         if (adapter->function_mode & 0x400)
2761                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2762         else
2763                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2764
2765         return 0;
2766 }
2767
2768 static int __devinit be_probe(struct pci_dev *pdev,
2769                         const struct pci_device_id *pdev_id)
2770 {
2771         int status = 0;
2772         struct be_adapter *adapter;
2773         struct net_device *netdev;
2774
2775         status = pci_enable_device(pdev);
2776         if (status)
2777                 goto do_none;
2778
2779         status = pci_request_regions(pdev, DRV_NAME);
2780         if (status)
2781                 goto disable_dev;
2782         pci_set_master(pdev);
2783
2784         netdev = alloc_etherdev(sizeof(struct be_adapter));
2785         if (netdev == NULL) {
2786                 status = -ENOMEM;
2787                 goto rel_reg;
2788         }
2789         adapter = netdev_priv(netdev);
2790
2791         switch (pdev->device) {
2792         case BE_DEVICE_ID1:
2793         case OC_DEVICE_ID1:
2794                 adapter->generation = BE_GEN2;
2795                 break;
2796         case BE_DEVICE_ID2:
2797         case OC_DEVICE_ID2:
2798                 adapter->generation = BE_GEN3;
2799                 break;
2800         default:
2801                 adapter->generation = 0;
2802         }
2803
2804         adapter->pdev = pdev;
2805         pci_set_drvdata(pdev, adapter);
2806         adapter->netdev = netdev;
2807         SET_NETDEV_DEV(netdev, &pdev->dev);
2808
2809         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2810         if (!status) {
2811                 netdev->features |= NETIF_F_HIGHDMA;
2812         } else {
2813                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2814                 if (status) {
2815                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2816                         goto free_netdev;
2817                 }
2818         }
2819
2820         be_sriov_enable(adapter);
2821
2822         status = be_ctrl_init(adapter);
2823         if (status)
2824                 goto free_netdev;
2825
2826         /* sync up with fw's ready state */
2827         if (be_physfn(adapter)) {
2828                 status = be_cmd_POST(adapter);
2829                 if (status)
2830                         goto ctrl_clean;
2831         }
2832
2833         /* tell fw we're ready to fire cmds */
2834         status = be_cmd_fw_init(adapter);
2835         if (status)
2836                 goto ctrl_clean;
2837
2838         if (be_physfn(adapter)) {
2839                 status = be_cmd_reset_function(adapter);
2840                 if (status)
2841                         goto ctrl_clean;
2842         }
2843
2844         status = be_stats_init(adapter);
2845         if (status)
2846                 goto ctrl_clean;
2847
2848         status = be_get_config(adapter);
2849         if (status)
2850                 goto stats_clean;
2851
2852         be_msix_enable(adapter);
2853
2854         INIT_DELAYED_WORK(&adapter->work, be_worker);
2855
2856         status = be_setup(adapter);
2857         if (status)
2858                 goto msix_disable;
2859
2860         be_netdev_init(netdev);
2861         status = register_netdev(netdev);
2862         if (status != 0)
2863                 goto unsetup;
2864
2865         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2866         return 0;
2867
2868 unsetup:
2869         be_clear(adapter);
2870 msix_disable:
2871         be_msix_disable(adapter);
2872 stats_clean:
2873         be_stats_cleanup(adapter);
2874 ctrl_clean:
2875         be_ctrl_cleanup(adapter);
2876 free_netdev:
2877         be_sriov_disable(adapter);
2878         free_netdev(adapter->netdev);
2879         pci_set_drvdata(pdev, NULL);
2880 rel_reg:
2881         pci_release_regions(pdev);
2882 disable_dev:
2883         pci_disable_device(pdev);
2884 do_none:
2885         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2886         return status;
2887 }
2888
2889 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2890 {
2891         struct be_adapter *adapter = pci_get_drvdata(pdev);
2892         struct net_device *netdev =  adapter->netdev;
2893
2894         if (adapter->wol)
2895                 be_setup_wol(adapter, true);
2896
2897         netif_device_detach(netdev);
2898         if (netif_running(netdev)) {
2899                 rtnl_lock();
2900                 be_close(netdev);
2901                 rtnl_unlock();
2902         }
2903         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2904         be_clear(adapter);
2905
2906         pci_save_state(pdev);
2907         pci_disable_device(pdev);
2908         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2909         return 0;
2910 }
2911
2912 static int be_resume(struct pci_dev *pdev)
2913 {
2914         int status = 0;
2915         struct be_adapter *adapter = pci_get_drvdata(pdev);
2916         struct net_device *netdev =  adapter->netdev;
2917
2918         netif_device_detach(netdev);
2919
2920         status = pci_enable_device(pdev);
2921         if (status)
2922                 return status;
2923
2924         pci_set_power_state(pdev, PCI_D0);
2925         pci_restore_state(pdev);
2926
2927         /* tell fw we're ready to fire cmds */
2928         status = be_cmd_fw_init(adapter);
2929         if (status)
2930                 return status;
2931
2932         be_setup(adapter);
2933         if (netif_running(netdev)) {
2934                 rtnl_lock();
2935                 be_open(netdev);
2936                 rtnl_unlock();
2937         }
2938         netif_device_attach(netdev);
2939
2940         if (adapter->wol)
2941                 be_setup_wol(adapter, false);
2942         return 0;
2943 }
2944
2945 /*
2946  * An FLR will stop BE from DMAing any data.
2947  */
2948 static void be_shutdown(struct pci_dev *pdev)
2949 {
2950         struct be_adapter *adapter = pci_get_drvdata(pdev);
2951         struct net_device *netdev =  adapter->netdev;
2952
2953         netif_device_detach(netdev);
2954
2955         be_cmd_reset_function(adapter);
2956
2957         if (adapter->wol)
2958                 be_setup_wol(adapter, true);
2959
2960         pci_disable_device(pdev);
2961 }
2962
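/* EEH (PCI error recovery) callbacks: on error the netdev is detached
 * and the adapter torn down; a slot reset re-enables the device and
 * waits for firmware POST; be_eeh_resume() then rebuilds the setup.
 */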
2963 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2964                                 pci_channel_state_t state)
2965 {
2966         struct be_adapter *adapter = pci_get_drvdata(pdev);
2967         struct net_device *netdev =  adapter->netdev;
2968
2969         dev_err(&adapter->pdev->dev, "EEH error detected\n");
2970
2971         adapter->eeh_err = true;
2972
2973         netif_device_detach(netdev);
2974
2975         if (netif_running(netdev)) {
2976                 rtnl_lock();
2977                 be_close(netdev);
2978                 rtnl_unlock();
2979         }
2980         be_clear(adapter);
2981
2982         if (state == pci_channel_io_perm_failure)
2983                 return PCI_ERS_RESULT_DISCONNECT;
2984
2985         pci_disable_device(pdev);
2986
2987         return PCI_ERS_RESULT_NEED_RESET;
2988 }
2989
2990 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2991 {
2992         struct be_adapter *adapter = pci_get_drvdata(pdev);
2993         int status;
2994
2995         dev_info(&adapter->pdev->dev, "EEH reset\n");
2996         adapter->eeh_err = false;
2997
2998         status = pci_enable_device(pdev);
2999         if (status)
3000                 return PCI_ERS_RESULT_DISCONNECT;
3001
3002         pci_set_master(pdev);
3003         pci_set_power_state(pdev, PCI_D0);
3004         pci_restore_state(pdev);
3005
3006         /* Check if card is ok and fw is ready */
3007         status = be_cmd_POST(adapter);
3008         if (status)
3009                 return PCI_ERS_RESULT_DISCONNECT;
3010
3011         return PCI_ERS_RESULT_RECOVERED;
3012 }
3013
3014 static void be_eeh_resume(struct pci_dev *pdev)
3015 {
3016         int status = 0;
3017         struct be_adapter *adapter = pci_get_drvdata(pdev);
3018         struct net_device *netdev =  adapter->netdev;
3019
3020         dev_info(&adapter->pdev->dev, "EEH resume\n");
3021
3022         pci_save_state(pdev);
3023
3024         /* tell fw we're ready to fire cmds */
3025         status = be_cmd_fw_init(adapter);
3026         if (status)
3027                 goto err;
3028
3029         status = be_setup(adapter);
3030         if (status)
3031                 goto err;
3032
3033         if (netif_running(netdev)) {
3034                 status = be_open(netdev);
3035                 if (status)
3036                         goto err;
3037         }
3038         netif_device_attach(netdev);
3039         return;
3040 err:
3041         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3042 }
3043
3044 static struct pci_error_handlers be_eeh_handlers = {
3045         .error_detected = be_eeh_err_detected,
3046         .slot_reset = be_eeh_reset,
3047         .resume = be_eeh_resume,
3048 };
3049
3050 static struct pci_driver be_driver = {
3051         .name = DRV_NAME,
3052         .id_table = be_dev_ids,
3053         .probe = be_probe,
3054         .remove = be_remove,
3055         .suspend = be_suspend,
3056         .resume = be_resume,
3057         .shutdown = be_shutdown,
3058         .err_handler = &be_eeh_handlers
3059 };
3060
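/* Module init: validates the module parameters before registering the
 * PCI driver. Example invocation (hypothetical values):
 *      modprobe be2net rx_frag_size=4096 num_vfs=4
 */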
3061 static int __init be_init_module(void)
3062 {
3063         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3064             rx_frag_size != 2048) {
3065                 printk(KERN_WARNING DRV_NAME
3066                         " : Module param rx_frag_size must be 2048/4096/8192."
3067                         " Using 2048\n");
3068                 rx_frag_size = 2048;
3069         }
3070
3071         if (num_vfs > 32) {
3072                 printk(KERN_WARNING DRV_NAME
3073                         " : Module param num_vfs must not be greater than 32."
3074                         " Using 32\n");
3075                 num_vfs = 32;
3076         }
3077
3078         return pci_register_driver(&be_driver);
3079 }
3080 module_init(be_init_module);
3081
3082 static void __exit be_exit_module(void)
3083 {
3084         pci_unregister_driver(&be_driver);
3085 }
3086 module_exit(be_exit_module);