/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

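/* Free the DMA-coherent ring memory backing a queue, if any was allocated */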
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

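/* Allocate and zero the DMA-coherent ring memory for a queue of
 * 'len' entries of 'entry_size' bytes each
 */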
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

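/* Enable or disable host interrupt delivery via the membar control register */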
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

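/* Ring the RX queue doorbell to tell hardware how many buffers were posted */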
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

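/* ndo_set_mac_address handler: replace the pmac entry in hardware (PF only)
 * and mirror the new address into the netdev
 */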
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

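/* Fold the hardware RXF/ERX counters and the driver's own counters into
 * the netdev stats structure
 */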
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped =
                erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

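/* Fill one WRB with a fragment's DMA address and length */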
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

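/* Build the header WRB that describes checksum, LSO and VLAN handling
 * for the skb
 */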
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

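/* DMA-map the skb head and frags and write the WRBs into the TX ring.
 * Returns the number of bytes queued, or 0 on a DMA mapping failure
 * (in which case all mappings made so far are undone).
 */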
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

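/* ndo_start_xmit handler */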
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

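/* Program the RX filter: promiscuous, multicast-promiscuous or the exact
 * multicast list, depending on netdev flags and list size
 */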
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return !(cso && l4_cksm && ipcksm && ipv6_chk);
}

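/* Return the page_info for a posted RX frag; unmap the backing page when
 * its last frag is consumed and take the frag off the RX queue
 */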
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards;
         * ignore it if vtm is not set
         */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj = &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards;
         * ignore it if vtm is not set
         */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd);
}

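/* Return the next valid RX completion from the RX CQ, or NULL if none is
 * pending; the entry is converted to host endianness before returning
 */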
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }
}

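/* Return the next valid TX completion, clearing its valid bit, or NULL if
 * none is pending
 */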
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

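/* Unmap the frags of the transmitted skb ending at last_index and free it */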
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

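/* Drain all pending entries from the EQ, re-arm it and schedule NAPI if any
 * events were found
 */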
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues
 */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

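/* Drain any pending RX completions and free posted but unconsumed RX buffers */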
static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

1517 static void be_rx_queues_destroy(struct be_adapter *adapter)
1518 {
1519         struct be_queue_info *q;
1520
1521         q = &adapter->rx_obj.q;
1522         if (q->created) {
1523                 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1524
1525                 /* After the rxq is invalidated, wait for a grace period
1526                  * of 1ms for all DMA to finish and the flush compl to arrive
1527                  */
1528                 mdelay(1);
1529                 be_rx_q_clean(adapter);
1530         }
1531         be_queue_free(adapter, q);
1532
1533         q = &adapter->rx_obj.cq;
1534         if (q->created)
1535                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1536         be_queue_free(adapter, q);
1537
1538         /* Clear any residual events */
1539         be_eq_clean(adapter, &adapter->rx_eq);
1540
1541         q = &adapter->rx_eq.q;
1542         if (q->created)
1543                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1544         be_queue_free(adapter, q);
1545 }
1546
1547 static int be_rx_queues_create(struct be_adapter *adapter)
1548 {
1549         struct be_queue_info *eq, *q, *cq;
1550         int rc;
1551
1552         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1553         adapter->rx_eq.max_eqd = BE_MAX_EQD;
1554         adapter->rx_eq.min_eqd = 0;
1555         adapter->rx_eq.cur_eqd = 0;
1556         adapter->rx_eq.enable_aic = true;
1557
1558         /* Alloc Rx Event queue */
1559         eq = &adapter->rx_eq.q;
1560         rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1561                                 sizeof(struct be_eq_entry));
1562         if (rc)
1563                 return rc;
1564
1565         /* Ask BE to create Rx Event queue */
1566         rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
1567         if (rc)
1568                 goto rx_eq_free;
1569
1570         /* Alloc RX eth compl queue */
1571         cq = &adapter->rx_obj.cq;
1572         rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1573                         sizeof(struct be_eth_rx_compl));
1574         if (rc)
1575                 goto rx_eq_destroy;
1576
1577         /* Ask BE to create Rx eth compl queue */
1578         rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579         if (rc)
1580                 goto rx_cq_free;
1581
1582         /* Alloc RX eth queue */
1583         q = &adapter->rx_obj.q;
1584         rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
1585         if (rc)
1586                 goto rx_cq_destroy;
1587
1588         /* Ask BE to create Rx eth queue */
1589         rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1590                 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
1591         if (rc)
1592                 goto rx_q_free;
1593
1594         return 0;
1595 rx_q_free:
1596         be_queue_free(adapter, q);
1597 rx_cq_destroy:
1598         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1599 rx_cq_free:
1600         be_queue_free(adapter, cq);
1601 rx_eq_destroy:
1602         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1603 rx_eq_free:
1604         be_queue_free(adapter, eq);
1605         return rc;
1606 }
1607
1608 /* There are 8 evt ids per func. Returns the evt id's bit number */
1609 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1610 {
1611         return eq_id - adapter->base_eq_id;
1612 }
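
/*
 * Illustrative sketch, not part of the driver: how the bit number from
 * be_evt_bit_get() could be tested against a CEV ISR word, read the same
 * way be_intx() below reads it.
 */
static inline bool be_eq_has_event(struct be_adapter *adapter, u32 eq_id,
                u32 isr)
{
        /* each function owns 8 event bits; position = eq_id - base_eq_id */
        return isr & (1 << be_evt_bit_get(adapter, eq_id));
}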
1613
1614 static irqreturn_t be_intx(int irq, void *dev)
1615 {
1616         struct be_adapter *adapter = dev;
1617         int isr;
1618
1619         isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1620                 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1621         if (!isr)
1622                 return IRQ_NONE;
1623
1624         event_handle(adapter, &adapter->tx_eq);
1625         event_handle(adapter, &adapter->rx_eq);
1626
1627         return IRQ_HANDLED;
1628 }
1629
1630 static irqreturn_t be_msix_rx(int irq, void *dev)
1631 {
1632         struct be_adapter *adapter = dev;
1633
1634         event_handle(adapter, &adapter->rx_eq);
1635
1636         return IRQ_HANDLED;
1637 }
1638
1639 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1640 {
1641         struct be_adapter *adapter = dev;
1642
1643         event_handle(adapter, &adapter->tx_eq);
1644
1645         return IRQ_HANDLED;
1646 }
1647
1648 static inline bool do_gro(struct be_adapter *adapter,
1649                         struct be_eth_rx_compl *rxcp)
1650 {
1651         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1652         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1653
1654         if (err)
1655                 drvr_stats(adapter)->be_rxcp_err++;
1656
1657         return tcp_frame && !err;
1658 }
1659
1660 int be_poll_rx(struct napi_struct *napi, int budget)
1661 {
1662         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1663         struct be_adapter *adapter =
1664                 container_of(rx_eq, struct be_adapter, rx_eq);
1665         struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1666         struct be_eth_rx_compl *rxcp;
1667         u32 work_done;
1668
1669         adapter->stats.drvr_stats.be_rx_polls++;
1670         for (work_done = 0; work_done < budget; work_done++) {
1671                 rxcp = be_rx_compl_get(adapter);
1672                 if (!rxcp)
1673                         break;
1674
1675                 if (do_gro(adapter, rxcp))
1676                         be_rx_compl_process_gro(adapter, rxcp);
1677                 else
1678                         be_rx_compl_process(adapter, rxcp);
1679
1680                 be_rx_compl_reset(rxcp);
1681         }
1682
1683         /* Refill the queue */
1684         if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
1685                 be_post_rx_frags(adapter);
1686
1687         /* All consumed */
1688         if (work_done < budget) {
1689                 napi_complete(napi);
1690                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1691         } else {
1692                 /* More to be consumed; continue with interrupts disabled */
1693                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1694         }
1695         return work_done;
1696 }
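
/* Minimal NAPI skeleton (illustrative only) restating the contract
 * be_poll_rx() above follows: consume at most 'budget' completions, and
 * call napi_complete() + re-arm only once the ring is drained.
 * example_next_compl() and example_rearm() are hypothetical helpers.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work = 0;

        while (work < budget && example_next_compl())
                work++;

        if (work < budget) {
                napi_complete(napi);    /* drained; interrupts may resume */
                example_rearm();
        }
        return work;
}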
1697
1698 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1699  * For TX/MCC we don't honour budget; consume everything
1700  */
1701 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1702 {
1703         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1704         struct be_adapter *adapter =
1705                 container_of(tx_eq, struct be_adapter, tx_eq);
1706         struct be_queue_info *txq = &adapter->tx_obj.q;
1707         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1708         struct be_eth_tx_compl *txcp;
1709         int tx_compl = 0, mcc_compl, status = 0;
1710         u16 end_idx;
1711
1712         while ((txcp = be_tx_compl_get(tx_cq))) {
1713                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1714                                 wrb_index, txcp);
1715                 be_tx_compl_process(adapter, end_idx);
1716                 tx_compl++;
1717         }
1718
1719         mcc_compl = be_process_mcc(adapter, &status);
1720
1721         napi_complete(napi);
1722
1723         if (mcc_compl) {
1724                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1725                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1726         }
1727
1728         if (tx_compl) {
1729                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1730
1731                 /* As Tx wrbs have been freed up, wake up netdev queue if
1732                  * it was stopped due to lack of tx wrbs.
1733                  */
1734                 if (netif_queue_stopped(adapter->netdev) &&
1735                         atomic_read(&txq->used) < txq->len / 2) {
1736                         netif_wake_queue(adapter->netdev);
1737                 }
1738
1739                 drvr_stats(adapter)->be_tx_events++;
1740                 drvr_stats(adapter)->be_tx_compl += tx_compl;
1741         }
1742
1743         return 1;
1744 }
1745
1746 static inline bool be_detect_ue(struct be_adapter *adapter)
1747 {
1748         u32 online0 = 0, online1 = 0;
1749
1750         pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752         pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754         if (!online0 || !online1) {
1755                 adapter->ue_detected = true;
1756                 dev_err(&adapter->pdev->dev,
1757                         "UE Detected!! online0=%x online1=%x\n",
1758                         online0, online1);
1759                 return true;
1760         }
1761
1762         return false;
1763 }
1764
1765 void be_dump_ue(struct be_adapter *adapter)
1766 {
1767         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1768         u32 i;
1769
1770         pci_read_config_dword(adapter->pdev,
1771                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1772         pci_read_config_dword(adapter->pdev,
1773                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1774         pci_read_config_dword(adapter->pdev,
1775                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1776         pci_read_config_dword(adapter->pdev,
1777                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1778
1779         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1780         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1781
1782         if (ue_status_lo) {
1783                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1784                         if (ue_status_lo & 1)
1785                                 dev_err(&adapter->pdev->dev,
1786                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1787                 }
1788         }
1789         if (ue_status_hi) {
1790                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1791                         if (ue_status_hi & 1)
1792                                 dev_err(&adapter->pdev->dev,
1793                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1794                 }
1795         }
1797 }
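
/* Sketch (illustrative): the two nearly identical loops above could be
 * folded into a single helper that decodes any 32-bit UE status word
 * against its descriptor table.
 */
static void be_dump_ue_word(struct be_adapter *adapter, u32 status,
                char **desc)
{
        u32 i;

        for (i = 0; status; status >>= 1, i++)
                if (status & 1)
                        dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", desc[i]);
}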
1798
1799 static void be_worker(struct work_struct *work)
1800 {
1801         struct be_adapter *adapter =
1802                 container_of(work, struct be_adapter, work.work);
1803
1804         if (!adapter->stats_ioctl_sent)
1805                 be_cmd_get_stats(adapter, &adapter->stats.cmd);
1806
1807         /* Set EQ delay */
1808         be_rx_eqd_update(adapter);
1809
1810         be_tx_rate_update(adapter);
1811         be_rx_rate_update(adapter);
1812
1813         if (adapter->rx_post_starved) {
1814                 adapter->rx_post_starved = false;
1815                 be_post_rx_frags(adapter);
1816         }
1817         if (!adapter->ue_detected) {
1818                 if (be_detect_ue(adapter))
1819                         be_dump_ue(adapter);
1820         }
1821
1822         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1823 }
1824
1825 static void be_msix_disable(struct be_adapter *adapter)
1826 {
1827         if (adapter->msix_enabled) {
1828                 pci_disable_msix(adapter->pdev);
1829                 adapter->msix_enabled = false;
1830         }
1831 }
1832
1833 static void be_msix_enable(struct be_adapter *adapter)
1834 {
1835         int i, status;
1836
1837         for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1838                 adapter->msix_entries[i].entry = i;
1839
1840         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1841                 BE_NUM_MSIX_VECTORS);
1842         if (status == 0)
1843                 adapter->msix_enabled = true;
1844 }
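
/* Illustrative variant, not driver code: pci_enable_msix() returns a
 * positive count when fewer vectors are available than requested. A
 * driver able to run with fewer vectors could retry once with that
 * count; this driver simply falls back to INTx when MSI-X fails.
 */
static int example_msix_enable(struct be_adapter *adapter)
{
        int i, status;

        for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                BE_NUM_MSIX_VECTORS);
        if (status > 0)         /* retry with what the platform offers */
                status = pci_enable_msix(adapter->pdev,
                        adapter->msix_entries, status);
        return status;
}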
1845
1846 static void be_sriov_enable(struct be_adapter *adapter)
1847 {
1848         be_check_sriov_fn_type(adapter);
1849 #ifdef CONFIG_PCI_IOV
1850         if (be_physfn(adapter) && num_vfs) {
1851                 int status;
1852
1853                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1854                 adapter->sriov_enabled = !status;
1855         }
1856 #endif
1857 }
1858
1859 static void be_sriov_disable(struct be_adapter *adapter)
1860 {
1861 #ifdef CONFIG_PCI_IOV
1862         if (adapter->sriov_enabled) {
1863                 pci_disable_sriov(adapter->pdev);
1864                 adapter->sriov_enabled = false;
1865         }
1866 #endif
1867 }
1868
1869 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1870 {
1871         return adapter->msix_entries[
1872                         be_evt_bit_get(adapter, eq_id)].vector;
1873 }
1874
1875 static int be_request_irq(struct be_adapter *adapter,
1876                 struct be_eq_obj *eq_obj,
1877                 void *handler, char *desc)
1878 {
1879         struct net_device *netdev = adapter->netdev;
1880         int vec;
1881
1882         snprintf(eq_obj->desc, sizeof(eq_obj->desc), "%s-%s", netdev->name, desc);
1883         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1884         return request_irq(vec, handler, 0, eq_obj->desc, adapter);
1885 }
1886
1887 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1888 {
1889         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1890         free_irq(vec, adapter);
1891 }
1892
1893 static int be_msix_register(struct be_adapter *adapter)
1894 {
1895         int status;
1896
1897         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
1898         if (status)
1899                 goto err;
1900
1901         status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1902         if (status)
1903                 goto free_tx_irq;
1904
1905         return 0;
1906
1907 free_tx_irq:
1908         be_free_irq(adapter, &adapter->tx_eq);
1909 err:
1910         dev_warn(&adapter->pdev->dev,
1911                 "MSIX Request IRQ failed - err %d\n", status);
1912         pci_disable_msix(adapter->pdev);
1913         adapter->msix_enabled = false;
1914         return status;
1915 }
1916
1917 static int be_irq_register(struct be_adapter *adapter)
1918 {
1919         struct net_device *netdev = adapter->netdev;
1920         int status;
1921
1922         if (adapter->msix_enabled) {
1923                 status = be_msix_register(adapter);
1924                 if (status == 0)
1925                         goto done;
1926                 /* INTx is not supported for VF */
1927                 if (!be_physfn(adapter))
1928                         return status;
1929         }
1930
1931         /* INTx */
1932         netdev->irq = adapter->pdev->irq;
1933         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1934                         adapter);
1935         if (status) {
1936                 dev_err(&adapter->pdev->dev,
1937                         "INTx request IRQ failed - err %d\n", status);
1938                 return status;
1939         }
1940 done:
1941         adapter->isr_registered = true;
1942         return 0;
1943 }
1944
1945 static void be_irq_unregister(struct be_adapter *adapter)
1946 {
1947         struct net_device *netdev = adapter->netdev;
1948
1949         if (!adapter->isr_registered)
1950                 return;
1951
1952         /* INTx */
1953         if (!adapter->msix_enabled) {
1954                 free_irq(netdev->irq, adapter);
1955                 goto done;
1956         }
1957
1958         /* MSIx */
1959         be_free_irq(adapter, &adapter->tx_eq);
1960         be_free_irq(adapter, &adapter->rx_eq);
1961 done:
1962         adapter->isr_registered = false;
1963 }
1964
1965 static int be_close(struct net_device *netdev)
1966 {
1967         struct be_adapter *adapter = netdev_priv(netdev);
1968         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1969         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1970         int vec;
1971
1972         cancel_delayed_work_sync(&adapter->work);
1973
1974         be_async_mcc_disable(adapter);
1975
1976         netif_stop_queue(netdev);
1977         netif_carrier_off(netdev);
1978         adapter->link_up = false;
1979
1980         be_intr_set(adapter, false);
1981
1982         if (adapter->msix_enabled) {
1983                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1984                 synchronize_irq(vec);
1985                 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1986                 synchronize_irq(vec);
1987         } else {
1988                 synchronize_irq(netdev->irq);
1989         }
1990         be_irq_unregister(adapter);
1991
1992         napi_disable(&rx_eq->napi);
1993         napi_disable(&tx_eq->napi);
1994
1995         /* Wait for all pending tx completions to arrive so that
1996          * all tx skbs are freed.
1997          */
1998         be_tx_compl_clean(adapter);
1999
2000         return 0;
2001 }
2002
2003 static int be_open(struct net_device *netdev)
2004 {
2005         struct be_adapter *adapter = netdev_priv(netdev);
2006         struct be_eq_obj *rx_eq = &adapter->rx_eq;
2007         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2008         bool link_up;
2009         int status;
2010         u8 mac_speed;
2011         u16 link_speed;
2012
2013         /* First time posting */
2014         be_post_rx_frags(adapter);
2015
2016         napi_enable(&rx_eq->napi);
2017         napi_enable(&tx_eq->napi);
2018
2019         be_irq_register(adapter);
2020
2021         be_intr_set(adapter, true);
2022
2023         /* The evt queues are created in unarmed state; arm them */
2024         be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
2025         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2026
2027         /* Rx compl queue may be in unarmed state; rearm it */
2028         be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2029
2030         /* Now that interrupts are on we can process async mcc */
2031         be_async_mcc_enable(adapter);
2032
2033         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2034
2035         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2036                         &link_speed);
2037         if (status)
2038                 goto err;
2039         be_link_status_update(adapter, link_up);
2040
2041         if (be_physfn(adapter)) {
2042                 status = be_vid_config(adapter, false, 0);
2043                 if (status)
2044                         goto err;
2045
2046                 status = be_cmd_set_flow_control(adapter,
2047                                 adapter->tx_fc, adapter->rx_fc);
2048                 if (status)
2049                         goto err;
2050         }
2051
2052         return 0;
2053 err:
2054         be_close(adapter->netdev);
2055         return -EIO;
2056 }
2057
2058 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2059 {
2060         struct be_dma_mem cmd;
2061         int status = 0;
2062         u8 mac[ETH_ALEN];
2063
2064         memset(mac, 0, ETH_ALEN);
2065
2066         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2067         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2068         if (cmd.va == NULL)
2069                 return -ENOMEM;
2070         memset(cmd.va, 0, cmd.size);
2071
2072         if (enable) {
2073                 status = pci_write_config_dword(adapter->pdev,
2074                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2075                 if (status) {
2076                         dev_err(&adapter->pdev->dev,
2077                                 "Could not enable Wake-on-LAN\n");
2078                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2079                                         cmd.dma);
2080                         return status;
2081                 }
2082                 status = be_cmd_enable_magic_wol(adapter,
2083                                 adapter->netdev->dev_addr, &cmd);
2084                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2085                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2086         } else {
2087                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2088                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2089                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2090         }
2091
2092         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2093         return status;
2094 }
2095
2096 static int be_setup(struct be_adapter *adapter)
2097 {
2098         struct net_device *netdev = adapter->netdev;
2099         u32 cap_flags, en_flags, vf = 0;
2100         int status;
2101         u8 mac[ETH_ALEN];
2102
2103         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2104
2105         if (be_physfn(adapter)) {
2106                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2107                                 BE_IF_FLAGS_PROMISCUOUS |
2108                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2109                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2110         }
2111
2112         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2113                         netdev->dev_addr, false /* pmac_invalid */,
2114                         &adapter->if_handle, &adapter->pmac_id, 0);
2115         if (status != 0)
2116                 goto do_none;
2117
2118         if (be_physfn(adapter)) {
2119                 while (vf < num_vfs) {
2120                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2121                                         | BE_IF_FLAGS_BROADCAST;
2122                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2123                                         mac, true,
2124                                         &adapter->vf_cfg[vf].vf_if_handle,
2125                                         NULL, vf+1);
2126                         if (status) {
2127                                 dev_err(&adapter->pdev->dev,
2128                                 "Interface Create failed for VF %d\n", vf);
2129                                 goto if_destroy;
2130                         }
2131                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2132                         vf++;
2133                 }
2134         } else {
2135                 status = be_cmd_mac_addr_query(adapter, mac,
2136                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2137                 if (!status) {
2138                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2139                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2140                 }
2141         }
2142
2143         status = be_tx_queues_create(adapter);
2144         if (status != 0)
2145                 goto if_destroy;
2146
2147         status = be_rx_queues_create(adapter);
2148         if (status != 0)
2149                 goto tx_qs_destroy;
2150
2151         status = be_mcc_queues_create(adapter);
2152         if (status != 0)
2153                 goto rx_qs_destroy;
2154
2155         adapter->link_speed = -1;
2156
2157         return 0;
2158
2159 rx_qs_destroy:
2160         be_rx_queues_destroy(adapter);
2161 tx_qs_destroy:
2162         be_tx_queues_destroy(adapter);
2163 if_destroy:
2164         for (vf = 0; vf < num_vfs; vf++)
2165                 if (adapter->vf_cfg[vf].vf_if_handle)
2166                         be_cmd_if_destroy(adapter,
2167                                         adapter->vf_cfg[vf].vf_if_handle);
2168         be_cmd_if_destroy(adapter, adapter->if_handle);
2169 do_none:
2170         return status;
2171 }
2172
2173 static int be_clear(struct be_adapter *adapter)
2174 {
2175         be_mcc_queues_destroy(adapter);
2176         be_rx_queues_destroy(adapter);
2177         be_tx_queues_destroy(adapter);
2178
2179         be_cmd_if_destroy(adapter, adapter->if_handle);
2180
2181         /* tell fw we're done with firing cmds */
2182         be_cmd_fw_clean(adapter);
2183         return 0;
2184 }
2185
2187 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2188 char flash_cookie[2][16] =      {"*** SE FLAS",
2189                                 "H DIRECTORY *** "};
2190
2191 static bool be_flash_redboot(struct be_adapter *adapter,
2192                         const u8 *p, u32 img_start, int image_size,
2193                         int hdr_size)
2194 {
2195         u32 crc_offset;
2196         u8 flashed_crc[4];
2197         int status;
2198
2199         crc_offset = hdr_size + img_start + image_size - 4;
2200
2201         p += crc_offset;
2202
2203         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2204                         (image_size - 4));
2205         if (status) {
2206                 dev_err(&adapter->pdev->dev,
2207                 "could not get crc from flash, not flashing redboot\n");
2208                 return false;
2209         }
2210
2211         /* update redboot only if crc does not match */
2212         return memcmp(flashed_crc, p, 4) != 0;
2216 }
2217
2218 static int be_flash_data(struct be_adapter *adapter,
2219                         const struct firmware *fw,
2220                         struct be_dma_mem *flash_cmd, int num_of_images)
2222 {
2223         int status = 0, i, filehdr_size = 0;
2224         u32 total_bytes = 0, flash_op;
2225         int num_bytes;
2226         const u8 *p = fw->data;
2227         struct be_cmd_write_flashrom *req = flash_cmd->va;
2228         struct flash_comp *pflashcomp;
2229         int num_comp;
2230
2231         struct flash_comp gen3_flash_types[9] = {
2232                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2233                         FLASH_IMAGE_MAX_SIZE_g3},
2234                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2235                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2236                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2237                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2238                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2239                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2240                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2241                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2242                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2243                         FLASH_IMAGE_MAX_SIZE_g3},
2244                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2245                         FLASH_IMAGE_MAX_SIZE_g3},
2246                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2247                         FLASH_IMAGE_MAX_SIZE_g3},
2248                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2249                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2250         };
2251         struct flash_comp gen2_flash_types[8] = {
2252                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2253                         FLASH_IMAGE_MAX_SIZE_g2},
2254                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2255                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2256                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2257                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2258                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2259                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2260                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2261                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2262                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2263                         FLASH_IMAGE_MAX_SIZE_g2},
2264                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2265                         FLASH_IMAGE_MAX_SIZE_g2},
2266                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2267                          FLASH_IMAGE_MAX_SIZE_g2}
2268         };
2269
2270         if (adapter->generation == BE_GEN3) {
2271                 pflashcomp = gen3_flash_types;
2272                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2273                 num_comp = 9;
2274         } else {
2275                 pflashcomp = gen2_flash_types;
2276                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2277                 num_comp = 8;
2278         }
2279         for (i = 0; i < num_comp; i++) {
2280                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2281                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2282                         continue;
2283                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2284                         (!be_flash_redboot(adapter, fw->data,
2285                          pflashcomp[i].offset, pflashcomp[i].size,
2286                          filehdr_size)))
2287                         continue;
2288                 p = fw->data;
2289                 p += filehdr_size + pflashcomp[i].offset
2290                         + (num_of_images * sizeof(struct image_hdr));
2291                 if (p + pflashcomp[i].size > fw->data + fw->size)
2292                         return -1;
2293                 total_bytes = pflashcomp[i].size;
2294                 while (total_bytes) {
2295                         if (total_bytes > 32*1024)
2296                                 num_bytes = 32*1024;
2297                         else
2298                                 num_bytes = total_bytes;
2299                         total_bytes -= num_bytes;
2300
2301                         if (!total_bytes)
2302                                 flash_op = FLASHROM_OPER_FLASH;
2303                         else
2304                                 flash_op = FLASHROM_OPER_SAVE;
2305                         memcpy(req->params.data_buf, p, num_bytes);
2306                         p += num_bytes;
2307                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2308                                 pflashcomp[i].optype, flash_op, num_bytes);
2309                         if (status) {
2310                                 dev_err(&adapter->pdev->dev,
2311                                         "cmd to write to flash rom failed.\n");
2312                                 return -1;
2313                         }
2314                         yield();
2315                 }
2316         }
2317         return 0;
2318 }
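
/* Sketch of the 32KB chunking protocol used above (illustrative only):
 * every chunk except the last is staged with FLASHROM_OPER_SAVE; the
 * final chunk commits the whole image with FLASHROM_OPER_FLASH.
 */
static int example_flash_chunks(struct be_adapter *adapter,
                struct be_dma_mem *flash_cmd,
                struct be_cmd_write_flashrom *req,
                const u8 *p, u32 total, int optype)
{
        u32 num;
        int status;

        while (total) {
                num = min_t(u32, total, 32 * 1024);
                total -= num;
                memcpy(req->params.data_buf, p, num);
                p += num;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
                        total ? FLASHROM_OPER_SAVE : FLASHROM_OPER_FLASH,
                        num);
                if (status)
                        return status;
                yield();        /* be nice to the scheduler between chunks */
        }
        return 0;
}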
2319
2320 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2321 {
2322         if (fhdr == NULL)
2323                 return 0;
2324         if (fhdr->build[0] == '3')
2325                 return BE_GEN3;
2326         else if (fhdr->build[0] == '2')
2327                 return BE_GEN2;
2328         else
2329                 return 0;
2330 }
2331
2332 int be_load_fw(struct be_adapter *adapter, u8 *func)
2333 {
2334         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2335         const struct firmware *fw;
2336         struct flash_file_hdr_g2 *fhdr;
2337         struct flash_file_hdr_g3 *fhdr3;
2338         struct image_hdr *img_hdr_ptr = NULL;
2339         struct be_dma_mem flash_cmd;
2340         int status, i = 0, num_imgs = 0;
2341         const u8 *p;
2342
2343         strlcpy(fw_file, func, sizeof(fw_file));
2344
2345         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2346         if (status)
2347                 goto fw_exit;
2348
2349         p = fw->data;
2350         fhdr = (struct flash_file_hdr_g2 *) p;
2351         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2352
2353         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2354         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2355                                         &flash_cmd.dma);
2356         if (!flash_cmd.va) {
2357                 status = -ENOMEM;
2358                 dev_err(&adapter->pdev->dev,
2359                         "Memory allocation failure while flashing\n");
2360                 goto fw_exit;
2361         }
2362
2363         if ((adapter->generation == BE_GEN3) &&
2364                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2365                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2366                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2367                 for (i = 0; i < num_imgs; i++) {
2368                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2369                                         (sizeof(struct flash_file_hdr_g3) +
2370                                          i * sizeof(struct image_hdr)));
2371                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2372                                 status = be_flash_data(adapter, fw, &flash_cmd,
2373                                                         num_imgs);
2374                 }
2375         } else if ((adapter->generation == BE_GEN2) &&
2376                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2377                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2378         } else {
2379                 dev_err(&adapter->pdev->dev,
2380                         "UFI and Interface are not compatible for flashing\n");
2381                 status = -1;
2382         }
2383
2384         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2385                                 flash_cmd.dma);
2386         if (status) {
2387                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2388                 goto fw_exit;
2389         }
2390
2391         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2392
2393 fw_exit:
2394         release_firmware(fw);
2395         return status;
2396 }
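
/* Illustrative usage note (assumption: be_load_fw() is wired up as the
 * ethtool flash_device handler in be_ethtool.c): userspace reaches this
 * path with
 *      ethtool -f <iface> <ufi_file>
 * and request_firmware() resolves <ufi_file> through the firmware
 * loader, typically under /lib/firmware.
 */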
2397
2398 static const struct net_device_ops be_netdev_ops = {
2399         .ndo_open               = be_open,
2400         .ndo_stop               = be_close,
2401         .ndo_start_xmit         = be_xmit,
2402         .ndo_get_stats          = be_get_stats,
2403         .ndo_set_rx_mode        = be_set_multicast_list,
2404         .ndo_set_mac_address    = be_mac_addr_set,
2405         .ndo_change_mtu         = be_change_mtu,
2406         .ndo_validate_addr      = eth_validate_addr,
2407         .ndo_vlan_rx_register   = be_vlan_register,
2408         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2409         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2410         .ndo_set_vf_mac         = be_set_vf_mac,
2411         .ndo_set_vf_vlan        = be_set_vf_vlan,
2412         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2413         .ndo_get_vf_config      = be_get_vf_config
2414 };
2415
2416 static void be_netdev_init(struct net_device *netdev)
2417 {
2418         struct be_adapter *adapter = netdev_priv(netdev);
2419
2420         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2421                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2422                 NETIF_F_GRO | NETIF_F_TSO6;
2423
2424         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2425
2426         netdev->flags |= IFF_MULTICAST;
2427
2428         adapter->rx_csum = true;
2429
2430         /* Default settings for Rx and Tx flow control */
2431         adapter->rx_fc = true;
2432         adapter->tx_fc = true;
2433
2434         netif_set_gso_max_size(netdev, 65535);
2435
2436         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2437
2438         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2439
2440         netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2441                 BE_NAPI_WEIGHT);
2442         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2443                 BE_NAPI_WEIGHT);
2444
2445         netif_carrier_off(netdev);
2446         netif_stop_queue(netdev);
2447 }
2448
2449 static void be_unmap_pci_bars(struct be_adapter *adapter)
2450 {
2451         if (adapter->csr)
2452                 iounmap(adapter->csr);
2453         if (adapter->db)
2454                 iounmap(adapter->db);
2455         if (adapter->pcicfg && be_physfn(adapter))
2456                 iounmap(adapter->pcicfg);
2457 }
2458
2459 static int be_map_pci_bars(struct be_adapter *adapter)
2460 {
2461         u8 __iomem *addr;
2462         int pcicfg_reg, db_reg;
2463
2464         if (be_physfn(adapter)) {
2465                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2466                                 pci_resource_len(adapter->pdev, 2));
2467                 if (addr == NULL)
2468                         return -ENOMEM;
2469                 adapter->csr = addr;
2470         }
2471
2472         if (adapter->generation == BE_GEN2) {
2473                 pcicfg_reg = 1;
2474                 db_reg = 4;
2475         } else {
2476                 pcicfg_reg = 0;
2477                 if (be_physfn(adapter))
2478                         db_reg = 4;
2479                 else
2480                         db_reg = 0;
2481         }
2482         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2483                                 pci_resource_len(adapter->pdev, db_reg));
2484         if (addr == NULL)
2485                 goto pci_map_err;
2486         adapter->db = addr;
2487
2488         if (be_physfn(adapter)) {
2489                 addr = ioremap_nocache(
2490                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2491                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2492                 if (addr == NULL)
2493                         goto pci_map_err;
2494                 adapter->pcicfg = addr;
2495         } else
2496                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2497
2498         return 0;
2499 pci_map_err:
2500         be_unmap_pci_bars(adapter);
2501         return -ENOMEM;
2502 }
2503
2505 static void be_ctrl_cleanup(struct be_adapter *adapter)
2506 {
2507         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2508
2509         be_unmap_pci_bars(adapter);
2510
2511         if (mem->va)
2512                 pci_free_consistent(adapter->pdev, mem->size,
2513                         mem->va, mem->dma);
2514
2515         mem = &adapter->mc_cmd_mem;
2516         if (mem->va)
2517                 pci_free_consistent(adapter->pdev, mem->size,
2518                         mem->va, mem->dma);
2519 }
2520
2521 static int be_ctrl_init(struct be_adapter *adapter)
2522 {
2523         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2524         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2525         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2526         int status;
2527
2528         status = be_map_pci_bars(adapter);
2529         if (status)
2530                 goto done;
2531
2532         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2533         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2534                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2535         if (!mbox_mem_alloc->va) {
2536                 status = -ENOMEM;
2537                 goto unmap_pci_bars;
2538         }
2539
2540         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2541         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2542         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2543         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2544
2545         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2546         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2547                         &mc_cmd_mem->dma);
2548         if (mc_cmd_mem->va == NULL) {
2549                 status = -ENOMEM;
2550                 goto free_mbox;
2551         }
2552         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2553
2554         spin_lock_init(&adapter->mbox_lock);
2555         spin_lock_init(&adapter->mcc_lock);
2556         spin_lock_init(&adapter->mcc_cq_lock);
2557
2558         init_completion(&adapter->flash_compl);
2559         pci_save_state(adapter->pdev);
2560         return 0;
2561
2562 free_mbox:
2563         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2564                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2565
2566 unmap_pci_bars:
2567         be_unmap_pci_bars(adapter);
2568
2569 done:
2570         return status;
2571 }
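
/* Sketch (illustrative): why the "+ 16" over-allocation for the mailbox
 * above is sufficient. PTR_ALIGN rounds a pointer up by at most 15
 * bytes, so a buffer of size + 16 always contains a 16-byte-aligned
 * window of 'size' bytes.
 */
static inline bool example_mbox_fits(void *raw_va, size_t raw_size,
                size_t size)
{
        void *aligned = PTR_ALIGN(raw_va, 16);

        /* always true when raw_size == size + 16 */
        return aligned + size <= raw_va + raw_size;
}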
2572
2573 static void be_stats_cleanup(struct be_adapter *adapter)
2574 {
2575         struct be_stats_obj *stats = &adapter->stats;
2576         struct be_dma_mem *cmd = &stats->cmd;
2577
2578         if (cmd->va)
2579                 pci_free_consistent(adapter->pdev, cmd->size,
2580                         cmd->va, cmd->dma);
2581 }
2582
2583 static int be_stats_init(struct be_adapter *adapter)
2584 {
2585         struct be_stats_obj *stats = &adapter->stats;
2586         struct be_dma_mem *cmd = &stats->cmd;
2587
2588         cmd->size = sizeof(struct be_cmd_req_get_stats);
2589         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2590         if (cmd->va == NULL)
2591                 return -ENOMEM;
2592         memset(cmd->va, 0, cmd->size);
2593         return 0;
2594 }
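
/* Equivalence sketch (illustrative): the pci_alloc_consistent()/
 * pci_free_consistent() pairs used throughout this file are thin
 * wrappers over the generic DMA API; an alloc-and-zero helper could
 * look like this.
 */
static void *example_dma_zalloc(struct be_adapter *adapter, size_t size,
                dma_addr_t *dma)
{
        void *va = dma_alloc_coherent(&adapter->pdev->dev, size, dma,
                        GFP_ATOMIC);

        if (va)
                memset(va, 0, size);
        return va;
}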
2595
2596 static void __devexit be_remove(struct pci_dev *pdev)
2597 {
2598         struct be_adapter *adapter = pci_get_drvdata(pdev);
2599
2600         if (!adapter)
2601                 return;
2602
2603         unregister_netdev(adapter->netdev);
2604
2605         be_clear(adapter);
2606
2607         be_stats_cleanup(adapter);
2608
2609         be_ctrl_cleanup(adapter);
2610
2611         be_sriov_disable(adapter);
2612
2613         be_msix_disable(adapter);
2614
2615         pci_set_drvdata(pdev, NULL);
2616         pci_release_regions(pdev);
2617         pci_disable_device(pdev);
2618
2619         free_netdev(adapter->netdev);
2620 }
2621
2622 static int be_get_config(struct be_adapter *adapter)
2623 {
2624         int status;
2625         u8 mac[ETH_ALEN];
2626
2627         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2628         if (status)
2629                 return status;
2630
2631         status = be_cmd_query_fw_cfg(adapter,
2632                                 &adapter->port_num, &adapter->function_mode);
2633         if (status)
2634                 return status;
2635
2636         memset(mac, 0, ETH_ALEN);
2637
2638         if (be_physfn(adapter)) {
2639                 status = be_cmd_mac_addr_query(adapter, mac,
2640                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2641
2642                 if (status)
2643                         return status;
2644
2645                 if (!is_valid_ether_addr(mac))
2646                         return -EADDRNOTAVAIL;
2647
2648                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2649                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2650         }
2651
2652         if (adapter->function_mode & 0x400)     /* multi-channel (FLEX10) mode */
2653                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2654         else
2655                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2656
2657         return 0;
2658 }
2659
2660 static int __devinit be_probe(struct pci_dev *pdev,
2661                         const struct pci_device_id *pdev_id)
2662 {
2663         int status = 0;
2664         struct be_adapter *adapter;
2665         struct net_device *netdev;
2666
2668         status = pci_enable_device(pdev);
2669         if (status)
2670                 goto do_none;
2671
2672         status = pci_request_regions(pdev, DRV_NAME);
2673         if (status)
2674                 goto disable_dev;
2675         pci_set_master(pdev);
2676
2677         netdev = alloc_etherdev(sizeof(struct be_adapter));
2678         if (netdev == NULL) {
2679                 status = -ENOMEM;
2680                 goto rel_reg;
2681         }
2682         adapter = netdev_priv(netdev);
2683
2684         switch (pdev->device) {
2685         case BE_DEVICE_ID1:
2686         case OC_DEVICE_ID1:
2687                 adapter->generation = BE_GEN2;
2688                 break;
2689         case BE_DEVICE_ID2:
2690         case OC_DEVICE_ID2:
2691                 adapter->generation = BE_GEN3;
2692                 break;
2693         default:
2694                 adapter->generation = 0;
2695         }
2696
2697         adapter->pdev = pdev;
2698         pci_set_drvdata(pdev, adapter);
2699         adapter->netdev = netdev;
2700         be_netdev_init(netdev);
2701         SET_NETDEV_DEV(netdev, &pdev->dev);
2702
2703         be_msix_enable(adapter);
2704
2705         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2706         if (!status) {
2707                 netdev->features |= NETIF_F_HIGHDMA;
2708         } else {
2709                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2710                 if (status) {
2711                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2712                         goto free_netdev;
2713                 }
2714         }
2715
2716         be_sriov_enable(adapter);
2717
2718         status = be_ctrl_init(adapter);
2719         if (status)
2720                 goto free_netdev;
2721
2722         /* sync up with fw's ready state */
2723         if (be_physfn(adapter)) {
2724                 status = be_cmd_POST(adapter);
2725                 if (status)
2726                         goto ctrl_clean;
2727         }
2728
2729         /* tell fw we're ready to fire cmds */
2730         status = be_cmd_fw_init(adapter);
2731         if (status)
2732                 goto ctrl_clean;
2733
2734         if (be_physfn(adapter)) {
2735                 status = be_cmd_reset_function(adapter);
2736                 if (status)
2737                         goto ctrl_clean;
2738         }
2739
2740         status = be_stats_init(adapter);
2741         if (status)
2742                 goto ctrl_clean;
2743
2744         status = be_get_config(adapter);
2745         if (status)
2746                 goto stats_clean;
2747
2748         INIT_DELAYED_WORK(&adapter->work, be_worker);
2749
2750         status = be_setup(adapter);
2751         if (status)
2752                 goto stats_clean;
2753
2754         status = register_netdev(netdev);
2755         if (status != 0)
2756                 goto unsetup;
2757
2758         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2759         return 0;
2760
2761 unsetup:
2762         be_clear(adapter);
2763 stats_clean:
2764         be_stats_cleanup(adapter);
2765 ctrl_clean:
2766         be_ctrl_cleanup(adapter);
2767 free_netdev:
2768         be_msix_disable(adapter);
2769         be_sriov_disable(adapter);
2770         free_netdev(adapter->netdev);
2771         pci_set_drvdata(pdev, NULL);
2772 rel_reg:
2773         pci_release_regions(pdev);
2774 disable_dev:
2775         pci_disable_device(pdev);
2776 do_none:
2777         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2778         return status;
2779 }
2780
2781 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2782 {
2783         struct be_adapter *adapter = pci_get_drvdata(pdev);
2784         struct net_device *netdev = adapter->netdev;
2785
2786         if (adapter->wol)
2787                 be_setup_wol(adapter, true);
2788
2789         netif_device_detach(netdev);
2790         if (netif_running(netdev)) {
2791                 rtnl_lock();
2792                 be_close(netdev);
2793                 rtnl_unlock();
2794         }
2795         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2796         be_clear(adapter);
2797
2798         pci_save_state(pdev);
2799         pci_disable_device(pdev);
2800         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2801         return 0;
2802 }
2803
2804 static int be_resume(struct pci_dev *pdev)
2805 {
2806         int status = 0;
2807         struct be_adapter *adapter = pci_get_drvdata(pdev);
2808         struct net_device *netdev = adapter->netdev;
2809
2810         netif_device_detach(netdev);
2811
2812         status = pci_enable_device(pdev);
2813         if (status)
2814                 return status;
2815
2816         pci_set_power_state(pdev, PCI_D0);
2817         pci_restore_state(pdev);
2818
2819         /* tell fw we're ready to fire cmds */
2820         status = be_cmd_fw_init(adapter);
2821         if (status)
2822                 return status;
2823
2824         be_setup(adapter);
2825         if (netif_running(netdev)) {
2826                 rtnl_lock();
2827                 be_open(netdev);
2828                 rtnl_unlock();
2829         }
2830         netif_device_attach(netdev);
2831
2832         if (adapter->wol)
2833                 be_setup_wol(adapter, false);
2834         return 0;
2835 }
2836
2837 /*
2838  * An FLR will stop BE from DMAing any data.
2839  */
2840 static void be_shutdown(struct pci_dev *pdev)
2841 {
2842         struct be_adapter *adapter = pci_get_drvdata(pdev);
2843         struct net_device *netdev = adapter->netdev;
2844
2845         netif_device_detach(netdev);
2846
2847         be_cmd_reset_function(adapter);
2848
2849         if (adapter->wol)
2850                 be_setup_wol(adapter, true);
2851
2852         pci_disable_device(pdev);
2853 }
2854
2855 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2856                                 pci_channel_state_t state)
2857 {
2858         struct be_adapter *adapter = pci_get_drvdata(pdev);
2859         struct net_device *netdev = adapter->netdev;
2860
2861         dev_err(&adapter->pdev->dev, "EEH error detected\n");
2862
2863         adapter->eeh_err = true;
2864
2865         netif_device_detach(netdev);
2866
2867         if (netif_running(netdev)) {
2868                 rtnl_lock();
2869                 be_close(netdev);
2870                 rtnl_unlock();
2871         }
2872         be_clear(adapter);
2873
2874         if (state == pci_channel_io_perm_failure)
2875                 return PCI_ERS_RESULT_DISCONNECT;
2876
2877         pci_disable_device(pdev);
2878
2879         return PCI_ERS_RESULT_NEED_RESET;
2880 }
2881
2882 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2883 {
2884         struct be_adapter *adapter = pci_get_drvdata(pdev);
2885         int status;
2886
2887         dev_info(&adapter->pdev->dev, "EEH reset\n");
2888         adapter->eeh_err = false;
2889
2890         status = pci_enable_device(pdev);
2891         if (status)
2892                 return PCI_ERS_RESULT_DISCONNECT;
2893
2894         pci_set_master(pdev);
2895         pci_set_power_state(pdev, PCI_D0);
2896         pci_restore_state(pdev);
2897
2898         /* Check if card is ok and fw is ready */
2899         status = be_cmd_POST(adapter);
2900         if (status)
2901                 return PCI_ERS_RESULT_DISCONNECT;
2902
2903         return PCI_ERS_RESULT_RECOVERED;
2904 }
2905
2906 static void be_eeh_resume(struct pci_dev *pdev)
2907 {
2908         int status = 0;
2909         struct be_adapter *adapter = pci_get_drvdata(pdev);
2910         struct net_device *netdev = adapter->netdev;
2911
2912         dev_info(&adapter->pdev->dev, "EEH resume\n");
2913
2914         pci_save_state(pdev);
2915
2916         /* tell fw we're ready to fire cmds */
2917         status = be_cmd_fw_init(adapter);
2918         if (status)
2919                 goto err;
2920
2921         status = be_setup(adapter);
2922         if (status)
2923                 goto err;
2924
2925         if (netif_running(netdev)) {
2926                 status = be_open(netdev);
2927                 if (status)
2928                         goto err;
2929         }
2930         netif_device_attach(netdev);
2931         return;
2932 err:
2933         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2934 }
2935
2936 static struct pci_error_handlers be_eeh_handlers = {
2937         .error_detected = be_eeh_err_detected,
2938         .slot_reset = be_eeh_reset,
2939         .resume = be_eeh_resume,
2940 };
2941
2942 static struct pci_driver be_driver = {
2943         .name = DRV_NAME,
2944         .id_table = be_dev_ids,
2945         .probe = be_probe,
2946         .remove = be_remove,
2947         .suspend = be_suspend,
2948         .resume = be_resume,
2949         .shutdown = be_shutdown,
2950         .err_handler = &be_eeh_handlers
2951 };
2952
2953 static int __init be_init_module(void)
2954 {
2955         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2956             rx_frag_size != 2048) {
2957                 printk(KERN_WARNING DRV_NAME
2958                         " : Module param rx_frag_size must be 2048/4096/8192."
2959                         " Using 2048\n");
2960                 rx_frag_size = 2048;
2961         }
2962
2963         if (num_vfs > 32) {
2964                 printk(KERN_WARNING DRV_NAME
2965                         " : Module param num_vfs must not be greater than 32."
2966                         " Using 32\n");
2967                 num_vfs = 32;
2968         }
2969
2970         return pci_register_driver(&be_driver);
2971 }
2972 module_init(be_init_module);
2973
2974 static void __exit be_exit_module(void)
2975 {
2976         pci_unregister_driver(&be_driver);
2977 }
2978 module_exit(be_exit_module);