/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

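/* Doorbell helpers: each notify packs a ring id and a produced/consumed
 * count into one 32-bit doorbell write. The wmb() ensures that descriptor
 * updates in memory are visible to the device before the doorbell rings.
 */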
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
        dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped =
                erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
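        /* Scale frags/sec into an interrupt-delay value and clamp it to the
         * EQ's [min_eqd, max_eqd] range. E.g., ~550,000 frags/sec gives
         * (550000 / 110000) << 3 = 40 (worked example, not from the source).
         */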
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

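/* Convert a byte count accumulated over a jiffies interval to Mbits/sec.
 * Worked example (illustrative): 25,000,000 bytes over 2 * HZ ticks is
 * 12,500,000 bytes/sec -> 100,000,000 bits/sec -> 100 Mbits/sec.
 */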
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
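/* Example (illustrative): a linear skb with two page frags needs
 * 1 (hdr) + 1 (linear) + 2 (frags) = 4 WRBs; with one frag the count of 3
 * is padded to 4 with a dummy WRB, since the ring only accepts an even
 * number of WRBs per request (inferred from the fix-up below).
 */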
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

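/* Undo the DMA mapping described by a WRB. The WRB was written to the ring
 * in little-endian form, so it is swapped back to host order before the
 * address and length fields are read.
 */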
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;

        if (pkt_type == BE_MULTICAST_PACKET)
                stats->be_rx_mcast_pkt++;
}

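/* Returns true when the stack must verify the checksum in software, i.e.
 * when rx checksum offload (cso) is disabled or when the HW could not
 * validate both the L4 and IP checksums of a TCP/UDP packet.
 */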
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
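/* Only the first BE_HDR_LEN bytes are copied into the skb's linear area,
 * which is enough for the stack to parse protocol headers; the remainder
 * of the frame stays in page fragments so no full copy is needed.
 */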
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * Ignore it if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj = &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * Ignore it if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

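        /* Read the valid bit before the rest of the entry: pairs with the
         * device's DMA write of the completion.
         */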
        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word because,
 * when walking the queue, valid entries are little-endian and
 * invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
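/* With 4K pages and the default rx_frag_size of 2048, big_page_size is
 * 4096 and each page yields two frags; the frag that exhausts the page
 * sets last_page_user so the DMA mapping is torn down exactly once
 * (illustrative numbers, not from the source).
 */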
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

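/* Unmap and free the skb whose completion reports last_index as its final
 * WRB; walks the hdr WRB plus all frag WRBs from the ring tail up to that
 * index and returns the WRB slots to the queue.
 */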
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First clean up pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

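/* TX resources come up in EQ -> CQ -> Q order; the error labels below
 * unwind in exactly the reverse order, destroying only what was created.
 */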
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

1524 static void be_rx_queues_destroy(struct be_adapter *adapter)
1525 {
1526         struct be_queue_info *q;
1527
1528         q = &adapter->rx_obj.q;
1529         if (q->created) {
1530                 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1531
1532                 /* After the rxq is invalidated, wait for a grace time
1533                  * of 1ms for all DMA to end and the flush compl to arrive
1534                  */
1535                 mdelay(1);
1536                 be_rx_q_clean(adapter);
1537         }
1538         be_queue_free(adapter, q);
1539
1540         q = &adapter->rx_obj.cq;
1541         if (q->created)
1542                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1543         be_queue_free(adapter, q);
1544
1545         /* Clear any residual events */
1546         be_eq_clean(adapter, &adapter->rx_eq);
1547
1548         q = &adapter->rx_eq.q;
1549         if (q->created)
1550                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1551         be_queue_free(adapter, q);
1552 }
1553
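/* A note on the big_page_size math below (assuming 4K pages):
 * get_order() rounds the fragment size up to a page order, so for the
 * default rx_frag_size of 2048, get_order(2048) == 0 and big_page_size
 * is PAGE_SIZE (4K), i.e. two fragments per page; for rx_frag_size of
 * 8192, get_order(8192) == 1 and big_page_size is 8K, one fragment per
 * order-1 page. RX buffers are presumably carved from pages of this
 * size by be_post_rx_frags().
 */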
1554 static int be_rx_queues_create(struct be_adapter *adapter)
1555 {
1556         struct be_queue_info *eq, *q, *cq;
1557         int rc;
1558
1559         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1560         adapter->rx_eq.max_eqd = BE_MAX_EQD;
1561         adapter->rx_eq.min_eqd = 0;
1562         adapter->rx_eq.cur_eqd = 0;
1563         adapter->rx_eq.enable_aic = true;
1564
1565         /* Alloc Rx Event queue */
1566         eq = &adapter->rx_eq.q;
1567         rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1568                                 sizeof(struct be_eq_entry));
1569         if (rc)
1570                 return rc;
1571
1572         /* Ask BE to create Rx Event queue */
1573         rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
1574         if (rc)
1575                 goto rx_eq_free;
1576
1577         /* Alloc RX eth compl queue */
1578         cq = &adapter->rx_obj.cq;
1579         rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1580                         sizeof(struct be_eth_rx_compl));
1581         if (rc)
1582                 goto rx_eq_destroy;
1583
1584         /* Ask BE to create Rx eth compl queue */
1585         rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1586         if (rc)
1587                 goto rx_cq_free;
1588
1589         /* Alloc RX eth queue */
1590         q = &adapter->rx_obj.q;
1591         rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
1592         if (rc)
1593                 goto rx_cq_destroy;
1594
1595         /* Ask BE to create Rx eth queue */
1596         rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1597                 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
1598         if (rc)
1599                 goto rx_q_free;
1600
1601         return 0;
1602 rx_q_free:
1603         be_queue_free(adapter, q);
1604 rx_cq_destroy:
1605         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1606 rx_cq_free:
1607         be_queue_free(adapter, cq);
1608 rx_eq_destroy:
1609         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1610 rx_eq_free:
1611         be_queue_free(adapter, eq);
1612         return rc;
1613 }
1614
1615 /* There are 8 evt ids per func. Returns the evt id's bit number */
1616 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1617 {
1618         return eq_id - adapter->base_eq_id;
1619 }
1620
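/* Legacy INTx handler. Each PCI function owns 8 event-queue ids (see
 * be_evt_bit_get() above), so eq_id / 8 selects this function's word
 * in the CEV_ISR register block. A zero read means the shared
 * interrupt line was raised by another device, hence IRQ_NONE.
 */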
1621 static irqreturn_t be_intx(int irq, void *dev)
1622 {
1623         struct be_adapter *adapter = dev;
1624         int isr;
1625
1626         isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1627                 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1628         if (!isr)
1629                 return IRQ_NONE;
1630
1631         event_handle(adapter, &adapter->tx_eq);
1632         event_handle(adapter, &adapter->rx_eq);
1633
1634         return IRQ_HANDLED;
1635 }
1636
1637 static irqreturn_t be_msix_rx(int irq, void *dev)
1638 {
1639         struct be_adapter *adapter = dev;
1640
1641         event_handle(adapter, &adapter->rx_eq);
1642
1643         return IRQ_HANDLED;
1644 }
1645
1646 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649
1650         event_handle(adapter, &adapter->tx_eq);
1651
1652         return IRQ_HANDLED;
1653 }
1654
1655 static inline bool do_gro(struct be_adapter *adapter,
1656                         struct be_eth_rx_compl *rxcp)
1657 {
1658         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1659         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1660
1661         if (err)
1662                 drvr_stats(adapter)->be_rxcp_err++;
1663
1664         return tcp_frame && !err;
1665 }
1666
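/* NAPI poll for RX. Per the NAPI contract at most 'budget' completions
 * are consumed per call; if fewer than 'budget' were found the poll is
 * done, so napi_complete() is called and the CQ is re-armed (notify
 * with arm == true). Otherwise the CQ is only acked (arm == false) and
 * NAPI will call back in with this EQ's interrupt still disabled.
 */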
1667 int be_poll_rx(struct napi_struct *napi, int budget)
1668 {
1669         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1670         struct be_adapter *adapter =
1671                 container_of(rx_eq, struct be_adapter, rx_eq);
1672         struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1673         struct be_eth_rx_compl *rxcp;
1674         u32 work_done;
1675
1676         drvr_stats(adapter)->be_rx_polls++;
1677         for (work_done = 0; work_done < budget; work_done++) {
1678                 rxcp = be_rx_compl_get(adapter);
1679                 if (!rxcp)
1680                         break;
1681
1682                 if (do_gro(adapter, rxcp))
1683                         be_rx_compl_process_gro(adapter, rxcp);
1684                 else
1685                         be_rx_compl_process(adapter, rxcp);
1686
1687                 be_rx_compl_reset(rxcp);
1688         }
1689
1690         /* Refill the queue */
1691         if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
1692                 be_post_rx_frags(adapter);
1693
1694         /* All consumed */
1695         if (work_done < budget) {
1696                 napi_complete(napi);
1697                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1698         } else {
1699                 /* More to be consumed; continue with interrupts disabled */
1700                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1701         }
1702         return work_done;
1703 }
1704
1705 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1706  * For TX/MCC we don't honour the budget; consume everything.
1707  */
1708 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1709 {
1710         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1711         struct be_adapter *adapter =
1712                 container_of(tx_eq, struct be_adapter, tx_eq);
1713         struct be_queue_info *txq = &adapter->tx_obj.q;
1714         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1715         struct be_eth_tx_compl *txcp;
1716         int tx_compl = 0, mcc_compl, status = 0;
1717         u16 end_idx;
1718
1719         while ((txcp = be_tx_compl_get(tx_cq))) {
1720                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1721                                 wrb_index, txcp);
1722                 be_tx_compl_process(adapter, end_idx);
1723                 tx_compl++;
1724         }
1725
1726         mcc_compl = be_process_mcc(adapter, &status);
1727
1728         napi_complete(napi);
1729
1730         if (mcc_compl) {
1731                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1732                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1733         }
1734
1735         if (tx_compl) {
1736                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1737
1738                 /* As Tx wrbs have been freed up, wake up netdev queue if
1739                  * it was stopped due to lack of tx wrbs.
1740                  */
1741                 if (netif_queue_stopped(adapter->netdev) &&
1742                         atomic_read(&txq->used) < txq->len / 2) {
1743                         netif_wake_queue(adapter->netdev);
1744                 }
1745
1746                 drvr_stats(adapter)->be_tx_events++;
1747                 drvr_stats(adapter)->be_tx_compl += tx_compl;
1748         }
1749
1750         return 1;
1751 }
1752
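/* Detect and dump an unrecoverable error (UE). The UE status words sit
 * in PCI config space; bits also set in the corresponding mask
 * registers are filtered out (presumably flagged by firmware as
 * expected), and each remaining bit is decoded via the
 * ue_status_low_desc[]/ue_status_hi_desc[] tables at the top of this
 * file.
 */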
1753 void be_detect_dump_ue(struct be_adapter *adapter)
1754 {
1755         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1756         u32 i;
1757
1758         pci_read_config_dword(adapter->pdev,
1759                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1760         pci_read_config_dword(adapter->pdev,
1761                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1762         pci_read_config_dword(adapter->pdev,
1763                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1764         pci_read_config_dword(adapter->pdev,
1765                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1766
1767         ue_status_lo &= ~ue_status_lo_mask;
1768         ue_status_hi &= ~ue_status_hi_mask;
1769
1770         if (ue_status_lo || ue_status_hi) {
1771                 adapter->ue_detected = true;
1772                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1773         }
1774
1775         if (ue_status_lo) {
1776                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1777                         if (ue_status_lo & 1)
1778                                 dev_err(&adapter->pdev->dev,
1779                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1780                 }
1781         }
1782         if (ue_status_hi) {
1783                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1784                         if (ue_status_hi & 1)
1785                                 dev_err(&adapter->pdev->dev,
1786                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1787                 }
1788         }
1790 }
1791
1792 static void be_worker(struct work_struct *work)
1793 {
1794         struct be_adapter *adapter =
1795                 container_of(work, struct be_adapter, work.work);
1796
1797         if (!adapter->stats_ioctl_sent)
1798                 be_cmd_get_stats(adapter, &adapter->stats.cmd);
1799
1800         /* Set EQ delay */
1801         be_rx_eqd_update(adapter);
1802
1803         be_tx_rate_update(adapter);
1804         be_rx_rate_update(adapter);
1805
1806         if (adapter->rx_post_starved) {
1807                 adapter->rx_post_starved = false;
1808                 be_post_rx_frags(adapter);
1809         }
1810         if (!adapter->ue_detected)
1811                 be_detect_dump_ue(adapter);
1812
1813         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1814 }
1815
1816 static void be_msix_disable(struct be_adapter *adapter)
1817 {
1818         if (adapter->msix_enabled) {
1819                 pci_disable_msix(adapter->pdev);
1820                 adapter->msix_enabled = false;
1821         }
1822 }
1823
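/* Ask for all BE_NUM_MSIX_VECTORS MSI-X vectors in one shot. On any
 * failure msix_enabled stays false and be_irq_register() later falls
 * back to INTx; a positive return from pci_enable_msix() (fewer
 * vectors available than requested) is treated as failure rather than
 * retried with a smaller count.
 */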
1824 static void be_msix_enable(struct be_adapter *adapter)
1825 {
1826         int i, status;
1827
1828         for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1829                 adapter->msix_entries[i].entry = i;
1830
1831         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1832                 BE_NUM_MSIX_VECTORS);
1833         if (status == 0)
1834                 adapter->msix_enabled = true;
1835 }
1836
1837 static void be_sriov_enable(struct be_adapter *adapter)
1838 {
1839         be_check_sriov_fn_type(adapter);
1840 #ifdef CONFIG_PCI_IOV
1841         if (be_physfn(adapter) && num_vfs) {
1842                 int status;
1843
1844                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1845                 adapter->sriov_enabled = !status;
1846         }
1847 #endif
1848 }
1849
1850 static void be_sriov_disable(struct be_adapter *adapter)
1851 {
1852 #ifdef CONFIG_PCI_IOV
1853         if (adapter->sriov_enabled) {
1854                 pci_disable_sriov(adapter->pdev);
1855                 adapter->sriov_enabled = false;
1856         }
1857 #endif
1858 }
1859
1860 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1861 {
1862         return adapter->msix_entries[
1863                         be_evt_bit_get(adapter, eq_id)].vector;
1864 }
1865
1866 static int be_request_irq(struct be_adapter *adapter,
1867                 struct be_eq_obj *eq_obj,
1868                 void *handler, char *desc)
1869 {
1870         struct net_device *netdev = adapter->netdev;
1871         int vec;
1872
1873         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1874         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1875         return request_irq(vec, handler, 0, eq_obj->desc, adapter);
1876 }
1877
1878 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1879 {
1880         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1881         free_irq(vec, adapter);
1882 }
1883
1884 static int be_msix_register(struct be_adapter *adapter)
1885 {
1886         int status;
1887
1888         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
1889         if (status)
1890                 goto err;
1891
1892         status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1893         if (status)
1894                 goto free_tx_irq;
1895
1896         return 0;
1897
1898 free_tx_irq:
1899         be_free_irq(adapter, &adapter->tx_eq);
1900 err:
1901         dev_warn(&adapter->pdev->dev,
1902                 "MSIX Request IRQ failed - err %d\n", status);
1903         pci_disable_msix(adapter->pdev);
1904         adapter->msix_enabled = false;
1905         return status;
1906 }
1907
1908 static int be_irq_register(struct be_adapter *adapter)
1909 {
1910         struct net_device *netdev = adapter->netdev;
1911         int status;
1912
1913         if (adapter->msix_enabled) {
1914                 status = be_msix_register(adapter);
1915                 if (status == 0)
1916                         goto done;
1917                 /* INTx is not supported for VF */
1918                 if (!be_physfn(adapter))
1919                         return status;
1920         }
1921
1922         /* INTx */
1923         netdev->irq = adapter->pdev->irq;
1924         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1925                         adapter);
1926         if (status) {
1927                 dev_err(&adapter->pdev->dev,
1928                         "INTx request IRQ failed - err %d\n", status);
1929                 return status;
1930         }
1931 done:
1932         adapter->isr_registered = true;
1933         return 0;
1934 }
1935
1936 static void be_irq_unregister(struct be_adapter *adapter)
1937 {
1938         struct net_device *netdev = adapter->netdev;
1939
1940         if (!adapter->isr_registered)
1941                 return;
1942
1943         /* INTx */
1944         if (!adapter->msix_enabled) {
1945                 free_irq(netdev->irq, adapter);
1946                 goto done;
1947         }
1948
1949         /* MSIx */
1950         be_free_irq(adapter, &adapter->tx_eq);
1951         be_free_irq(adapter, &adapter->rx_eq);
1952 done:
1953         adapter->isr_registered = false;
1954 }
1955
1956 static int be_close(struct net_device *netdev)
1957 {
1958         struct be_adapter *adapter = netdev_priv(netdev);
1959         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1960         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1961         int vec;
1962
1963         cancel_delayed_work_sync(&adapter->work);
1964
1965         be_async_mcc_disable(adapter);
1966
1967         netif_stop_queue(netdev);
1968         netif_carrier_off(netdev);
1969         adapter->link_up = false;
1970
1971         be_intr_set(adapter, false);
1972
1973         if (adapter->msix_enabled) {
1974                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1975                 synchronize_irq(vec);
1976                 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1977                 synchronize_irq(vec);
1978         } else {
1979                 synchronize_irq(netdev->irq);
1980         }
1981         be_irq_unregister(adapter);
1982
1983         napi_disable(&rx_eq->napi);
1984         napi_disable(&tx_eq->napi);
1985
1986         /* Wait for all pending tx completions to arrive so that
1987          * all tx skbs are freed.
1988          */
1989         be_tx_compl_clean(adapter);
1990
1991         return 0;
1992 }
1993
1994 static int be_open(struct net_device *netdev)
1995 {
1996         struct be_adapter *adapter = netdev_priv(netdev);
1997         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1998         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1999         bool link_up;
2000         int status;
2001         u8 mac_speed;
2002         u16 link_speed;
2003
2004         /* First time posting */
2005         be_post_rx_frags(adapter);
2006
2007         napi_enable(&rx_eq->napi);
2008         napi_enable(&tx_eq->napi);
2009
2010         be_irq_register(adapter);
2011
2012         be_intr_set(adapter, true);
2013
2014         /* The evt queues are created in unarmed state; arm them */
2015         be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
2016         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2017
2018         /* Rx compl queue may be in unarmed state; rearm it */
2019         be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2020
2021         /* Now that interrupts are on we can process async mcc */
2022         be_async_mcc_enable(adapter);
2023
2024         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2025
2026         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2027                         &link_speed);
2028         if (status)
2029                 goto err;
2030         be_link_status_update(adapter, link_up);
2031
2032         if (be_physfn(adapter)) {
2033                 status = be_vid_config(adapter, false, 0);
2034                 if (status)
2035                         goto err;
2036
2037                 status = be_cmd_set_flow_control(adapter,
2038                                 adapter->tx_fc, adapter->rx_fc);
2039                 if (status)
2040                         goto err;
2041         }
2042
2043         return 0;
2044 err:
2045         be_close(adapter->netdev);
2046         return -EIO;
2047 }
2048
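/* Configure or tear down magic-packet Wake-on-LAN. Enabling sets the
 * PM control bits in PCI config space, programs the firmware with the
 * netdev's MAC as the magic-packet filter and arms PCI wake for
 * D3hot/D3cold; disabling programs an all-zero MAC (presumably
 * clearing the filter) and disarms PCI wake. The DMA buffer here only
 * carries the firmware command payload.
 */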
2049 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2050 {
2051         struct be_dma_mem cmd;
2052         int status = 0;
2053         u8 mac[ETH_ALEN];
2054
2055         memset(mac, 0, ETH_ALEN);
2056
2057         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2058         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2059         if (cmd.va == NULL)
2060                 return -1;
2061         memset(cmd.va, 0, cmd.size);
2062
2063         if (enable) {
2064                 status = pci_write_config_dword(adapter->pdev,
2065                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2066                 if (status) {
2067                         dev_err(&adapter->pdev->dev,
2068                                 "Could not enable Wake-on-LAN\n");
2069                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2070                                         cmd.dma);
2071                         return status;
2072                 }
2073                 status = be_cmd_enable_magic_wol(adapter,
2074                                 adapter->netdev->dev_addr, &cmd);
2075                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2076                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2077         } else {
2078                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2079                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2080                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2081         }
2082
2083         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2084         return status;
2085 }
2086
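/* Bring-up order: create the interface (and, on the PF, one interface
 * per VF), then the TX, RX and MCC queues; MCC must come last as it
 * shares the TX EQ (see be_mcc_queues_create() above). Each error
 * label unwinds exactly the steps that succeeded before it.
 */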
2087 static int be_setup(struct be_adapter *adapter)
2088 {
2089         struct net_device *netdev = adapter->netdev;
2090         u32 cap_flags, en_flags, vf = 0;
2091         int status;
2092         u8 mac[ETH_ALEN];
2093
2094         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2095
2096         if (be_physfn(adapter)) {
2097                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2098                                 BE_IF_FLAGS_PROMISCUOUS |
2099                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2100                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2101         }
2102
2103         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2104                         netdev->dev_addr, false/* pmac_invalid */,
2105                         &adapter->if_handle, &adapter->pmac_id, 0);
2106         if (status != 0)
2107                 goto do_none;
2108
2109         if (be_physfn(adapter)) {
2110                 while (vf < num_vfs) {
2111                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2112                                         | BE_IF_FLAGS_BROADCAST;
2113                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2114                                         mac, true,
2115                                         &adapter->vf_cfg[vf].vf_if_handle,
2116                                         NULL, vf+1);
2117                         if (status) {
2118                                 dev_err(&adapter->pdev->dev,
2119                                 "Interface Create failed for VF %d\n", vf);
2120                                 goto if_destroy;
2121                         }
2122                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2123                         vf++;
2124                 }
2125         } else {
2126                 status = be_cmd_mac_addr_query(adapter, mac,
2127                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2128                 if (!status) {
2129                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2130                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2131                 }
2132         }
2133
2134         status = be_tx_queues_create(adapter);
2135         if (status != 0)
2136                 goto if_destroy;
2137
2138         status = be_rx_queues_create(adapter);
2139         if (status != 0)
2140                 goto tx_qs_destroy;
2141
2142         status = be_mcc_queues_create(adapter);
2143         if (status != 0)
2144                 goto rx_qs_destroy;
2145
2146         adapter->link_speed = -1;
2147
2148         return 0;
2149
2150 rx_qs_destroy:
2151         be_rx_queues_destroy(adapter);
2152 tx_qs_destroy:
2153         be_tx_queues_destroy(adapter);
2154 if_destroy:
2155         for (vf = 0; vf < num_vfs; vf++)
2156                 if (adapter->vf_cfg[vf].vf_if_handle)
2157                         be_cmd_if_destroy(adapter,
2158                                         adapter->vf_cfg[vf].vf_if_handle);
2159         be_cmd_if_destroy(adapter, adapter->if_handle);
2160 do_none:
2161         return status;
2162 }
2163
2164 static int be_clear(struct be_adapter *adapter)
2165 {
2166         be_mcc_queues_destroy(adapter);
2167         be_rx_queues_destroy(adapter);
2168         be_tx_queues_destroy(adapter);
2169
2170         be_cmd_if_destroy(adapter, adapter->if_handle);
2171
2172         /* tell fw we're done with firing cmds */
2173         be_cmd_fw_clean(adapter);
2174         return 0;
2175 }
2176
2178 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2179 char flash_cookie[2][16] =      {"*** SE FLAS",
2180                                 "H DIRECTORY *** "};
2181
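/* Decide whether the redboot (boot code) region needs reflashing. The
 * last 4 bytes of a redboot image hold its CRC, so the CRC currently
 * in flash is fetched from the firmware and compared with the same
 * offset in the new image; matching CRCs mean the image is unchanged
 * and the rewrite can be skipped.
 */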
2182 static bool be_flash_redboot(struct be_adapter *adapter,
2183                         const u8 *p, u32 img_start, int image_size,
2184                         int hdr_size)
2185 {
2186         u32 crc_offset;
2187         u8 flashed_crc[4];
2188         int status;
2189
2190         crc_offset = hdr_size + img_start + image_size - 4;
2191
2192         p += crc_offset;
2193
2194         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2195                         (image_size - 4));
2196         if (status) {
2197                 dev_err(&adapter->pdev->dev,
2198                 "could not get crc from flash, not flashing redboot\n");
2199                 return false;
2200         }
2201
2202         /* update redboot only if crc does not match */
2203         return memcmp(flashed_crc, p, 4) != 0;
2207 }
2208
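/* Walk the per-generation layout tables below and write each image
 * component of the UFI file to its flash region, pushing the data
 * through the DMA'd flash_cmd buffer in 32KB chunks: every chunk but
 * the last is sent with FLASHROM_OPER_SAVE (presumably staged by the
 * firmware) and the final chunk with FLASHROM_OPER_FLASH to commit the
 * component.
 */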
2209 static int be_flash_data(struct be_adapter *adapter,
2210                         const struct firmware *fw,
2211                         struct be_dma_mem *flash_cmd, int num_of_images)
2213 {
2214         int status = 0, i, filehdr_size = 0;
2215         u32 total_bytes = 0, flash_op;
2216         int num_bytes;
2217         const u8 *p = fw->data;
2218         struct be_cmd_write_flashrom *req = flash_cmd->va;
2219         struct flash_comp *pflashcomp;
2220         int num_comp;
2221
2222         struct flash_comp gen3_flash_types[9] = {
2223                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2224                         FLASH_IMAGE_MAX_SIZE_g3},
2225                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2226                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2227                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2228                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2229                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2230                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2231                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2232                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2233                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2234                         FLASH_IMAGE_MAX_SIZE_g3},
2235                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2236                         FLASH_IMAGE_MAX_SIZE_g3},
2237                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2238                         FLASH_IMAGE_MAX_SIZE_g3},
2239                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2240                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2241         };
2242         struct flash_comp gen2_flash_types[8] = {
2243                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2244                         FLASH_IMAGE_MAX_SIZE_g2},
2245                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2246                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2247                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2248                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2249                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2250                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2251                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2252                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2253                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2254                         FLASH_IMAGE_MAX_SIZE_g2},
2255                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2256                         FLASH_IMAGE_MAX_SIZE_g2},
2257                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2258                          FLASH_IMAGE_MAX_SIZE_g2}
2259         };
2260
2261         if (adapter->generation == BE_GEN3) {
2262                 pflashcomp = gen3_flash_types;
2263                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2264                 num_comp = 9;
2265         } else {
2266                 pflashcomp = gen2_flash_types;
2267                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2268                 num_comp = 8;
2269         }
2270         for (i = 0; i < num_comp; i++) {
2271                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2272                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2273                         continue;
2274                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2275                         (!be_flash_redboot(adapter, fw->data,
2276                          pflashcomp[i].offset, pflashcomp[i].size,
2277                          filehdr_size)))
2278                         continue;
2279                 p = fw->data;
2280                 p += filehdr_size + pflashcomp[i].offset
2281                         + (num_of_images * sizeof(struct image_hdr));
2282                 if (p + pflashcomp[i].size > fw->data + fw->size)
2283                         return -1;
2284                 total_bytes = pflashcomp[i].size;
2285                 while (total_bytes) {
2286                         if (total_bytes > 32*1024)
2287                                 num_bytes = 32*1024;
2288                         else
2289                                 num_bytes = total_bytes;
2290                         total_bytes -= num_bytes;
2291
2292                         if (!total_bytes)
2293                                 flash_op = FLASHROM_OPER_FLASH;
2294                         else
2295                                 flash_op = FLASHROM_OPER_SAVE;
2296                         memcpy(req->params.data_buf, p, num_bytes);
2297                         p += num_bytes;
2298                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2299                                 pflashcomp[i].optype, flash_op, num_bytes);
2300                         if (status) {
2301                                 dev_err(&adapter->pdev->dev,
2302                                         "cmd to write to flash rom failed.\n");
2303                                 return -1;
2304                         }
2305                         yield();
2306                 }
2307         }
2308         return 0;
2309 }
2310
2311 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2312 {
2313         if (fhdr == NULL)
2314                 return 0;
2315         if (fhdr->build[0] == '3')
2316                 return BE_GEN3;
2317         else if (fhdr->build[0] == '2')
2318                 return BE_GEN2;
2319         else
2320                 return 0;
2321 }
2322
2323 int be_load_fw(struct be_adapter *adapter, u8 *func)
2324 {
2325         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2326         const struct firmware *fw;
2327         struct flash_file_hdr_g2 *fhdr;
2328         struct flash_file_hdr_g3 *fhdr3;
2329         struct image_hdr *img_hdr_ptr = NULL;
2330         struct be_dma_mem flash_cmd;
2331         int status, i = 0, num_imgs = 0;
2332         const u8 *p;
2333
2334         strcpy(fw_file, func);
2335
2336         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2337         if (status)
2338                 goto fw_exit;
2339
2340         p = fw->data;
2341         fhdr = (struct flash_file_hdr_g2 *) p;
2342         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2343
2344         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2345         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2346                                         &flash_cmd.dma);
2347         if (!flash_cmd.va) {
2348                 status = -ENOMEM;
2349                 dev_err(&adapter->pdev->dev,
2350                         "Memory allocation failure while flashing\n");
2351                 goto fw_exit;
2352         }
2353
2354         if ((adapter->generation == BE_GEN3) &&
2355                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2356                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2357                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2358                 for (i = 0; i < num_imgs; i++) {
2359                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2360                                         (sizeof(struct flash_file_hdr_g3) +
2361                                          i * sizeof(struct image_hdr)));
2362                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2363                                 status = be_flash_data(adapter, fw, &flash_cmd,
2364                                                         num_imgs);
2365                 }
2366         } else if ((adapter->generation == BE_GEN2) &&
2367                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2368                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2369         } else {
2370                 dev_err(&adapter->pdev->dev,
2371                         "UFI and Interface are not compatible for flashing\n");
2372                 status = -1;
2373         }
2374
2375         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2376                                 flash_cmd.dma);
2377         if (status) {
2378                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2379                 goto fw_exit;
2380         }
2381
2382         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2383
2384 fw_exit:
2385         release_firmware(fw);
2386         return status;
2387 }
2388
2389 static const struct net_device_ops be_netdev_ops = {
2390         .ndo_open               = be_open,
2391         .ndo_stop               = be_close,
2392         .ndo_start_xmit         = be_xmit,
2393         .ndo_get_stats          = be_get_stats,
2394         .ndo_set_rx_mode        = be_set_multicast_list,
2395         .ndo_set_mac_address    = be_mac_addr_set,
2396         .ndo_change_mtu         = be_change_mtu,
2397         .ndo_validate_addr      = eth_validate_addr,
2398         .ndo_vlan_rx_register   = be_vlan_register,
2399         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2400         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2401         .ndo_set_vf_mac         = be_set_vf_mac,
2402         .ndo_set_vf_vlan        = be_set_vf_vlan,
2403         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2404         .ndo_get_vf_config      = be_get_vf_config
2405 };
2406
2407 static void be_netdev_init(struct net_device *netdev)
2408 {
2409         struct be_adapter *adapter = netdev_priv(netdev);
2410
2411         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2412                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2413                 NETIF_F_GRO | NETIF_F_TSO6;
2414
2415         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2416
2417         netdev->flags |= IFF_MULTICAST;
2418
2419         adapter->rx_csum = true;
2420
2421         /* Default settings for Rx and Tx flow control */
2422         adapter->rx_fc = true;
2423         adapter->tx_fc = true;
2424
2425         netif_set_gso_max_size(netdev, 65535);
2426
2427         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2428
2429         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2430
2431         netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2432                 BE_NAPI_WEIGHT);
2433         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2434                 BE_NAPI_WEIGHT);
2435
2436         netif_carrier_off(netdev);
2437         netif_stop_queue(netdev);
2438 }
2439
2440 static void be_unmap_pci_bars(struct be_adapter *adapter)
2441 {
2442         if (adapter->csr)
2443                 iounmap(adapter->csr);
2444         if (adapter->db)
2445                 iounmap(adapter->db);
2446         if (adapter->pcicfg && be_physfn(adapter))
2447                 iounmap(adapter->pcicfg);
2448 }
2449
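/* Map the PCI BARs. The layout implied by the code below:
 *   Gen2:    CSR = BAR 2, doorbells = BAR 4, pcicfg shadow = BAR 1
 *   Gen3 PF: CSR = BAR 2, doorbells = BAR 4, pcicfg shadow = BAR 0
 *   Gen3 VF: doorbells = BAR 0, pcicfg at a fixed offset inside it
 * Only the physical function maps the CSR BAR.
 */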
2450 static int be_map_pci_bars(struct be_adapter *adapter)
2451 {
2452         u8 __iomem *addr;
2453         int pcicfg_reg, db_reg;
2454
2455         if (be_physfn(adapter)) {
2456                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2457                                 pci_resource_len(adapter->pdev, 2));
2458                 if (addr == NULL)
2459                         return -ENOMEM;
2460                 adapter->csr = addr;
2461         }
2462
2463         if (adapter->generation == BE_GEN2) {
2464                 pcicfg_reg = 1;
2465                 db_reg = 4;
2466         } else {
2467                 pcicfg_reg = 0;
2468                 if (be_physfn(adapter))
2469                         db_reg = 4;
2470                 else
2471                         db_reg = 0;
2472         }
2473         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2474                                 pci_resource_len(adapter->pdev, db_reg));
2475         if (addr == NULL)
2476                 goto pci_map_err;
2477         adapter->db = addr;
2478
2479         if (be_physfn(adapter)) {
2480                 addr = ioremap_nocache(
2481                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2482                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2483                 if (addr == NULL)
2484                         goto pci_map_err;
2485                 adapter->pcicfg = addr;
2486         } else
2487                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2488
2489         return 0;
2490 pci_map_err:
2491         be_unmap_pci_bars(adapter);
2492         return -ENOMEM;
2493 }
2494
2496 static void be_ctrl_cleanup(struct be_adapter *adapter)
2497 {
2498         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2499
2500         be_unmap_pci_bars(adapter);
2501
2502         if (mem->va)
2503                 pci_free_consistent(adapter->pdev, mem->size,
2504                         mem->va, mem->dma);
2505
2506         mem = &adapter->mc_cmd_mem;
2507         if (mem->va)
2508                 pci_free_consistent(adapter->pdev, mem->size,
2509                         mem->va, mem->dma);
2510 }
2511
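/* One-time control-path setup: map the BARs and carve out the DMA
 * memory used for mailbox commands. The mailbox evidently must be
 * 16-byte aligned and pci_alloc_consistent() is not trusted to promise
 * that, so 16 extra bytes are allocated and both the virtual and bus
 * addresses are rounded up with PTR_ALIGN(); the unaligned original is
 * kept in mbox_mem_alloced for the eventual free.
 */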
2512 static int be_ctrl_init(struct be_adapter *adapter)
2513 {
2514         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2515         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2516         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2517         int status;
2518
2519         status = be_map_pci_bars(adapter);
2520         if (status)
2521                 goto done;
2522
2523         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2524         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2525                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2526         if (!mbox_mem_alloc->va) {
2527                 status = -ENOMEM;
2528                 goto unmap_pci_bars;
2529         }
2530
2531         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2532         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2533         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2534         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2535
2536         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2537         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2538                         &mc_cmd_mem->dma);
2539         if (mc_cmd_mem->va == NULL) {
2540                 status = -ENOMEM;
2541                 goto free_mbox;
2542         }
2543         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2544
2545         spin_lock_init(&adapter->mbox_lock);
2546         spin_lock_init(&adapter->mcc_lock);
2547         spin_lock_init(&adapter->mcc_cq_lock);
2548
2549         init_completion(&adapter->flash_compl);
2550         pci_save_state(adapter->pdev);
2551         return 0;
2552
2553 free_mbox:
2554         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2555                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2556
2557 unmap_pci_bars:
2558         be_unmap_pci_bars(adapter);
2559
2560 done:
2561         return status;
2562 }
2563
2564 static void be_stats_cleanup(struct be_adapter *adapter)
2565 {
2566         struct be_stats_obj *stats = &adapter->stats;
2567         struct be_dma_mem *cmd = &stats->cmd;
2568
2569         if (cmd->va)
2570                 pci_free_consistent(adapter->pdev, cmd->size,
2571                         cmd->va, cmd->dma);
2572 }
2573
2574 static int be_stats_init(struct be_adapter *adapter)
2575 {
2576         struct be_stats_obj *stats = &adapter->stats;
2577         struct be_dma_mem *cmd = &stats->cmd;
2578
2579         cmd->size = sizeof(struct be_cmd_req_get_stats);
2580         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2581         if (cmd->va == NULL)
2582                 return -1;
2583         memset(cmd->va, 0, cmd->size);
2584         return 0;
2585 }
2586
2587 static void __devexit be_remove(struct pci_dev *pdev)
2588 {
2589         struct be_adapter *adapter = pci_get_drvdata(pdev);
2590
2591         if (!adapter)
2592                 return;
2593
2594         unregister_netdev(adapter->netdev);
2595
2596         be_clear(adapter);
2597
2598         be_stats_cleanup(adapter);
2599
2600         be_ctrl_cleanup(adapter);
2601
2602         be_sriov_disable(adapter);
2603
2604         be_msix_disable(adapter);
2605
2606         pci_set_drvdata(pdev, NULL);
2607         pci_release_regions(pdev);
2608         pci_disable_device(pdev);
2609
2610         free_netdev(adapter->netdev);
2611 }
2612
2613 static int be_get_config(struct be_adapter *adapter)
2614 {
2615         int status;
2616         u8 mac[ETH_ALEN];
2617
2618         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2619         if (status)
2620                 return status;
2621
2622         status = be_cmd_query_fw_cfg(adapter,
2623                                 &adapter->port_num, &adapter->function_mode);
2624         if (status)
2625                 return status;
2626
2627         memset(mac, 0, ETH_ALEN);
2628
2629         if (be_physfn(adapter)) {
2630                 status = be_cmd_mac_addr_query(adapter, mac,
2631                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2632
2633                 if (status)
2634                         return status;
2635
2636                 if (!is_valid_ether_addr(mac))
2637                         return -EADDRNOTAVAIL;
2638
2639                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2640                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2641         }
2642
2643         if (adapter->function_mode & 0x400)
2644                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2645         else
2646                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2647
2648         return 0;
2649 }
2650
2651 static int __devinit be_probe(struct pci_dev *pdev,
2652                         const struct pci_device_id *pdev_id)
2653 {
2654         int status = 0;
2655         struct be_adapter *adapter;
2656         struct net_device *netdev;
2657
2659         status = pci_enable_device(pdev);
2660         if (status)
2661                 goto do_none;
2662
2663         status = pci_request_regions(pdev, DRV_NAME);
2664         if (status)
2665                 goto disable_dev;
2666         pci_set_master(pdev);
2667
2668         netdev = alloc_etherdev(sizeof(struct be_adapter));
2669         if (netdev == NULL) {
2670                 status = -ENOMEM;
2671                 goto rel_reg;
2672         }
2673         adapter = netdev_priv(netdev);
2674
2675         switch (pdev->device) {
2676         case BE_DEVICE_ID1:
2677         case OC_DEVICE_ID1:
2678                 adapter->generation = BE_GEN2;
2679                 break;
2680         case BE_DEVICE_ID2:
2681         case OC_DEVICE_ID2:
2682                 adapter->generation = BE_GEN3;
2683                 break;
2684         default:
2685                 adapter->generation = 0;
2686         }
2687
2688         adapter->pdev = pdev;
2689         pci_set_drvdata(pdev, adapter);
2690         adapter->netdev = netdev;
2691         be_netdev_init(netdev);
2692         SET_NETDEV_DEV(netdev, &pdev->dev);
2693
2694         be_msix_enable(adapter);
2695
2696         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2697         if (!status) {
2698                 netdev->features |= NETIF_F_HIGHDMA;
2699         } else {
2700                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2701                 if (status) {
2702                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2703                         goto free_netdev;
2704                 }
2705         }
2706
2707         be_sriov_enable(adapter);
2708
2709         status = be_ctrl_init(adapter);
2710         if (status)
2711                 goto free_netdev;
2712
2713         /* sync up with fw's ready state */
2714         if (be_physfn(adapter)) {
2715                 status = be_cmd_POST(adapter);
2716                 if (status)
2717                         goto ctrl_clean;
2718         }
2719
2720         /* tell fw we're ready to fire cmds */
2721         status = be_cmd_fw_init(adapter);
2722         if (status)
2723                 goto ctrl_clean;
2724
2725         if (be_physfn(adapter)) {
2726                 status = be_cmd_reset_function(adapter);
2727                 if (status)
2728                         goto ctrl_clean;
2729         }
2730
2731         status = be_stats_init(adapter);
2732         if (status)
2733                 goto ctrl_clean;
2734
2735         status = be_get_config(adapter);
2736         if (status)
2737                 goto stats_clean;
2738
2739         INIT_DELAYED_WORK(&adapter->work, be_worker);
2740
2741         status = be_setup(adapter);
2742         if (status)
2743                 goto stats_clean;
2744
2745         status = register_netdev(netdev);
2746         if (status != 0)
2747                 goto unsetup;
2748
2749         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2750         return 0;
2751
2752 unsetup:
2753         be_clear(adapter);
2754 stats_clean:
2755         be_stats_cleanup(adapter);
2756 ctrl_clean:
2757         be_ctrl_cleanup(adapter);
2758 free_netdev:
2759         be_msix_disable(adapter);
2760         be_sriov_disable(adapter);
2761         free_netdev(adapter->netdev);
2762         pci_set_drvdata(pdev, NULL);
2763 rel_reg:
2764         pci_release_regions(pdev);
2765 disable_dev:
2766         pci_disable_device(pdev);
2767 do_none:
2768         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2769         return status;
2770 }
2771
2772 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2773 {
2774         struct be_adapter *adapter = pci_get_drvdata(pdev);
2775         struct net_device *netdev =  adapter->netdev;
2776
2777         if (adapter->wol)
2778                 be_setup_wol(adapter, true);
2779
2780         netif_device_detach(netdev);
2781         if (netif_running(netdev)) {
2782                 rtnl_lock();
2783                 be_close(netdev);
2784                 rtnl_unlock();
2785         }
2786         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2787         be_clear(adapter);
2788
2789         pci_save_state(pdev);
2790         pci_disable_device(pdev);
2791         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2792         return 0;
2793 }
2794
2795 static int be_resume(struct pci_dev *pdev)
2796 {
2797         int status = 0;
2798         struct be_adapter *adapter = pci_get_drvdata(pdev);
2799         struct net_device *netdev =  adapter->netdev;
2800
2801         netif_device_detach(netdev);
2802
2803         status = pci_enable_device(pdev);
2804         if (status)
2805                 return status;
2806
2807         pci_set_power_state(pdev, PCI_D0);
2808         pci_restore_state(pdev);
2809
2810         /* tell fw we're ready to fire cmds */
2811         status = be_cmd_fw_init(adapter);
2812         if (status)
2813                 return status;
2814
2815         be_setup(adapter);
2816         if (netif_running(netdev)) {
2817                 rtnl_lock();
2818                 be_open(netdev);
2819                 rtnl_unlock();
2820         }
2821         netif_device_attach(netdev);
2822
2823         if (adapter->wol)
2824                 be_setup_wol(adapter, false);
2825         return 0;
2826 }
2827
2828 /*
2829  * An FLR will stop BE from DMAing any data.
2830  */
2831 static void be_shutdown(struct pci_dev *pdev)
2832 {
2833         struct be_adapter *adapter = pci_get_drvdata(pdev);
2834         struct net_device *netdev =  adapter->netdev;
2835
2836         netif_device_detach(netdev);
2837
2838         be_cmd_reset_function(adapter);
2839
2840         if (adapter->wol)
2841                 be_setup_wol(adapter, true);
2842
2843         pci_disable_device(pdev);
2844 }
2845
2846 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2847                                 pci_channel_state_t state)
2848 {
2849         struct be_adapter *adapter = pci_get_drvdata(pdev);
2850         struct net_device *netdev =  adapter->netdev;
2851
2852         dev_err(&adapter->pdev->dev, "EEH error detected\n");
2853
2854         adapter->eeh_err = true;
2855
2856         netif_device_detach(netdev);
2857
2858         if (netif_running(netdev)) {
2859                 rtnl_lock();
2860                 be_close(netdev);
2861                 rtnl_unlock();
2862         }
2863         be_clear(adapter);
2864
2865         if (state == pci_channel_io_perm_failure)
2866                 return PCI_ERS_RESULT_DISCONNECT;
2867
2868         pci_disable_device(pdev);
2869
2870         return PCI_ERS_RESULT_NEED_RESET;
2871 }
2872
2873 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2874 {
2875         struct be_adapter *adapter = pci_get_drvdata(pdev);
2876         int status;
2877
2878         dev_info(&adapter->pdev->dev, "EEH reset\n");
2879         adapter->eeh_err = false;
2880
2881         status = pci_enable_device(pdev);
2882         if (status)
2883                 return PCI_ERS_RESULT_DISCONNECT;
2884
2885         pci_set_master(pdev);
2886         pci_set_power_state(pdev, PCI_D0);
2887         pci_restore_state(pdev);
2888
2889         /* Check if card is ok and fw is ready */
2890         status = be_cmd_POST(adapter);
2891         if (status)
2892                 return PCI_ERS_RESULT_DISCONNECT;
2893
2894         return PCI_ERS_RESULT_RECOVERED;
2895 }
2896
2897 static void be_eeh_resume(struct pci_dev *pdev)
2898 {
2899         int status = 0;
2900         struct be_adapter *adapter = pci_get_drvdata(pdev);
2901         struct net_device *netdev =  adapter->netdev;
2902
2903         dev_info(&adapter->pdev->dev, "EEH resume\n");
2904
2905         pci_save_state(pdev);
2906
2907         /* tell fw we're ready to fire cmds */
2908         status = be_cmd_fw_init(adapter);
2909         if (status)
2910                 goto err;
2911
2912         status = be_setup(adapter);
2913         if (status)
2914                 goto err;
2915
2916         if (netif_running(netdev)) {
2917                 status = be_open(netdev);
2918                 if (status)
2919                         goto err;
2920         }
2921         netif_device_attach(netdev);
2922         return;
2923 err:
2924         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2925 }
2926
2927 static struct pci_error_handlers be_eeh_handlers = {
2928         .error_detected = be_eeh_err_detected,
2929         .slot_reset = be_eeh_reset,
2930         .resume = be_eeh_resume,
2931 };
2932
2933 static struct pci_driver be_driver = {
2934         .name = DRV_NAME,
2935         .id_table = be_dev_ids,
2936         .probe = be_probe,
2937         .remove = be_remove,
2938         .suspend = be_suspend,
2939         .resume = be_resume,
2940         .shutdown = be_shutdown,
2941         .err_handler = &be_eeh_handlers
2942 };
2943
2944 static int __init be_init_module(void)
2945 {
2946         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2947             rx_frag_size != 2048) {
2948                 printk(KERN_WARNING DRV_NAME
2949                         " : Module param rx_frag_size must be 2048/4096/8192."
2950                         " Using 2048\n");
2951                 rx_frag_size = 2048;
2952         }
2953
2954         if (num_vfs > 32) {
2955                 printk(KERN_WARNING DRV_NAME
2956                         " : Module param num_vfs must not be greater than 32."
2957                         " Using 32\n");
2958                 num_vfs = 32;
2959         }
2960
2961         return pci_register_driver(&be_driver);
2962 }
2963 module_init(be_init_module);
2964
2965 static void __exit be_exit_module(void)
2966 {
2967         pci_unregister_driver(&be_driver);
2968 }
2969 module_exit(be_exit_module);