be2net: Fix CSO for UDP packets
[pandora-kernel.git] drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

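/* Allocate DMA-able memory for a queue of 'len' entries, each 'entry_size'
 * bytes wide, and initialize the queue book-keeping. Returns 0 on success.
 */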
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

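/* Enable or disable interrupt delivery to the host by flipping the
 * host-interrupt bit in the MEMBAR interrupt control register.
 */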
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

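/* Ring the RQ doorbell: tell the adapter that 'posted' new receive buffers
 * are available on queue 'qid'. The wmb() ensures the buffer descriptor
 * writes are visible to the device before the doorbell write.
 */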
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

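/* Ring the EQ doorbell: acknowledge 'num_popped' processed events and
 * optionally re-arm the event queue and/or clear the interrupt.
 */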
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

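/* Ring the CQ doorbell: acknowledge 'num_popped' processed completions
 * and optionally re-arm the completion queue.
 */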
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

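/* Replace the currently programmed unicast MAC (pmac) with the new
 * address. VFs skip the firmware commands; the PF programs the MAC on
 * their behalf, so they only update the netdev copy.
 */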
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

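/* Map the skb's header and frags for DMA and fill one WRB per fragment,
 * preceded by the header WRB (and followed by a dummy WRB if needed to
 * keep the count even). Returns the number of data bytes mapped, or 0
 * after unwinding partial mappings on a DMA mapping error.
 */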
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

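/* Main transmit entry point: build the WRBs for this skb, stop the queue
 * if it is close to full, and ring the TX doorbell.
 */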
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}

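/* Returns true if the completion says the HW validated the checksums:
 * the L4 checksum must pass, and the IP checksum must pass for IPv4
 * (IPv6 has no IP-header checksum).
 */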
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data? */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

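/* Returns the next valid completion in the RX CQ, converted to CPU
 * endianness, or NULL if none is pending. The rmb() ensures the entry is
 * not read before its valid bit is seen.
 */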
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

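/* Reclaim a transmitted skb: unmap each of its WRBs up to 'last_index',
 * release the WRB entries back to the TX queue and free the skb.
 */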
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

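/* Drain all pending entries from the event queue, ack them via the EQ
 * doorbell (re-armed) and schedule NAPI polling if any event was found.
 */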
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

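/* Drain the RX completion queue and free any receive buffers that were
 * posted but never consumed. Used while tearing down the RX queues.
 */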
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

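/* Create the TX event queue, TX completion queue and TX queue in that
 * order, unwinding in reverse on any failure.
 */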
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}

1548 static int be_rx_queues_create(struct be_adapter *adapter)
1549 {
1550         struct be_queue_info *eq, *q, *cq;
1551         struct be_rx_obj *rxo;
1552         int rc, i;
1553
1554         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1555         for_all_rx_queues(adapter, rxo, i) {
1556                 rxo->adapter = adapter;
1557                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1558                 rxo->rx_eq.enable_aic = true;
1559
1560                 /* EQ */
1561                 eq = &rxo->rx_eq.q;
1562                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1563                                         sizeof(struct be_eq_entry));
1564                 if (rc)
1565                         goto err;
1566
1567                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1568                 if (rc)
1569                         goto err;
1570
1571                 /* CQ */
1572                 cq = &rxo->cq;
1573                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1574                                 sizeof(struct be_eth_rx_compl));
1575                 if (rc)
1576                         goto err;
1577
1578                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579                 if (rc)
1580                         goto err;
1581
1582                 /* Rx Q */
1583                 q = &rxo->q;
1584                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1585                                 sizeof(struct be_eth_rx_d));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1590                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1591                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1592                 if (rc)
1593                         goto err;
1594         }
1595
1596         if (be_multi_rxq(adapter)) {
1597                 u8 rsstable[MAX_RSS_QS];
1598
1599                 for_all_rss_queues(adapter, rxo, i)
1600                         rsstable[i] = rxo->rss_id;
1601
1602                 rc = be_cmd_rss_config(adapter, rsstable,
1603                         adapter->num_rx_qs - 1);
1604                 if (rc)
1605                         goto err;
1606         }
1607
1608         return 0;
1609 err:
1610         be_rx_queues_destroy(adapter);
1611         return -1;
1612 }
1613
1614 /* There are 8 evt ids per func. Returns the evt id's bit number */
1615 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616 {
1617         return eq_id - adapter->base_eq_id;
1618 }
1619
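/* Worked example (values assumed): with base_eq_id == 8, an event on
 * eq_id 10 maps to bit 2, so be_intx() tests the ISR word against
 * (1 << be_evt_bit_get(adapter, 10)) == (1 << 2).
 */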
1620 static irqreturn_t be_intx(int irq, void *dev)
1621 {
1622         struct be_adapter *adapter = dev;
1623         struct be_rx_obj *rxo;
1624         int isr, i;
1625
1626         isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1627                 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1628         if (!isr)
1629                 return IRQ_NONE;
1630
1631         if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id)) & isr)
1632                 event_handle(adapter, &adapter->tx_eq);
1633
1634         for_all_rx_queues(adapter, rxo, i) {
1635                 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id)) & isr)
1636                         event_handle(adapter, &rxo->rx_eq);
1637         }
1638
1639         return IRQ_HANDLED;
1640 }
1641
1642 static irqreturn_t be_msix_rx(int irq, void *dev)
1643 {
1644         struct be_rx_obj *rxo = dev;
1645         struct be_adapter *adapter = rxo->adapter;
1646
1647         event_handle(adapter, &rxo->rx_eq);
1648
1649         return IRQ_HANDLED;
1650 }
1651
1652 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1653 {
1654         struct be_adapter *adapter = dev;
1655
1656         event_handle(adapter, &adapter->tx_eq);
1657
1658         return IRQ_HANDLED;
1659 }
1660
1661 static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1662                         struct be_eth_rx_compl *rxcp)
1663 {
1664         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1665         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1666
1667         if (err)
1668                 rxo->stats.rxcp_err++;
1669
1670         return tcp_frame && !err;
1671 }
1672
1673 static int be_poll_rx(struct napi_struct *napi, int budget)
1674 {
1675         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1676         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1677         struct be_adapter *adapter = rxo->adapter;
1678         struct be_queue_info *rx_cq = &rxo->cq;
1679         struct be_eth_rx_compl *rxcp;
1680         u32 work_done;
1681
1682         rxo->stats.rx_polls++;
1683         for (work_done = 0; work_done < budget; work_done++) {
1684                 rxcp = be_rx_compl_get(rxo);
1685                 if (!rxcp)
1686                         break;
1687
1688                 if (do_gro(adapter, rxo, rxcp))
1689                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1690                 else
1691                         be_rx_compl_process(adapter, rxo, rxcp);
1692
1693                 be_rx_compl_reset(rxcp);
1694         }
1695
1696         /* Refill the queue */
1697         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1698                 be_post_rx_frags(rxo);
1699
1700         /* All consumed */
1701         if (work_done < budget) {
1702                 napi_complete(napi);
1703                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1704         } else {
1705                 /* More to be consumed; continue with interrupts disabled */
1706                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1707         }
1708         return work_done;
1709 }
1710
1711 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1712  * For TX/MCC we don't honour budget; consume everything.
1713  */
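/* Because everything is consumed here, napi_complete() is called
 * unconditionally and a nominal work count of 1 is returned to the NAPI
 * core.
 */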
1714 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1715 {
1716         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1717         struct be_adapter *adapter =
1718                 container_of(tx_eq, struct be_adapter, tx_eq);
1719         struct be_queue_info *txq = &adapter->tx_obj.q;
1720         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1721         struct be_eth_tx_compl *txcp;
1722         int tx_compl = 0, mcc_compl, status = 0;
1723         u16 end_idx;
1724
1725         while ((txcp = be_tx_compl_get(tx_cq))) {
1726                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1727                                 wrb_index, txcp);
1728                 be_tx_compl_process(adapter, end_idx);
1729                 tx_compl++;
1730         }
1731
1732         mcc_compl = be_process_mcc(adapter, &status);
1733
1734         napi_complete(napi);
1735
1736         if (mcc_compl) {
1737                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1738                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1739         }
1740
1741         if (tx_compl) {
1742                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1743
1744                 /* As Tx wrbs have been freed up, wake up netdev queue if
1745                  * it was stopped due to lack of tx wrbs.
1746                  */
1747                 if (netif_queue_stopped(adapter->netdev) &&
1748                         atomic_read(&txq->used) < txq->len / 2) {
1749                         netif_wake_queue(adapter->netdev);
1750                 }
1751
1752                 tx_stats(adapter)->be_tx_events++;
1753                 tx_stats(adapter)->be_tx_compl += tx_compl;
1754         }
1755
1756         return 1;
1757 }
1758
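/* Dump any unrecoverable error (UE): a bit set in status & ~mask names the
 * failed HW block via the ue_status_{low,hi}_desc tables at the top of
 * this file.
 */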
1759 void be_detect_dump_ue(struct be_adapter *adapter)
1760 {
1761         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1762         u32 i;
1763
1764         pci_read_config_dword(adapter->pdev,
1765                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1766         pci_read_config_dword(adapter->pdev,
1767                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1768         pci_read_config_dword(adapter->pdev,
1769                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1770         pci_read_config_dword(adapter->pdev,
1771                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1772
1773         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1774         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1775
1776         if (ue_status_lo || ue_status_hi) {
1777                 adapter->ue_detected = true;
1778                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1779         }
1780
1781         if (ue_status_lo) {
1782                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1783                         if (ue_status_lo & 1)
1784                                 dev_err(&adapter->pdev->dev,
1785                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1786                 }
1787         }
1788         if (ue_status_hi) {
1789                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1790                         if (ue_status_hi & 1)
1791                                 dev_err(&adapter->pdev->dev,
1792                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1793                 }
1794         }
1795
1796 }
1797
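/* Periodic (every second) housekeeping: refresh HW stats when no request
 * is outstanding, update TX/RX rates and RX EQ delays, replenish any
 * starved RX rings and, until one is found, check for an unrecoverable
 * error.
 */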
1798 static void be_worker(struct work_struct *work)
1799 {
1800         struct be_adapter *adapter =
1801                 container_of(work, struct be_adapter, work.work);
1802         struct be_rx_obj *rxo;
1803         int i;
1804
1805         if (!adapter->stats_ioctl_sent)
1806                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1807
1808         be_tx_rate_update(adapter);
1809
1810         for_all_rx_queues(adapter, rxo, i) {
1811                 be_rx_rate_update(rxo);
1812                 be_rx_eqd_update(adapter, rxo);
1813
1814                 if (rxo->rx_post_starved) {
1815                         rxo->rx_post_starved = false;
1816                         be_post_rx_frags(rxo);
1817                 }
1818         }
1819
1820         if (!adapter->ue_detected)
1821                 be_detect_dump_ue(adapter);
1822
1823         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1824 }
1825
1826 static void be_msix_disable(struct be_adapter *adapter)
1827 {
1828         if (adapter->msix_enabled) {
1829                 pci_disable_msix(adapter->pdev);
1830                 adapter->msix_enabled = false;
1831         }
1832 }
1833
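/* Multiple RX queues (RSS) are used only when the module param allows it,
 * the function is RSS-capable, SR-IOV is off and the 0x400 function_mode
 * bit (assumed to be the multi-channel/FLEX10 mode flag) is clear.
 */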
1834 static int be_num_rxqs_get(struct be_adapter *adapter)
1835 {
1836         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1837                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1838                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1839         } else {
1840                 dev_warn(&adapter->pdev->dev,
1841                         "No support for multiple RX queues\n");
1842                 return 1;
1843         }
1844 }
1845
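/* Request one MSI-X vector per RX queue plus one shared TX/MCC vector.
 * pci_enable_msix() returns a positive count when fewer vectors are
 * available; in that case the code retries with that count and shrinks
 * num_rx_qs to match.
 */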
1846 static void be_msix_enable(struct be_adapter *adapter)
1847 {
1848 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1849         int i, status;
1850
1851         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1852
1853         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1854                 adapter->msix_entries[i].entry = i;
1855
1856         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1857                         adapter->num_rx_qs + 1);
1858         if (status == 0) {
1859                 goto done;
1860         } else if (status >= BE_MIN_MSIX_VECTORS) {
1861                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1862                                 status) == 0) {
1863                         adapter->num_rx_qs = status - 1;
1864                         dev_warn(&adapter->pdev->dev,
1865                                 "Could alloc only %d MSIx vectors. "
1866                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1867                         goto done;
1868                 }
1869         }
1870         return;
1871 done:
1872         adapter->msix_enabled = true;
1873 }
1874
1875 static void be_sriov_enable(struct be_adapter *adapter)
1876 {
1877         be_check_sriov_fn_type(adapter);
1878 #ifdef CONFIG_PCI_IOV
1879         if (be_physfn(adapter) && num_vfs) {
1880                 int status;
1881
1882                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1883                 adapter->sriov_enabled = status ? false : true;
1884         }
1885 #endif
1886 }
1887
1888 static void be_sriov_disable(struct be_adapter *adapter)
1889 {
1890 #ifdef CONFIG_PCI_IOV
1891         if (adapter->sriov_enabled) {
1892                 pci_disable_sriov(adapter->pdev);
1893                 adapter->sriov_enabled = false;
1894         }
1895 #endif
1896 }
1897
1898 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1899 {
1900         return adapter->msix_entries[
1901                         be_evt_bit_get(adapter, eq_id)].vector;
1902 }
1903
1904 static int be_request_irq(struct be_adapter *adapter,
1905                 struct be_eq_obj *eq_obj,
1906                 void *handler, char *desc, void *context)
1907 {
1908         struct net_device *netdev = adapter->netdev;
1909         int vec;
1910
1911         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1912         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1913         return request_irq(vec, handler, 0, eq_obj->desc, context);
1914 }
1915
1916 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1917                         void *context)
1918 {
1919         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1920         free_irq(vec, context);
1921 }
1922
1923 static int be_msix_register(struct be_adapter *adapter)
1924 {
1925         struct be_rx_obj *rxo;
1926         int status, i;
1927         char qname[10];
1928
1929         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1930                                 adapter);
1931         if (status)
1932                 goto err;
1933
1934         for_all_rx_queues(adapter, rxo, i) {
1935                 sprintf(qname, "rxq%d", i);
1936                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1937                                 qname, rxo);
1938                 if (status)
1939                         goto err_msix;
1940         }
1941
1942         return 0;
1943
1944 err_msix:
1945         be_free_irq(adapter, &adapter->tx_eq, adapter);
1946
1947         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1948                 be_free_irq(adapter, &rxo->rx_eq, rxo);
1949
1950 err:
1951         dev_warn(&adapter->pdev->dev,
1952                 "MSIX Request IRQ failed - err %d\n", status);
1953         pci_disable_msix(adapter->pdev);
1954         adapter->msix_enabled = false;
1955         return status;
1956 }
1957
1958 static int be_irq_register(struct be_adapter *adapter)
1959 {
1960         struct net_device *netdev = adapter->netdev;
1961         int status;
1962
1963         if (adapter->msix_enabled) {
1964                 status = be_msix_register(adapter);
1965                 if (status == 0)
1966                         goto done;
1967                 /* INTx is not supported for VF */
1968                 if (!be_physfn(adapter))
1969                         return status;
1970         }
1971
1972         /* INTx */
1973         netdev->irq = adapter->pdev->irq;
1974         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1975                         adapter);
1976         if (status) {
1977                 dev_err(&adapter->pdev->dev,
1978                         "INTx request IRQ failed - err %d\n", status);
1979                 return status;
1980         }
1981 done:
1982         adapter->isr_registered = true;
1983         return 0;
1984 }
1985
1986 static void be_irq_unregister(struct be_adapter *adapter)
1987 {
1988         struct net_device *netdev = adapter->netdev;
1989         struct be_rx_obj *rxo;
1990         int i;
1991
1992         if (!adapter->isr_registered)
1993                 return;
1994
1995         /* INTx */
1996         if (!adapter->msix_enabled) {
1997                 free_irq(netdev->irq, adapter);
1998                 goto done;
1999         }
2000
2001         /* MSIx */
2002         be_free_irq(adapter, &adapter->tx_eq, adapter);
2003
2004         for_all_rx_queues(adapter, rxo, i)
2005                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2006
2007 done:
2008         adapter->isr_registered = false;
2009 }
2010
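/* ndo_stop: quiesce in order - worker, async MCC, TX queue, interrupts,
 * NAPI - then reap outstanding TX completions so every pending skb is
 * freed before returning.
 */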
2011 static int be_close(struct net_device *netdev)
2012 {
2013         struct be_adapter *adapter = netdev_priv(netdev);
2014         struct be_rx_obj *rxo;
2015         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2016         int vec, i;
2017
2018         cancel_delayed_work_sync(&adapter->work);
2019
2020         be_async_mcc_disable(adapter);
2021
2022         netif_stop_queue(netdev);
2023         netif_carrier_off(netdev);
2024         adapter->link_up = false;
2025
2026         be_intr_set(adapter, false);
2027
2028         if (adapter->msix_enabled) {
2029                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
2030                 synchronize_irq(vec);
2031
2032                 for_all_rx_queues(adapter, rxo, i) {
2033                         vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
2034                         synchronize_irq(vec);
2035                 }
2036         } else {
2037                 synchronize_irq(netdev->irq);
2038         }
2039         be_irq_unregister(adapter);
2040
2041         for_all_rx_queues(adapter, rxo, i)
2042                 napi_disable(&rxo->rx_eq.napi);
2043
2044         napi_disable(&tx_eq->napi);
2045
2046         /* Wait for all pending tx completions to arrive so that
2047          * all tx skbs are freed.
2048          */
2049         be_tx_compl_clean(adapter);
2050
2051         return 0;
2052 }
2053
2054 static int be_open(struct net_device *netdev)
2055 {
2056         struct be_adapter *adapter = netdev_priv(netdev);
2057         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2058         struct be_rx_obj *rxo;
2059         bool link_up;
2060         int status, i;
2061         u8 mac_speed;
2062         u16 link_speed;
2063
2064         for_all_rx_queues(adapter, rxo, i) {
2065                 be_post_rx_frags(rxo);
2066                 napi_enable(&rxo->rx_eq.napi);
2067         }
2068         napi_enable(&tx_eq->napi);
2069
2070         be_irq_register(adapter);
2071
2072         be_intr_set(adapter, true);
2073
2074         /* The evt queues are created in unarmed state; arm them */
2075         for_all_rx_queues(adapter, rxo, i) {
2076                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2077                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2078         }
2079         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2080
2081         /* Now that interrupts are on we can process async mcc */
2082         be_async_mcc_enable(adapter);
2083
2084         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2085
2086         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2087                         &link_speed);
2088         if (status)
2089                 goto err;
2090         be_link_status_update(adapter, link_up);
2091
2092         if (be_physfn(adapter)) {
2093                 status = be_vid_config(adapter, false, 0);
2094                 if (status)
2095                         goto err;
2096
2097                 status = be_cmd_set_flow_control(adapter,
2098                                 adapter->tx_fc, adapter->rx_fc);
2099                 if (status)
2100                         goto err;
2101         }
2102
2103         return 0;
2104 err:
2105         be_close(adapter->netdev);
2106         return -EIO;
2107 }
2108
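/* Configure ACPI magic-packet wake-up. Enabling sets the PM control bits
 * in PCI config space and programs the current MAC for magic WoL;
 * disabling programs a zeroed MAC and clears the D3hot/D3cold wake flags.
 */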
2109 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2110 {
2111         struct be_dma_mem cmd;
2112         int status = 0;
2113         u8 mac[ETH_ALEN];
2114
2115         memset(mac, 0, ETH_ALEN);
2116
2117         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2118         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2119         if (cmd.va == NULL)
2120                 return -1;
2121         memset(cmd.va, 0, cmd.size);
2122
2123         if (enable) {
2124                 status = pci_write_config_dword(adapter->pdev,
2125                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2126                 if (status) {
2127                         dev_err(&adapter->pdev->dev,
2128                                 "Could not enable Wake-on-LAN\n");
2129                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2130                                         cmd.dma);
2131                         return status;
2132                 }
2133                 status = be_cmd_enable_magic_wol(adapter,
2134                                 adapter->netdev->dev_addr, &cmd);
2135                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2136                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2137         } else {
2138                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2139                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2140                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2141         }
2142
2143         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2144         return status;
2145 }
2146
2147 /*
2148  * Generate a seed MAC address from the PF MAC Address using jhash.
2149  * MAC Address for VFs are assigned incrementally starting from the seed.
2150  * These addresses are programmed in the ASIC by the PF and the VF driver
2151  * queries for the MAC address during its probe.
2152  */
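/* For illustration: only mac[5] is bumped per VF below, so a seed ending
 * in :00 assigns ...:00 to VF0, ...:01 to VF1, and so on; a carry into
 * mac[4] is not handled (num_vfs is capped at 32 at module load).
 */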
2153 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2154 {
2155         u32 vf = 0;
2156         int status = 0;
2157         u8 mac[ETH_ALEN];
2158
2159         be_vf_eth_addr_generate(adapter, mac);
2160
2161         for (vf = 0; vf < num_vfs; vf++) {
2162                 status = be_cmd_pmac_add(adapter, mac,
2163                                         adapter->vf_cfg[vf].vf_if_handle,
2164                                         &adapter->vf_cfg[vf].vf_pmac_id);
2165                 if (status)
2166                         dev_err(&adapter->pdev->dev,
2167                                 "MAC address add failed for VF %d\n", vf);
2168                 else
2169                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2170
2171                 mac[5] += 1;
2172         }
2173         return status;
2174 }
2175
2176 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2177 {
2178         u32 vf;
2179
2180         for (vf = 0; vf < num_vfs; vf++) {
2181                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2182                         be_cmd_pmac_del(adapter,
2183                                         adapter->vf_cfg[vf].vf_if_handle,
2184                                         adapter->vf_cfg[vf].vf_pmac_id);
2185         }
2186 }
2187
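/* Bring-up sequence: create the interface (plus one per VF on the PF),
 * then the TX, RX and MCC queues; on the PF, also program the VF MAC
 * addresses. On failure, unwind in reverse order via the labels below.
 */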
2188 static int be_setup(struct be_adapter *adapter)
2189 {
2190         struct net_device *netdev = adapter->netdev;
2191         u32 cap_flags, en_flags, vf = 0;
2192         int status;
2193         u8 mac[ETH_ALEN];
2194
2195         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2196
2197         if (be_physfn(adapter)) {
2198                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2199                                 BE_IF_FLAGS_PROMISCUOUS |
2200                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2201                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2202
2203                 if (be_multi_rxq(adapter)) {
2204                         cap_flags |= BE_IF_FLAGS_RSS;
2205                         en_flags |= BE_IF_FLAGS_RSS;
2206                 }
2207         }
2208
2209         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2210                         netdev->dev_addr, false/* pmac_invalid */,
2211                         &adapter->if_handle, &adapter->pmac_id, 0);
2212         if (status != 0)
2213                 goto do_none;
2214
2215         if (be_physfn(adapter)) {
2216                 while (vf < num_vfs) {
2217                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2218                                         | BE_IF_FLAGS_BROADCAST;
2219                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2220                                         mac, true,
2221                                         &adapter->vf_cfg[vf].vf_if_handle,
2222                                         NULL, vf+1);
2223                         if (status) {
2224                                 dev_err(&adapter->pdev->dev,
2225                                 "Interface Create failed for VF %d\n", vf);
2226                                 goto if_destroy;
2227                         }
2228                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2229                         vf++;
2230                 }
2231         } else {
2232                 status = be_cmd_mac_addr_query(adapter, mac,
2233                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2234                 if (!status) {
2235                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2236                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2237                 }
2238         }
2239
2240         status = be_tx_queues_create(adapter);
2241         if (status != 0)
2242                 goto if_destroy;
2243
2244         status = be_rx_queues_create(adapter);
2245         if (status != 0)
2246                 goto tx_qs_destroy;
2247
2248         status = be_mcc_queues_create(adapter);
2249         if (status != 0)
2250                 goto rx_qs_destroy;
2251
2252         if (be_physfn(adapter)) {
2253                 status = be_vf_eth_addr_config(adapter);
2254                 if (status)
2255                         goto mcc_q_destroy;
2256         }
2257
2258         adapter->link_speed = -1;
2259
2260         return 0;
2261
2262 mcc_q_destroy:
2263         if (be_physfn(adapter))
2264                 be_vf_eth_addr_rem(adapter);
2265         be_mcc_queues_destroy(adapter);
2266 rx_qs_destroy:
2267         be_rx_queues_destroy(adapter);
2268 tx_qs_destroy:
2269         be_tx_queues_destroy(adapter);
2270 if_destroy:
2271         for (vf = 0; vf < num_vfs; vf++)
2272                 if (adapter->vf_cfg[vf].vf_if_handle)
2273                         be_cmd_if_destroy(adapter,
2274                                         adapter->vf_cfg[vf].vf_if_handle);
2275         be_cmd_if_destroy(adapter, adapter->if_handle);
2276 do_none:
2277         return status;
2278 }
2279
2280 static int be_clear(struct be_adapter *adapter)
2281 {
2282         if (be_physfn(adapter))
2283                 be_vf_eth_addr_rem(adapter);
2284
2285         be_mcc_queues_destroy(adapter);
2286         be_rx_queues_destroy(adapter);
2287         be_tx_queues_destroy(adapter);
2288
2289         be_cmd_if_destroy(adapter, adapter->if_handle);
2290
2291         /* tell fw we're done with firing cmds */
2292         be_cmd_fw_clean(adapter);
2293         return 0;
2294 }
2295
2297 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2298 static bool be_flash_redboot(struct be_adapter *adapter,
2299                         const u8 *p, u32 img_start, int image_size,
2300                         int hdr_size)
2301 {
2302         u32 crc_offset;
2303         u8 flashed_crc[4];
2304         int status;
2305
2306         crc_offset = hdr_size + img_start + image_size - 4;
2307
2308         p += crc_offset;
2309
2310         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2311                         (image_size - 4));
2312         if (status) {
2313                 dev_err(&adapter->pdev->dev,
2314                 "could not get crc from flash, not flashing redboot\n");
2315                 return false;
2316         }
2317
2318         /* update redboot only if crc does not match */
2319         if (!memcmp(flashed_crc, p, 4))
2320                 return false;
2321         else
2322                 return true;
2323 }
2324
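/* Flash the firmware components listed in the per-generation tables below.
 * Each flash_comp entry is {flash offset, op type, max size}; images are
 * written in 32KB chunks, FLASHROM_OPER_SAVE for intermediate chunks and
 * FLASHROM_OPER_FLASH to commit the final one.
 */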
2325 static int be_flash_data(struct be_adapter *adapter,
2326                         const struct firmware *fw,
2327                         struct be_dma_mem *flash_cmd, int num_of_images)
2328 {
2330         int status = 0, i, filehdr_size = 0;
2331         u32 total_bytes = 0, flash_op;
2332         int num_bytes;
2333         const u8 *p = fw->data;
2334         struct be_cmd_write_flashrom *req = flash_cmd->va;
2335         struct flash_comp *pflashcomp;
2336         int num_comp;
2337
2338         struct flash_comp gen3_flash_types[9] = {
2339                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2340                         FLASH_IMAGE_MAX_SIZE_g3},
2341                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2342                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2343                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2344                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2345                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2346                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2347                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2348                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2349                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2350                         FLASH_IMAGE_MAX_SIZE_g3},
2351                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2352                         FLASH_IMAGE_MAX_SIZE_g3},
2353                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2354                         FLASH_IMAGE_MAX_SIZE_g3},
2355                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2356                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2357         };
2358         struct flash_comp gen2_flash_types[8] = {
2359                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2360                         FLASH_IMAGE_MAX_SIZE_g2},
2361                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2362                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2363                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2364                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2365                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2366                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2367                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2368                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2369                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2370                         FLASH_IMAGE_MAX_SIZE_g2},
2371                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2372                         FLASH_IMAGE_MAX_SIZE_g2},
2373                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2374                          FLASH_IMAGE_MAX_SIZE_g2}
2375         };
2376
2377         if (adapter->generation == BE_GEN3) {
2378                 pflashcomp = gen3_flash_types;
2379                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2380                 num_comp = 9;
2381         } else {
2382                 pflashcomp = gen2_flash_types;
2383                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2384                 num_comp = 8;
2385         }
2386         for (i = 0; i < num_comp; i++) {
2387                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2388                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2389                         continue;
2390                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2391                         (!be_flash_redboot(adapter, fw->data,
2392                          pflashcomp[i].offset, pflashcomp[i].size,
2393                          filehdr_size)))
2394                         continue;
2395                 p = fw->data;
2396                 p += filehdr_size + pflashcomp[i].offset
2397                         + (num_of_images * sizeof(struct image_hdr));
2398                 if (p + pflashcomp[i].size > fw->data + fw->size)
2399                         return -1;
2400                 total_bytes = pflashcomp[i].size;
2401                 while (total_bytes) {
2402                         if (total_bytes > 32*1024)
2403                                 num_bytes = 32*1024;
2404                         else
2405                                 num_bytes = total_bytes;
2406                         total_bytes -= num_bytes;
2407
2408                         if (!total_bytes)
2409                                 flash_op = FLASHROM_OPER_FLASH;
2410                         else
2411                                 flash_op = FLASHROM_OPER_SAVE;
2412                         memcpy(req->params.data_buf, p, num_bytes);
2413                         p += num_bytes;
2414                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2415                                 pflashcomp[i].optype, flash_op, num_bytes);
2416                         if (status) {
2417                                 dev_err(&adapter->pdev->dev,
2418                                         "cmd to write to flash rom failed.\n");
2419                                 return -1;
2420                         }
2421                         yield();
2422                 }
2423         }
2424         return 0;
2425 }
2426
2427 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2428 {
2429         if (fhdr == NULL)
2430                 return 0;
2431         if (fhdr->build[0] == '3')
2432                 return BE_GEN3;
2433         else if (fhdr->build[0] == '2')
2434                 return BE_GEN2;
2435         else
2436                 return 0;
2437 }
2438
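/* Firmware download entry point (reached via ethtool flashing): load the
 * UFI file, check that its generation (derived from the build string)
 * matches the adapter, then write it out with be_flash_data(). For GEN3
 * UFIs the image headers are walked and the image with id 1 is flashed.
 */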
2439 int be_load_fw(struct be_adapter *adapter, u8 *func)
2440 {
2441         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2442         const struct firmware *fw;
2443         struct flash_file_hdr_g2 *fhdr;
2444         struct flash_file_hdr_g3 *fhdr3;
2445         struct image_hdr *img_hdr_ptr = NULL;
2446         struct be_dma_mem flash_cmd;
2447         int status, i = 0, num_imgs = 0;
2448         const u8 *p;
2449
2450         strcpy(fw_file, func);
2451
2452         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2453         if (status)
2454                 goto fw_exit;
2455
2456         p = fw->data;
2457         fhdr = (struct flash_file_hdr_g2 *) p;
2458         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2459
2460         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2461         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2462                                         &flash_cmd.dma);
2463         if (!flash_cmd.va) {
2464                 status = -ENOMEM;
2465                 dev_err(&adapter->pdev->dev,
2466                         "Memory allocation failure while flashing\n");
2467                 goto fw_exit;
2468         }
2469
2470         if ((adapter->generation == BE_GEN3) &&
2471                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2472                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2473                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2474                 for (i = 0; i < num_imgs; i++) {
2475                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2476                                         (sizeof(struct flash_file_hdr_g3) +
2477                                          i * sizeof(struct image_hdr)));
2478                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2479                                 status = be_flash_data(adapter, fw, &flash_cmd,
2480                                                         num_imgs);
2481                 }
2482         } else if ((adapter->generation == BE_GEN2) &&
2483                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2484                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2485         } else {
2486                 dev_err(&adapter->pdev->dev,
2487                         "UFI and Interface are not compatible for flashing\n");
2488                 status = -1;
2489         }
2490
2491         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2492                                 flash_cmd.dma);
2493         if (status) {
2494                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2495                 goto fw_exit;
2496         }
2497
2498         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2499
2500 fw_exit:
2501         release_firmware(fw);
2502         return status;
2503 }
2504
2505 static struct net_device_ops be_netdev_ops = {
2506         .ndo_open               = be_open,
2507         .ndo_stop               = be_close,
2508         .ndo_start_xmit         = be_xmit,
2509         .ndo_set_rx_mode        = be_set_multicast_list,
2510         .ndo_set_mac_address    = be_mac_addr_set,
2511         .ndo_change_mtu         = be_change_mtu,
2512         .ndo_validate_addr      = eth_validate_addr,
2513         .ndo_vlan_rx_register   = be_vlan_register,
2514         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2515         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2516         .ndo_set_vf_mac         = be_set_vf_mac,
2517         .ndo_set_vf_vlan        = be_set_vf_vlan,
2518         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2519         .ndo_get_vf_config      = be_get_vf_config
2520 };
2521
2522 static void be_netdev_init(struct net_device *netdev)
2523 {
2524         struct be_adapter *adapter = netdev_priv(netdev);
2525         struct be_rx_obj *rxo;
2526         int i;
2527
2528         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2529                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2530                 NETIF_F_GRO | NETIF_F_TSO6;
2531
2532         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2533
2534         netdev->flags |= IFF_MULTICAST;
2535
2536         adapter->rx_csum = true;
2537
2538         /* Default settings for Rx and Tx flow control */
2539         adapter->rx_fc = true;
2540         adapter->tx_fc = true;
2541
2542         netif_set_gso_max_size(netdev, 65535);
2543
2544         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2545
2546         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2547
2548         for_all_rx_queues(adapter, rxo, i)
2549                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2550                                 BE_NAPI_WEIGHT);
2551
2552         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2553                 BE_NAPI_WEIGHT);
2554
2555         netif_stop_queue(netdev);
2556 }
2557
2558 static void be_unmap_pci_bars(struct be_adapter *adapter)
2559 {
2560         if (adapter->csr)
2561                 iounmap(adapter->csr);
2562         if (adapter->db)
2563                 iounmap(adapter->db);
2564         if (adapter->pcicfg && be_physfn(adapter))
2565                 iounmap(adapter->pcicfg);
2566 }
2567
2568 static int be_map_pci_bars(struct be_adapter *adapter)
2569 {
2570         u8 __iomem *addr;
2571         int pcicfg_reg, db_reg;
2572
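/* BAR layout as used below: CSR is BAR 2 (PF only). GEN2 uses BAR 4 for
 * doorbells and BAR 1 for the PCI config shadow; GEN3 uses BAR 4 (PF) or
 * BAR 0 (VF) for doorbells and BAR 0 for pcicfg, a VF reaching pcicfg at
 * a fixed offset inside its doorbell BAR.
 */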
2573         if (be_physfn(adapter)) {
2574                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2575                                 pci_resource_len(adapter->pdev, 2));
2576                 if (addr == NULL)
2577                         return -ENOMEM;
2578                 adapter->csr = addr;
2579         }
2580
2581         if (adapter->generation == BE_GEN2) {
2582                 pcicfg_reg = 1;
2583                 db_reg = 4;
2584         } else {
2585                 pcicfg_reg = 0;
2586                 if (be_physfn(adapter))
2587                         db_reg = 4;
2588                 else
2589                         db_reg = 0;
2590         }
2591         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2592                                 pci_resource_len(adapter->pdev, db_reg));
2593         if (addr == NULL)
2594                 goto pci_map_err;
2595         adapter->db = addr;
2596
2597         if (be_physfn(adapter)) {
2598                 addr = ioremap_nocache(
2599                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2600                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2601                 if (addr == NULL)
2602                         goto pci_map_err;
2603                 adapter->pcicfg = addr;
2604         } else
2605                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2606
2607         return 0;
2608 pci_map_err:
2609         be_unmap_pci_bars(adapter);
2610         return -ENOMEM;
2611 }
2612
2614 static void be_ctrl_cleanup(struct be_adapter *adapter)
2615 {
2616         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2617
2618         be_unmap_pci_bars(adapter);
2619
2620         if (mem->va)
2621                 pci_free_consistent(adapter->pdev, mem->size,
2622                         mem->va, mem->dma);
2623
2624         mem = &adapter->mc_cmd_mem;
2625         if (mem->va)
2626                 pci_free_consistent(adapter->pdev, mem->size,
2627                         mem->va, mem->dma);
2628 }
2629
2630 static int be_ctrl_init(struct be_adapter *adapter)
2631 {
2632         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2633         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2634         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2635         int status;
2636
2637         status = be_map_pci_bars(adapter);
2638         if (status)
2639                 goto done;
2640
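/* The mailbox must be 16-byte aligned: over-allocate by 16 bytes, then
 * align both the virtual and DMA addresses with PTR_ALIGN below.
 */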
2641         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2642         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2643                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2644         if (!mbox_mem_alloc->va) {
2645                 status = -ENOMEM;
2646                 goto unmap_pci_bars;
2647         }
2648
2649         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2650         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2651         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2652         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2653
2654         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2655         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2656                         &mc_cmd_mem->dma);
2657         if (mc_cmd_mem->va == NULL) {
2658                 status = -ENOMEM;
2659                 goto free_mbox;
2660         }
2661         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2662
2663         spin_lock_init(&adapter->mbox_lock);
2664         spin_lock_init(&adapter->mcc_lock);
2665         spin_lock_init(&adapter->mcc_cq_lock);
2666
2667         init_completion(&adapter->flash_compl);
2668         pci_save_state(adapter->pdev);
2669         return 0;
2670
2671 free_mbox:
2672         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2673                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2674
2675 unmap_pci_bars:
2676         be_unmap_pci_bars(adapter);
2677
2678 done:
2679         return status;
2680 }
2681
2682 static void be_stats_cleanup(struct be_adapter *adapter)
2683 {
2684         struct be_dma_mem *cmd = &adapter->stats_cmd;
2685
2686         if (cmd->va)
2687                 pci_free_consistent(adapter->pdev, cmd->size,
2688                         cmd->va, cmd->dma);
2689 }
2690
2691 static int be_stats_init(struct be_adapter *adapter)
2692 {
2693         struct be_dma_mem *cmd = &adapter->stats_cmd;
2694
2695         cmd->size = sizeof(struct be_cmd_req_get_stats);
2696         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2697         if (cmd->va == NULL)
2698                 return -1;
2699         memset(cmd->va, 0, cmd->size);
2700         return 0;
2701 }
2702
2703 static void __devexit be_remove(struct pci_dev *pdev)
2704 {
2705         struct be_adapter *adapter = pci_get_drvdata(pdev);
2706
2707         if (!adapter)
2708                 return;
2709
2710         unregister_netdev(adapter->netdev);
2711
2712         be_clear(adapter);
2713
2714         be_stats_cleanup(adapter);
2715
2716         be_ctrl_cleanup(adapter);
2717
2718         be_sriov_disable(adapter);
2719
2720         be_msix_disable(adapter);
2721
2722         pci_set_drvdata(pdev, NULL);
2723         pci_release_regions(pdev);
2724         pci_disable_device(pdev);
2725
2726         free_netdev(adapter->netdev);
2727 }
2728
2729 static int be_get_config(struct be_adapter *adapter)
2730 {
2731         int status;
2732         u8 mac[ETH_ALEN];
2733
2734         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2735         if (status)
2736                 return status;
2737
2738         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2739                         &adapter->function_mode, &adapter->function_caps);
2740         if (status)
2741                 return status;
2742
2743         memset(mac, 0, ETH_ALEN);
2744
2745         if (be_physfn(adapter)) {
2746                 status = be_cmd_mac_addr_query(adapter, mac,
2747                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2748
2749                 if (status)
2750                         return status;
2751
2752                 if (!is_valid_ether_addr(mac))
2753                         return -EADDRNOTAVAIL;
2754
2755                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2756                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2757         }
2758
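/* The 0x400 function_mode bit is assumed to indicate multi-channel
 * (FLEX10) operation, where the VLAN table is shared and only a quarter
 * of the entries are available to this function.
 */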
2759         if (adapter->function_mode & 0x400)
2760                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2761         else
2762                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2763
2764         return 0;
2765 }
2766
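/* PCI probe: the device ID selects the ASIC generation (GEN2 vs GEN3),
 * which in turn determines the BAR layout and flash layout used elsewhere
 * in this file.
 */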
2767 static int __devinit be_probe(struct pci_dev *pdev,
2768                         const struct pci_device_id *pdev_id)
2769 {
2770         int status = 0;
2771         struct be_adapter *adapter;
2772         struct net_device *netdev;
2773
2774         status = pci_enable_device(pdev);
2775         if (status)
2776                 goto do_none;
2777
2778         status = pci_request_regions(pdev, DRV_NAME);
2779         if (status)
2780                 goto disable_dev;
2781         pci_set_master(pdev);
2782
2783         netdev = alloc_etherdev(sizeof(struct be_adapter));
2784         if (netdev == NULL) {
2785                 status = -ENOMEM;
2786                 goto rel_reg;
2787         }
2788         adapter = netdev_priv(netdev);
2789
2790         switch (pdev->device) {
2791         case BE_DEVICE_ID1:
2792         case OC_DEVICE_ID1:
2793                 adapter->generation = BE_GEN2;
2794                 break;
2795         case BE_DEVICE_ID2:
2796         case OC_DEVICE_ID2:
2797                 adapter->generation = BE_GEN3;
2798                 break;
2799         default:
2800                 adapter->generation = 0;
2801         }
2802
2803         adapter->pdev = pdev;
2804         pci_set_drvdata(pdev, adapter);
2805         adapter->netdev = netdev;
2806         SET_NETDEV_DEV(netdev, &pdev->dev);
2807
2808         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2809         if (!status) {
2810                 netdev->features |= NETIF_F_HIGHDMA;
2811         } else {
2812                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2813                 if (status) {
2814                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2815                         goto free_netdev;
2816                 }
2817         }
2818
2819         be_sriov_enable(adapter);
2820
2821         status = be_ctrl_init(adapter);
2822         if (status)
2823                 goto free_netdev;
2824
2825         /* sync up with fw's ready state */
2826         if (be_physfn(adapter)) {
2827                 status = be_cmd_POST(adapter);
2828                 if (status)
2829                         goto ctrl_clean;
2830         }
2831
2832         /* tell fw we're ready to fire cmds */
2833         status = be_cmd_fw_init(adapter);
2834         if (status)
2835                 goto ctrl_clean;
2836
2837         if (be_physfn(adapter)) {
2838                 status = be_cmd_reset_function(adapter);
2839                 if (status)
2840                         goto ctrl_clean;
2841         }
2842
2843         status = be_stats_init(adapter);
2844         if (status)
2845                 goto ctrl_clean;
2846
2847         status = be_get_config(adapter);
2848         if (status)
2849                 goto stats_clean;
2850
2851         be_msix_enable(adapter);
2852
2853         INIT_DELAYED_WORK(&adapter->work, be_worker);
2854
2855         status = be_setup(adapter);
2856         if (status)
2857                 goto msix_disable;
2858
2859         be_netdev_init(netdev);
2860         status = register_netdev(netdev);
2861         if (status != 0)
2862                 goto unsetup;
2863         netif_carrier_off(netdev);
2864
2865         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2866         return 0;
2867
2868 unsetup:
2869         be_clear(adapter);
2870 msix_disable:
2871         be_msix_disable(adapter);
2872 stats_clean:
2873         be_stats_cleanup(adapter);
2874 ctrl_clean:
2875         be_ctrl_cleanup(adapter);
2876 free_netdev:
2877         be_sriov_disable(adapter);
2878         free_netdev(adapter->netdev);
2879         pci_set_drvdata(pdev, NULL);
2880 rel_reg:
2881         pci_release_regions(pdev);
2882 disable_dev:
2883         pci_disable_device(pdev);
2884 do_none:
2885         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2886         return status;
2887 }
2888
2889 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2890 {
2891         struct be_adapter *adapter = pci_get_drvdata(pdev);
2892         struct net_device *netdev =  adapter->netdev;
2893
2894         if (adapter->wol)
2895                 be_setup_wol(adapter, true);
2896
2897         netif_device_detach(netdev);
2898         if (netif_running(netdev)) {
2899                 rtnl_lock();
2900                 be_close(netdev);
2901                 rtnl_unlock();
2902         }
2903         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2904         be_clear(adapter);
2905
2906         pci_save_state(pdev);
2907         pci_disable_device(pdev);
2908         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2909         return 0;
2910 }
2911
2912 static int be_resume(struct pci_dev *pdev)
2913 {
2914         int status = 0;
2915         struct be_adapter *adapter = pci_get_drvdata(pdev);
2916         struct net_device *netdev =  adapter->netdev;
2917
2918         netif_device_detach(netdev);
2919
2920         status = pci_enable_device(pdev);
2921         if (status)
2922                 return status;
2923
2924         pci_set_power_state(pdev, 0);
2925         pci_restore_state(pdev);
2926
2927         /* tell fw we're ready to fire cmds */
2928         status = be_cmd_fw_init(adapter);
2929         if (status)
2930                 return status;
2931
2932         be_setup(adapter);
2933         if (netif_running(netdev)) {
2934                 rtnl_lock();
2935                 be_open(netdev);
2936                 rtnl_unlock();
2937         }
2938         netif_device_attach(netdev);
2939
2940         if (adapter->wol)
2941                 be_setup_wol(adapter, false);
2942         return 0;
2943 }
2944
2945 /*
2946  * A function level reset (FLR) will stop BE from DMAing any data.
2947  */
2948 static void be_shutdown(struct pci_dev *pdev)
2949 {
2950         struct be_adapter *adapter = pci_get_drvdata(pdev);
2951         struct net_device *netdev =  adapter->netdev;
2952
2953         netif_device_detach(netdev);
2954
2955         be_cmd_reset_function(adapter);
2956
2957         if (adapter->wol)
2958                 be_setup_wol(adapter, true);
2959
2960         pci_disable_device(pdev);
2961 }
2962
2963 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2964                                 pci_channel_state_t state)
2965 {
2966         struct be_adapter *adapter = pci_get_drvdata(pdev);
2967         struct net_device *netdev =  adapter->netdev;
2968
2969         dev_err(&adapter->pdev->dev, "EEH error detected\n");
2970
2971         adapter->eeh_err = true;
2972
2973         netif_device_detach(netdev);
2974
2975         if (netif_running(netdev)) {
2976                 rtnl_lock();
2977                 be_close(netdev);
2978                 rtnl_unlock();
2979         }
2980         be_clear(adapter);
2981
2982         if (state == pci_channel_io_perm_failure)
2983                 return PCI_ERS_RESULT_DISCONNECT;
2984
2985         pci_disable_device(pdev);
2986
2987         return PCI_ERS_RESULT_NEED_RESET;
2988 }
2989
2990 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2991 {
2992         struct be_adapter *adapter = pci_get_drvdata(pdev);
2993         int status;
2994
2995         dev_info(&adapter->pdev->dev, "EEH reset\n");
2996         adapter->eeh_err = false;
2997
2998         status = pci_enable_device(pdev);
2999         if (status)
3000                 return PCI_ERS_RESULT_DISCONNECT;
3001
3002         pci_set_master(pdev);
3003         pci_set_power_state(pdev, 0);
3004         pci_restore_state(pdev);
3005
3006         /* Check if card is ok and fw is ready */
3007         status = be_cmd_POST(adapter);
3008         if (status)
3009                 return PCI_ERS_RESULT_DISCONNECT;
3010
3011         return PCI_ERS_RESULT_RECOVERED;
3012 }
3013
3014 static void be_eeh_resume(struct pci_dev *pdev)
3015 {
3016         int status = 0;
3017         struct be_adapter *adapter = pci_get_drvdata(pdev);
3018         struct net_device *netdev =  adapter->netdev;
3019
3020         dev_info(&adapter->pdev->dev, "EEH resume\n");
3021
3022         pci_save_state(pdev);
3023
3024         /* tell fw we're ready to fire cmds */
3025         status = be_cmd_fw_init(adapter);
3026         if (status)
3027                 goto err;
3028
3029         status = be_setup(adapter);
3030         if (status)
3031                 goto err;
3032
3033         if (netif_running(netdev)) {
3034                 status = be_open(netdev);
3035                 if (status)
3036                         goto err;
3037         }
3038         netif_device_attach(netdev);
3039         return;
3040 err:
3041         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3042 }
3043
3044 static struct pci_error_handlers be_eeh_handlers = {
3045         .error_detected = be_eeh_err_detected,
3046         .slot_reset = be_eeh_reset,
3047         .resume = be_eeh_resume,
3048 };
3049
3050 static struct pci_driver be_driver = {
3051         .name = DRV_NAME,
3052         .id_table = be_dev_ids,
3053         .probe = be_probe,
3054         .remove = be_remove,
3055         .suspend = be_suspend,
3056         .resume = be_resume,
3057         .shutdown = be_shutdown,
3058         .err_handler = &be_eeh_handlers
3059 };
3060
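/* Example usage (hypothetical values):
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 * An rx_frag_size other than 2048/4096/8192 falls back to 2048, and
 * num_vfs is clamped to 32, as validated below.
 */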
3061 static int __init be_init_module(void)
3062 {
3063         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3064             rx_frag_size != 2048) {
3065                 printk(KERN_WARNING DRV_NAME
3066                         " : Module param rx_frag_size must be 2048/4096/8192."
3067                         " Using 2048\n");
3068                 rx_frag_size = 2048;
3069         }
3070
3071         if (num_vfs > 32) {
3072                 printk(KERN_WARNING DRV_NAME
3073                         " : Module param num_vfs must not be greater than 32."
3074                         " Using 32\n");
3075                 num_vfs = 32;
3076         }
3077
3078         return pci_register_driver(&be_driver);
3079 }
3080 module_init(be_init_module);
3081
3082 static void __exit be_exit_module(void)
3083 {
3084         pci_unregister_driver(&be_driver);
3085 }
3086 module_exit(be_exit_module);