Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
[pandora-kernel.git] / drivers / net / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable name for each bit (LSB first) of the Unrecoverable
 * Error status "low" register, used when reporting a hardware UE.
 * NOTE(review): some entries carry a trailing space ("PTC " etc.) —
 * presumably cosmetic for log formatting; left exactly as-is since
 * these strings are emitted at runtime.
 */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable name for each bit (LSB first) of the Unrecoverable
 * Error status "high" register.  A missing comma after "NETC" used to
 * concatenate it with the following "Unknown", leaving the table one
 * entry short of the 32 status bits and misaligning every later entry.
 */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
117
118 static inline bool be_multi_rxq(struct be_adapter *adapter)
119 {
120         return (adapter->num_rx_qs > 1);
121 }
122
123 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
124 {
125         struct be_dma_mem *mem = &q->dma_mem;
126         if (mem->va)
127                 pci_free_consistent(adapter->pdev, mem->size,
128                         mem->va, mem->dma);
129 }
130
131 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
132                 u16 len, u16 entry_size)
133 {
134         struct be_dma_mem *mem = &q->dma_mem;
135
136         memset(q, 0, sizeof(*q));
137         q->len = len;
138         q->entry_size = entry_size;
139         mem->size = len * entry_size;
140         mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
141         if (!mem->va)
142                 return -1;
143         memset(mem->va, 0, mem->size);
144         return 0;
145 }
146
147 static void be_intr_set(struct be_adapter *adapter, bool enable)
148 {
149         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
150         u32 reg = ioread32(addr);
151         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
153         if (adapter->eeh_err)
154                 return;
155
156         if (!enabled && enable)
157                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158         else if (enabled && !enable)
159                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else
161                 return;
162
163         iowrite32(reg, addr);
164 }
165
166 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
167 {
168         u32 val = 0;
169         val |= qid & DB_RQ_RING_ID_MASK;
170         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
171
172         wmb();
173         iowrite32(val, adapter->db + DB_RQ_OFFSET);
174 }
175
176 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178         u32 val = 0;
179         val |= qid & DB_TXULP_RING_ID_MASK;
180         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
181
182         wmb();
183         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
184 }
185
186 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
187                 bool arm, bool clear_int, u16 num_popped)
188 {
189         u32 val = 0;
190         val |= qid & DB_EQ_RING_ID_MASK;
191
192         if (adapter->eeh_err)
193                 return;
194
195         if (arm)
196                 val |= 1 << DB_EQ_REARM_SHIFT;
197         if (clear_int)
198                 val |= 1 << DB_EQ_CLR_SHIFT;
199         val |= 1 << DB_EQ_EVNT_SHIFT;
200         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
201         iowrite32(val, adapter->db + DB_EQ_OFFSET);
202 }
203
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205 {
206         u32 val = 0;
207         val |= qid & DB_CQ_RING_ID_MASK;
208
209         if (adapter->eeh_err)
210                 return;
211
212         if (arm)
213                 val |= 1 << DB_CQ_REARM_SHIFT;
214         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_CQ_OFFSET);
216 }
217
218 static int be_mac_addr_set(struct net_device *netdev, void *p)
219 {
220         struct be_adapter *adapter = netdev_priv(netdev);
221         struct sockaddr *addr = p;
222         int status = 0;
223
224         if (!is_valid_ether_addr(addr->sa_data))
225                 return -EADDRNOTAVAIL;
226
227         /* MAC addr configuration will be done in hardware for VFs
228          * by their corresponding PFs. Just copy to netdev addr here
229          */
230         if (!be_physfn(adapter))
231                 goto netdev_addr;
232
233         status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
234         if (status)
235                 return status;
236
237         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238                         adapter->if_handle, &adapter->pmac_id);
239 netdev_addr:
240         if (!status)
241                 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243         return status;
244 }
245
246 void netdev_stats_update(struct be_adapter *adapter)
247 {
248         struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
249         struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
250         struct be_port_rxf_stats *port_stats =
251                         &rxf_stats->port[adapter->port_num];
252         struct net_device_stats *dev_stats = &adapter->netdev->stats;
253         struct be_erx_stats *erx_stats = &hw_stats->erx;
254         struct be_rx_obj *rxo;
255         int i;
256
257         memset(dev_stats, 0, sizeof(*dev_stats));
258         for_all_rx_queues(adapter, rxo, i) {
259                 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
260                 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
261                 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
262                 /*  no space in linux buffers: best possible approximation */
263                 dev_stats->rx_dropped +=
264                         erx_stats->rx_drops_no_fragments[rxo->q.id];
265         }
266
267         dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
268         dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
269
270         /* bad pkts received */
271         dev_stats->rx_errors = port_stats->rx_crc_errors +
272                 port_stats->rx_alignment_symbol_errors +
273                 port_stats->rx_in_range_errors +
274                 port_stats->rx_out_range_errors +
275                 port_stats->rx_frame_too_long +
276                 port_stats->rx_dropped_too_small +
277                 port_stats->rx_dropped_too_short +
278                 port_stats->rx_dropped_header_too_small +
279                 port_stats->rx_dropped_tcp_length +
280                 port_stats->rx_dropped_runt +
281                 port_stats->rx_tcp_checksum_errs +
282                 port_stats->rx_ip_checksum_errs +
283                 port_stats->rx_udp_checksum_errs;
284
285         /* detailed rx errors */
286         dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
287                 port_stats->rx_out_range_errors +
288                 port_stats->rx_frame_too_long;
289
290         dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
291
292         /* frame alignment errors */
293         dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
294
295         /* receiver fifo overrun */
296         /* drops_no_pbuf is no per i/f, it's per BE card */
297         dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
298                                         port_stats->rx_input_fifo_overflow +
299                                         rxf_stats->rx_drops_no_pbuf;
300 }
301
302 void be_link_status_update(struct be_adapter *adapter, bool link_up)
303 {
304         struct net_device *netdev = adapter->netdev;
305
306         /* If link came up or went down */
307         if (adapter->link_up != link_up) {
308                 adapter->link_speed = -1;
309                 if (link_up) {
310                         netif_start_queue(netdev);
311                         netif_carrier_on(netdev);
312                         printk(KERN_INFO "%s: Link up\n", netdev->name);
313                 } else {
314                         netif_stop_queue(netdev);
315                         netif_carrier_off(netdev);
316                         printk(KERN_INFO "%s: Link down\n", netdev->name);
317                 }
318                 adapter->link_up = link_up;
319         }
320 }
321
/* Update the EQ delay on BE based on the RX frags consumed / sec
 * (adaptive interrupt coalescing: the faster frags arrive, the larger
 * the event-queue delay programmed into the hardware).
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	/* adaptive coalescing disabled for this EQ */
	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	/* frags consumed per second since the last sample */
	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	/* map frag rate to a delay; the /110000 and <<3 scaling appear to
	 * be empirical tuning constants — TODO confirm against HW spec */
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;	/* very low rate: no delay at all */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
361
/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * NOTE(review): do_div() by (ticks / HZ) assumes ticks >= HZ; both
 * callers only invoke this after more than 2*HZ jiffies have elapsed —
 * confirm before reusing elsewhere (ticks < HZ would divide by zero).
 */
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);	/* -> bytes/sec */
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}
372
373 static void be_tx_rate_update(struct be_adapter *adapter)
374 {
375         struct be_tx_stats *stats = tx_stats(adapter);
376         ulong now = jiffies;
377
378         /* Wrapped around? */
379         if (time_before(now, stats->be_tx_jiffies)) {
380                 stats->be_tx_jiffies = now;
381                 return;
382         }
383
384         /* Update tx rate once in two seconds */
385         if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386                 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387                                                   - stats->be_tx_bytes_prev,
388                                                  now - stats->be_tx_jiffies);
389                 stats->be_tx_jiffies = now;
390                 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391         }
392 }
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397         struct be_tx_stats *stats = tx_stats(adapter);
398         stats->be_tx_reqs++;
399         stats->be_tx_wrbs += wrb_cnt;
400         stats->be_tx_bytes += copied;
401         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402         if (stopped)
403                 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
408 {
409         int cnt = (skb->len > skb->data_len);
410
411         cnt += skb_shinfo(skb)->nr_frags;
412
413         /* to account for hdr wrb */
414         cnt++;
415         if (cnt & 1) {
416                 /* add a dummy to make it an even num */
417                 cnt++;
418                 *dummy = true;
419         } else
420                 *dummy = false;
421         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422         return cnt;
423 }
424
425 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
426 {
427         wrb->frag_pa_hi = upper_32_bits(addr);
428         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
429         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
430 }
431
/* Populate the TX header WRB: checksum/LSO/VLAN offload flags, the
 * number of WRBs that follow (wrb_cnt) and the total payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the hardware */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* hardware checksum offload for TCP or UDP */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
471
472 static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
473                 bool unmap_single)
474 {
475         dma_addr_t dma;
476
477         be_dws_le_to_cpu(wrb, sizeof(*wrb));
478
479         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
480         if (wrb->frag_len) {
481                 if (unmap_single)
482                         pci_unmap_single(pdev, dma, wrb->frag_len,
483                                 PCI_DMA_TODEVICE);
484                 else
485                         pci_unmap_page(pdev, dma, wrb->frag_len,
486                                 PCI_DMA_TODEVICE);
487         }
488 }
489
/* Build the WRB chain for an skb in the TX queue: a header WRB, one WRB
 * per linear/page fragment, and an optional dummy WRB for even padding.
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * failure (in which case all partial mappings are undone and the queue
 * head is rewound).
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first WRB for the header; it is filled in last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB: unwind point on error */

	if (skb->len > skb->data_len) {
		/* linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		/* zero-length WRB to pad the chain to an even count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything mapped so
	 * far; only that first WRB can be a pci_map_single() mapping */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
558
/* .ndo_start_xmit handler: map the skb into TX WRBs, record it for
 * completion processing, ring the doorbell and account stats.  Always
 * returns NETDEV_TX_OK; on a DMA mapping failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* index of the header WRB / skb slot */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: head already rewound; drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
598
599 static int be_change_mtu(struct net_device *netdev, int new_mtu)
600 {
601         struct be_adapter *adapter = netdev_priv(netdev);
602         if (new_mtu < BE_MIN_MTU ||
603                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
604                                         (ETH_HLEN + ETH_FCS_LEN))) {
605                 dev_info(&adapter->pdev->dev,
606                         "MTU must be between %d and %d bytes\n",
607                         BE_MIN_MTU,
608                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
609                 return -EINVAL;
610         }
611         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
612                         netdev->mtu, new_mtu);
613         netdev->mtu = new_mtu;
614         return 0;
615 }
616
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		/* program the single VLAN assigned to this VF */
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}
	/* NOTE(review): in the vf case the code still falls through and
	 * reconfigures the PF interface below, overwriting 'status' from
	 * the VF command — confirm this is intentional.
	 */

	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vlans configured: fall back to vlan promisc */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
651
/* .ndo_vlan_rx_register handler: cache the vlan group pointer supplied
 * by the stack for later use (e.g. the vlan-offload check in the TX
 * header fill and device lookup on vid removal).
 */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}
658
659 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
660 {
661         struct be_adapter *adapter = netdev_priv(netdev);
662
663         adapter->vlans_added++;
664         if (!be_physfn(adapter))
665                 return;
666
667         adapter->vlan_tag[vid] = 1;
668         if (adapter->vlans_added <= (adapter->max_vlans + 1))
669                 be_vid_config(adapter, false, 0);
670 }
671
672 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
673 {
674         struct be_adapter *adapter = netdev_priv(netdev);
675
676         adapter->vlans_added--;
677         vlan_group_set_device(adapter->vlan_grp, vid, NULL);
678
679         if (!be_physfn(adapter))
680                 return;
681
682         adapter->vlan_tag[vid] = 0;
683         if (adapter->vlans_added <= adapter->max_vlans)
684                 be_vid_config(adapter, false, 0);
685 }
686
687 static void be_set_multicast_list(struct net_device *netdev)
688 {
689         struct be_adapter *adapter = netdev_priv(netdev);
690
691         if (netdev->flags & IFF_PROMISC) {
692                 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
693                 adapter->promiscuous = true;
694                 goto done;
695         }
696
697         /* BE was previously in promiscous mode; disable it */
698         if (adapter->promiscuous) {
699                 adapter->promiscuous = false;
700                 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
701         }
702
703         /* Enable multicast promisc if num configured exceeds what we support */
704         if (netdev->flags & IFF_ALLMULTI ||
705             netdev_mc_count(netdev) > BE_MAX_MC) {
706                 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
707                                 &adapter->mc_cmd_mem);
708                 goto done;
709         }
710
711         be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
712                 &adapter->mc_cmd_mem);
713 done:
714         return;
715 }
716
717 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
718 {
719         struct be_adapter *adapter = netdev_priv(netdev);
720         int status;
721
722         if (!adapter->sriov_enabled)
723                 return -EPERM;
724
725         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
726                 return -EINVAL;
727
728         if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
729                 status = be_cmd_pmac_del(adapter,
730                                         adapter->vf_cfg[vf].vf_if_handle,
731                                         adapter->vf_cfg[vf].vf_pmac_id);
732
733         status = be_cmd_pmac_add(adapter, mac,
734                                 adapter->vf_cfg[vf].vf_if_handle,
735                                 &adapter->vf_cfg[vf].vf_pmac_id);
736
737         if (status)
738                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
739                                 mac, vf);
740         else
741                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
742
743         return status;
744 }
745
746 static int be_get_vf_config(struct net_device *netdev, int vf,
747                         struct ifla_vf_info *vi)
748 {
749         struct be_adapter *adapter = netdev_priv(netdev);
750
751         if (!adapter->sriov_enabled)
752                 return -EPERM;
753
754         if (vf >= num_vfs)
755                 return -EINVAL;
756
757         vi->vf = vf;
758         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
759         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
760         vi->qos = 0;
761         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
762
763         return 0;
764 }
765
766 static int be_set_vf_vlan(struct net_device *netdev,
767                         int vf, u16 vlan, u8 qos)
768 {
769         struct be_adapter *adapter = netdev_priv(netdev);
770         int status = 0;
771
772         if (!adapter->sriov_enabled)
773                 return -EPERM;
774
775         if ((vf >= num_vfs) || (vlan > 4095))
776                 return -EINVAL;
777
778         if (vlan) {
779                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
780                 adapter->vlans_added++;
781         } else {
782                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
783                 adapter->vlans_added--;
784         }
785
786         status = be_vid_config(adapter, true, vf);
787
788         if (status)
789                 dev_info(&adapter->pdev->dev,
790                                 "VLAN %d config on VF %d failed\n", vlan, vf);
791         return status;
792 }
793
794 static int be_set_vf_tx_rate(struct net_device *netdev,
795                         int vf, int rate)
796 {
797         struct be_adapter *adapter = netdev_priv(netdev);
798         int status = 0;
799
800         if (!adapter->sriov_enabled)
801                 return -EPERM;
802
803         if ((vf >= num_vfs) || (rate < 0))
804                 return -EINVAL;
805
806         if (rate > 10000)
807                 rate = 10000;
808
809         adapter->vf_cfg[vf].vf_tx_rate = rate;
810         status = be_cmd_set_qos(adapter, rate / 10, vf);
811
812         if (status)
813                 dev_info(&adapter->pdev->dev,
814                                 "tx rate %d on VF %d failed\n", rate, vf);
815         return status;
816 }
817
818 static void be_rx_rate_update(struct be_rx_obj *rxo)
819 {
820         struct be_rx_stats *stats = &rxo->stats;
821         ulong now = jiffies;
822
823         /* Wrapped around */
824         if (time_before(now, stats->rx_jiffies)) {
825                 stats->rx_jiffies = now;
826                 return;
827         }
828
829         /* Update the rate once in two seconds */
830         if ((now - stats->rx_jiffies) < 2 * HZ)
831                 return;
832
833         stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
834                                 now - stats->rx_jiffies);
835         stats->rx_jiffies = now;
836         stats->rx_bytes_prev = stats->rx_bytes;
837 }
838
839 static void be_rx_stats_update(struct be_rx_obj *rxo,
840                 u32 pktsize, u16 numfrags, u8 pkt_type)
841 {
842         struct be_rx_stats *stats = &rxo->stats;
843
844         stats->rx_compl++;
845         stats->rx_frags += numfrags;
846         stats->rx_bytes += pktsize;
847         stats->rx_pkts++;
848         if (pkt_type == BE_MULTICAST_PACKET)
849                 stats->rx_mcast_pkts++;
850 }
851
852 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
853 {
854         u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
855
856         l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
857         ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
858         ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
859         if (ip_version) {
860                 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
861                 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
862         }
863         ipv6_chk = (ip_version && (tcpf || udpf));
864
865         return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
866 }
867
/* Look up the page_info for a posted rx fragment and remove it from the
 * rx queue's accounting. If this fragment is the last user of its
 * backing big page, the page is also DMA-unmapped here.
 * The caller takes ownership of the page reference held in the returned
 * page_info.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        /* Only the last fragment carved from a big page unmaps it */
        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
888
/* Throw away the data in the Rx completion: drop the page reference of
 * every fragment the completion covers and clear its page_info slot. */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}
908
909 /*
910  * skb_fill_rx_data forms a complete skb for an ether frame
911  * indicated by rxcp.
912  */
913 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
914                         struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
915                         u16 num_rcvd)
916 {
917         struct be_queue_info *rxq = &rxo->q;
918         struct be_rx_page_info *page_info;
919         u16 rxq_idx, i, j;
920         u32 pktsize, hdr_len, curr_frag_len, size;
921         u8 *start;
922         u8 pkt_type;
923
924         rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
925         pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
926         pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
927
928         page_info = get_rx_page_info(adapter, rxo, rxq_idx);
929
930         start = page_address(page_info->page) + page_info->page_offset;
931         prefetch(start);
932
933         /* Copy data in the first descriptor of this completion */
934         curr_frag_len = min(pktsize, rx_frag_size);
935
936         /* Copy the header portion into skb_data */
937         hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
938         memcpy(skb->data, start, hdr_len);
939         skb->len = curr_frag_len;
940         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
941                 /* Complete packet has now been moved to data */
942                 put_page(page_info->page);
943                 skb->data_len = 0;
944                 skb->tail += curr_frag_len;
945         } else {
946                 skb_shinfo(skb)->nr_frags = 1;
947                 skb_shinfo(skb)->frags[0].page = page_info->page;
948                 skb_shinfo(skb)->frags[0].page_offset =
949                                         page_info->page_offset + hdr_len;
950                 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
951                 skb->data_len = curr_frag_len - hdr_len;
952                 skb->tail += hdr_len;
953         }
954         page_info->page = NULL;
955
956         if (pktsize <= rx_frag_size) {
957                 BUG_ON(num_rcvd != 1);
958                 goto done;
959         }
960
961         /* More frags present for this completion */
962         size = pktsize;
963         for (i = 1, j = 0; i < num_rcvd; i++) {
964                 size -= curr_frag_len;
965                 index_inc(&rxq_idx, rxq->len);
966                 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
967
968                 curr_frag_len = min(size, rx_frag_size);
969
970                 /* Coalesce all frags from the same physical page in one slot */
971                 if (page_info->page_offset == 0) {
972                         /* Fresh page */
973                         j++;
974                         skb_shinfo(skb)->frags[j].page = page_info->page;
975                         skb_shinfo(skb)->frags[j].page_offset =
976                                                         page_info->page_offset;
977                         skb_shinfo(skb)->frags[j].size = 0;
978                         skb_shinfo(skb)->nr_frags++;
979                 } else {
980                         put_page(page_info->page);
981                 }
982
983                 skb_shinfo(skb)->frags[j].size += curr_frag_len;
984                 skb->len += curr_frag_len;
985                 skb->data_len += curr_frag_len;
986
987                 page_info->page = NULL;
988         }
989         BUG_ON(j > MAX_SKB_FRAGS);
990
991 done:
992         be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
993 }
994
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the rx fragments, set the checksum
 * state, and hand it to the stack (via the vlan accel path when a
 * vlan tag is present). Drops the packet if no skb can be allocated or
 * a vlan-tagged frame arrives with no vlans configured. */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                /* Can't build an skb; release the posted frags */
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb_checksum_none_assert(skb);
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                /* vlan tag arrives byte-swapped in the completion */
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}
1048
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx page fragments directly to the NAPI skb (coalescing
 * frags from the same physical page) and feed it to the GRO engine. */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj =  &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        /* j starts at (u16)-1 so the first iteration's j++ wraps to 0 */
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                /* NOTE(review): returning here abandons the napi skb with
                 * frags already attached and skips the stats update —
                 * looks like a page-ref leak; confirm whether this check
                 * should run before napi_get_frags() */
                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
1130
/* Fetch the next valid rx completion from the CQ, or NULL if none is
 * pending. Advances the CQ tail; the caller must later clear the entry
 * via be_rx_compl_reset() and notify the CQ. */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        /* Read the rest of the entry only after seeing the valid bit */
        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}
1144
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        /* Clearing the 'valid' dword marks this CQ entry as consumed */
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1153
1154 static inline struct page *be_alloc_pages(u32 size)
1155 {
1156         gfp_t alloc_flags = GFP_ATOMIC;
1157         u32 order = get_order(size);
1158         if (order > 0)
1159                 alloc_flags |= __GFP_COMP;
1160         return  alloc_pages(alloc_flags, order);
1161 }
1162
1163 /*
1164  * Allocate a page, split it to fragments of size rx_frag_size and post as
1165  * receive buffers to BE
1166  */
1167 static void be_post_rx_frags(struct be_rx_obj *rxo)
1168 {
1169         struct be_adapter *adapter = rxo->adapter;
1170         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1171         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1172         struct be_queue_info *rxq = &rxo->q;
1173         struct page *pagep = NULL;
1174         struct be_eth_rx_d *rxd;
1175         u64 page_dmaaddr = 0, frag_dmaaddr;
1176         u32 posted, page_offset = 0;
1177
1178         page_info = &rxo->page_info_tbl[rxq->head];
1179         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1180                 if (!pagep) {
1181                         pagep = be_alloc_pages(adapter->big_page_size);
1182                         if (unlikely(!pagep)) {
1183                                 rxo->stats.rx_post_fail++;
1184                                 break;
1185                         }
1186                         page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
1187                                                 adapter->big_page_size,
1188                                                 PCI_DMA_FROMDEVICE);
1189                         page_info->page_offset = 0;
1190                 } else {
1191                         get_page(pagep);
1192                         page_info->page_offset = page_offset + rx_frag_size;
1193                 }
1194                 page_offset = page_info->page_offset;
1195                 page_info->page = pagep;
1196                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1197                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1198
1199                 rxd = queue_head_node(rxq);
1200                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1201                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1202
1203                 /* Any space left in the current big page for another frag? */
1204                 if ((page_offset + rx_frag_size + rx_frag_size) >
1205                                         adapter->big_page_size) {
1206                         pagep = NULL;
1207                         page_info->last_page_user = true;
1208                 }
1209
1210                 prev_page_info = page_info;
1211                 queue_head_inc(rxq);
1212                 page_info = &page_info_tbl[rxq->head];
1213         }
1214         if (pagep)
1215                 prev_page_info->last_page_user = true;
1216
1217         if (posted) {
1218                 atomic_add(posted, &rxq->used);
1219                 be_rxq_notify(adapter, rxq->id, posted);
1220         } else if (atomic_read(&rxq->used) == 0) {
1221                 /* Let be_worker replenish when memory is available */
1222                 rxo->rx_post_starved = true;
1223         }
1224 }
1225
/* Fetch the next valid tx completion from the CQ, or NULL if none.
 * Unlike the rx path, the valid dword is cleared here right after the
 * entry has been converted to host endianness. */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        /* Read the rest of the entry only after seeing the valid bit */
        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}
1241
/* Reclaim one transmitted skb: unmap all of its wrbs (header + data)
 * up to and including last_index, release the txq slots and free the
 * skb. */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                /* Header mapping is released only with the first data wrb
                 * and only when the skb has linear data */
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}
1273
/* Return the next pending event queue entry (converted to host
 * endianness, tail advanced), or NULL when the queue is empty. */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        /* Read the entry only after seeing a non-zero evt word */
        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}
1286
1287 static int event_handle(struct be_adapter *adapter,
1288                         struct be_eq_obj *eq_obj)
1289 {
1290         struct be_eq_entry *eqe;
1291         u16 num = 0;
1292
1293         while ((eqe = event_get(eq_obj)) != NULL) {
1294                 eqe->evt = 0;
1295                 num++;
1296         }
1297
1298         /* Deal with any spurious interrupts that come
1299          * without events
1300          */
1301         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1302         if (num)
1303                 napi_schedule(&eq_obj->napi);
1304
1305         return num;
1306 }
1307
1308 /* Just read and notify events without processing them.
1309  * Used at the time of destroying event queues */
1310 static void be_eq_clean(struct be_adapter *adapter,
1311                         struct be_eq_obj *eq_obj)
1312 {
1313         struct be_eq_entry *eqe;
1314         u16 num = 0;
1315
1316         while ((eqe = event_get(eq_obj)) != NULL) {
1317                 eqe->evt = 0;
1318                 num++;
1319         }
1320
1321         if (num)
1322                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1323 }
1324
/* Drain an rx queue during teardown: consume all pending completions
 * (discarding their data), then free every still-posted rx buffer. */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffer that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        /* Every posted buffer must now be accounted for */
        BUG_ON(atomic_read(&rxq->used));
}
1349
/* Drain the tx queue during teardown: reap completions for up to 200ms,
 * then forcibly reclaim any wrbs whose completions never arrived. */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                /* Compute the last wrb index of this skb and reclaim it */
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}
1392
1393 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1394 {
1395         struct be_queue_info *q;
1396
1397         q = &adapter->mcc_obj.q;
1398         if (q->created)
1399                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1400         be_queue_free(adapter, q);
1401
1402         q = &adapter->mcc_obj.cq;
1403         if (q->created)
1404                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1405         be_queue_free(adapter, q);
1406 }
1407
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC completion queue and work queue,
 * unwinding via gotos on failure. Returns 0 on success, -1 on error. */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
1443
/* Destroy the tx queue, its completion queue, and finally the tx event
 * queue (shared with MCC), clearing residual events before the EQ is
 * destroyed. Reverse order of be_tx_queues_create(). */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}
1466
/* Create the tx event queue (fixed eqd, no adaptive coalescing), the
 * tx completion queue and the tx work queue, unwinding on failure.
 * Returns 0 on success, -1 on error. */
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* First EQ created; used as the base for be_evt_bit_get() */
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}
1517
/* Destroy, for every rx queue: the rx work queue (draining its buffers
 * after a 1ms dma grace period), its completion queue, and its event
 * queue. Reverse order of be_rx_queues_create(). */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}
1551
/* Create per-rx-queue EQ, CQ and rx queue (with adaptive interrupt
 * coalescing on the EQs), then program the RSS table over the non-
 * default queues when multiple rx queues are in use.
 * Returns 0 on success; on any failure tears everything down and
 * returns -1. */
static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q; queue 0 is the default (non-RSS) queue */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}
1617
/* There are 8 evt ids per func. Returns the evt id's bit number
 * relative to this function's first (base) event queue id. */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
        return eq_id - adapter->base_eq_id;
}
1623
/* Legacy (INTx) interrupt handler: reads the interrupt status register
 * and dispatches to the tx EQ and each rx EQ whose bit is set. */
static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i;

        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE; /* not our interrupt */

        /* (1 << bit) & isr: shift binds tighter than & */
        if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
                event_handle(adapter, &adapter->tx_eq);

        for_all_rx_queues(adapter, rxo, i) {
                if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
                        event_handle(adapter, &rxo->rx_eq);
        }

        return IRQ_HANDLED;
}
1645
1646 static irqreturn_t be_msix_rx(int irq, void *dev)
1647 {
1648         struct be_rx_obj *rxo = dev;
1649         struct be_adapter *adapter = rxo->adapter;
1650
1651         event_handle(adapter, &rxo->rx_eq);
1652
1653         return IRQ_HANDLED;
1654 }
1655
/* MSI-X interrupt handler for the shared tx/MCC event queue */
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}
1664
1665 static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1666                         struct be_eth_rx_compl *rxcp)
1667 {
1668         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1669         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1670
1671         if (err)
1672                 rxo->stats.rxcp_err++;
1673
1674         return (tcp_frame && !err) ? true : false;
1675 }
1676
/* NAPI poll handler for an rx queue: processes up to 'budget'
 * completions, replenishes the rx ring when it runs low, and notifies
 * the CQ — re-arming it only when all work was consumed. */
int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxo, rxcp))
                        be_rx_compl_process_gro(adapter, rxo, rxcp);
                else
                        be_rx_compl_process(adapter, rxo, rxcp);

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
1714
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Always returns 1 (all work claimed done) for the same reason.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	/* Reap every available tx completion and free the wrbs/skbs
	 * up to the completed index. */
	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	/* Re-arm the MCC CQ for however many completions were processed */
	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
1762
1763 void be_detect_dump_ue(struct be_adapter *adapter)
1764 {
1765         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1766         u32 i;
1767
1768         pci_read_config_dword(adapter->pdev,
1769                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1770         pci_read_config_dword(adapter->pdev,
1771                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1772         pci_read_config_dword(adapter->pdev,
1773                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1774         pci_read_config_dword(adapter->pdev,
1775                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1776
1777         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1778         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1779
1780         if (ue_status_lo || ue_status_hi) {
1781                 adapter->ue_detected = true;
1782                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1783         }
1784
1785         if (ue_status_lo) {
1786                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1787                         if (ue_status_lo & 1)
1788                                 dev_err(&adapter->pdev->dev,
1789                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1790                 }
1791         }
1792         if (ue_status_hi) {
1793                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1794                         if (ue_status_hi & 1)
1795                                 dev_err(&adapter->pdev->dev,
1796                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1797                 }
1798         }
1799
1800 }
1801
/* Periodic (1s) housekeeping: refresh stats, update rx/tx rates and
 * EQ delays, replenish starved rx queues, and check for unrecoverable
 * hardware errors. Re-schedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Issue a new stats request only when the previous one is done */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		/* Re-post buffers to a queue that ran dry */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}

	/* Check for UE only until one is detected (then it stays logged) */
	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1829
1830 static void be_msix_disable(struct be_adapter *adapter)
1831 {
1832         if (adapter->msix_enabled) {
1833                 pci_disable_msix(adapter->pdev);
1834                 adapter->msix_enabled = false;
1835         }
1836 }
1837
1838 static int be_num_rxqs_get(struct be_adapter *adapter)
1839 {
1840         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1841                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1842                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1843         } else {
1844                 dev_warn(&adapter->pdev->dev,
1845                         "No support for multiple RX queues\n");
1846                 return 1;
1847         }
1848 }
1849
1850 static void be_msix_enable(struct be_adapter *adapter)
1851 {
1852 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1853         int i, status;
1854
1855         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1856
1857         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1858                 adapter->msix_entries[i].entry = i;
1859
1860         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1861                         adapter->num_rx_qs + 1);
1862         if (status == 0) {
1863                 goto done;
1864         } else if (status >= BE_MIN_MSIX_VECTORS) {
1865                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1866                                 status) == 0) {
1867                         adapter->num_rx_qs = status - 1;
1868                         dev_warn(&adapter->pdev->dev,
1869                                 "Could alloc only %d MSIx vectors. "
1870                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1871                         goto done;
1872                 }
1873         }
1874         return;
1875 done:
1876         adapter->msix_enabled = true;
1877 }
1878
/* Enable SR-IOV VFs when this is the physical function and the module
 * parameter requested some; records success in adapter->sriov_enabled.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int ret;

		ret = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = (ret == 0);
	}
#endif
}
1891
/* Disable SR-IOV if it was enabled; compiled out without CONFIG_PCI_IOV. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
1901
1902 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1903 {
1904         return adapter->msix_entries[
1905                         be_evt_bit_get(adapter, eq_id)].vector;
1906 }
1907
1908 static int be_request_irq(struct be_adapter *adapter,
1909                 struct be_eq_obj *eq_obj,
1910                 void *handler, char *desc, void *context)
1911 {
1912         struct net_device *netdev = adapter->netdev;
1913         int vec;
1914
1915         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1916         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1917         return request_irq(vec, handler, 0, eq_obj->desc, context);
1918 }
1919
1920 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1921                         void *context)
1922 {
1923         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1924         free_irq(vec, context);
1925 }
1926
1927 static int be_msix_register(struct be_adapter *adapter)
1928 {
1929         struct be_rx_obj *rxo;
1930         int status, i;
1931         char qname[10];
1932
1933         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1934                                 adapter);
1935         if (status)
1936                 goto err;
1937
1938         for_all_rx_queues(adapter, rxo, i) {
1939                 sprintf(qname, "rxq%d", i);
1940                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1941                                 qname, rxo);
1942                 if (status)
1943                         goto err_msix;
1944         }
1945
1946         return 0;
1947
1948 err_msix:
1949         be_free_irq(adapter, &adapter->tx_eq, adapter);
1950
1951         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1952                 be_free_irq(adapter, &rxo->rx_eq, rxo);
1953
1954 err:
1955         dev_warn(&adapter->pdev->dev,
1956                 "MSIX Request IRQ failed - err %d\n", status);
1957         pci_disable_msix(adapter->pdev);
1958         adapter->msix_enabled = false;
1959         return status;
1960 }
1961
1962 static int be_irq_register(struct be_adapter *adapter)
1963 {
1964         struct net_device *netdev = adapter->netdev;
1965         int status;
1966
1967         if (adapter->msix_enabled) {
1968                 status = be_msix_register(adapter);
1969                 if (status == 0)
1970                         goto done;
1971                 /* INTx is not supported for VF */
1972                 if (!be_physfn(adapter))
1973                         return status;
1974         }
1975
1976         /* INTx */
1977         netdev->irq = adapter->pdev->irq;
1978         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1979                         adapter);
1980         if (status) {
1981                 dev_err(&adapter->pdev->dev,
1982                         "INTx request IRQ failed - err %d\n", status);
1983                 return status;
1984         }
1985 done:
1986         adapter->isr_registered = true;
1987         return 0;
1988 }
1989
1990 static void be_irq_unregister(struct be_adapter *adapter)
1991 {
1992         struct net_device *netdev = adapter->netdev;
1993         struct be_rx_obj *rxo;
1994         int i;
1995
1996         if (!adapter->isr_registered)
1997                 return;
1998
1999         /* INTx */
2000         if (!adapter->msix_enabled) {
2001                 free_irq(netdev->irq, adapter);
2002                 goto done;
2003         }
2004
2005         /* MSIx */
2006         be_free_irq(adapter, &adapter->tx_eq, adapter);
2007
2008         for_all_rx_queues(adapter, rxo, i)
2009                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2010
2011 done:
2012         adapter->isr_registered = false;
2013 }
2014
/* ndo_stop: quiesce the device in careful order — stop the worker and
 * async MCC first, then the stack-facing queue, then interrupts/NAPI,
 * and finally drain outstanding tx completions so all skbs are freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	/* Make sure no handler is still running on another CPU before
	 * unregistering the IRQs. */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2057
/* ndo_open: bring the interface up — post rx buffers, enable NAPI and
 * interrupts, arm the event/completion queues, start the worker, then
 * query link state and (for the PF) program vlan/flow-control settings.
 * On any failure the device is torn back down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	/* Kick off the periodic worker (first run after 100ms) */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* Only the PF may set vlan config and flow control */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2112
/* Enable or disable Wake-on-LAN (magic packet). Enabling programs the
 * netdev MAC as the wake address and sets the PCI PM wake bits;
 * disabling programs an all-zero MAC and clears them.
 * Returns 0 on success, negative/err status on failure.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* an all-zero MAC disables magic-packet wake in the fw cmd */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2150
2151 /*
2152  * Generate a seed MAC address from the PF MAC Address using jhash.
2153  * MAC Address for VFs are assigned incrementally starting from the seed.
2154  * These addresses are programmed in the ASIC by the PF and the VF driver
2155  * queries for the MAC address during its probe.
2156  */
2157 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2158 {
2159         u32 vf = 0;
2160         int status = 0;
2161         u8 mac[ETH_ALEN];
2162
2163         be_vf_eth_addr_generate(adapter, mac);
2164
2165         for (vf = 0; vf < num_vfs; vf++) {
2166                 status = be_cmd_pmac_add(adapter, mac,
2167                                         adapter->vf_cfg[vf].vf_if_handle,
2168                                         &adapter->vf_cfg[vf].vf_pmac_id);
2169                 if (status)
2170                         dev_err(&adapter->pdev->dev,
2171                                 "Mac address add failed for VF %d\n", vf);
2172                 else
2173                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2174
2175                 mac[5] += 1;
2176         }
2177         return status;
2178 }
2179
2180 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2181 {
2182         u32 vf;
2183
2184         for (vf = 0; vf < num_vfs; vf++) {
2185                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2186                         be_cmd_pmac_del(adapter,
2187                                         adapter->vf_cfg[vf].vf_if_handle,
2188                                         adapter->vf_cfg[vf].vf_pmac_id);
2189         }
2190 }
2191
/* Create the firmware-side objects the driver needs: the main interface
 * (plus one per VF on the PF), then tx, rx and mcc queues, then VF MAC
 * addresses. Each failure unwinds everything created before it via the
 * chained labels at the bottom.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	/* The PF gets promiscuous/L3L4-error capabilities and, when
	 * supported, RSS; VFs get only the basic flags. */
	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		/* Create one interface per VF. NOTE(review): mac is passed
		 * uninitialized here — presumably ignored because
		 * pmac_invalid is true; confirm against be_cmd_if_create. */
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		/* A VF learns its (PF-assigned) MAC by querying the fw */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	/* -1 = unknown; filled in by the link status query on open */
	adapter->link_speed = -1;

	return 0;

	/* Error unwind: each label undoes one more creation step */
mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
2283
/* Undo be_setup(): remove VF MACs (PF only), destroy all queues and the
 * interface, then tell the firmware the driver is done issuing commands.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2299
2300
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
/* Signature string ("*** SE FLASH DIRECTORY *** ") split across two
 * 16-byte halves, as it is laid out in the flash directory header. */
char flash_cookie[2][16] =      {"*** SE FLAS",
                                "H DIRECTORY *** "};
2304
2305 static bool be_flash_redboot(struct be_adapter *adapter,
2306                         const u8 *p, u32 img_start, int image_size,
2307                         int hdr_size)
2308 {
2309         u32 crc_offset;
2310         u8 flashed_crc[4];
2311         int status;
2312
2313         crc_offset = hdr_size + img_start + image_size - 4;
2314
2315         p += crc_offset;
2316
2317         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2318                         (image_size - 4));
2319         if (status) {
2320                 dev_err(&adapter->pdev->dev,
2321                 "could not get crc from flash, not flashing redboot\n");
2322                 return false;
2323         }
2324
2325         /*update redboot only if crc does not match*/
2326         if (!memcmp(flashed_crc, p, 4))
2327                 return false;
2328         else
2329                 return true;
2330 }
2331
2332 static int be_flash_data(struct be_adapter *adapter,
2333                         const struct firmware *fw,
2334                         struct be_dma_mem *flash_cmd, int num_of_images)
2335
2336 {
2337         int status = 0, i, filehdr_size = 0;
2338         u32 total_bytes = 0, flash_op;
2339         int num_bytes;
2340         const u8 *p = fw->data;
2341         struct be_cmd_write_flashrom *req = flash_cmd->va;
2342         struct flash_comp *pflashcomp;
2343         int num_comp;
2344
2345         struct flash_comp gen3_flash_types[9] = {
2346                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2347                         FLASH_IMAGE_MAX_SIZE_g3},
2348                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2349                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2350                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2351                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2352                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2353                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2354                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2355                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2356                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2357                         FLASH_IMAGE_MAX_SIZE_g3},
2358                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2359                         FLASH_IMAGE_MAX_SIZE_g3},
2360                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2361                         FLASH_IMAGE_MAX_SIZE_g3},
2362                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2363                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2364         };
2365         struct flash_comp gen2_flash_types[8] = {
2366                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2367                         FLASH_IMAGE_MAX_SIZE_g2},
2368                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2369                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2370                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2371                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2372                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2373                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2374                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2375                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2376                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2377                         FLASH_IMAGE_MAX_SIZE_g2},
2378                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2379                         FLASH_IMAGE_MAX_SIZE_g2},
2380                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2381                          FLASH_IMAGE_MAX_SIZE_g2}
2382         };
2383
2384         if (adapter->generation == BE_GEN3) {
2385                 pflashcomp = gen3_flash_types;
2386                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2387                 num_comp = 9;
2388         } else {
2389                 pflashcomp = gen2_flash_types;
2390                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2391                 num_comp = 8;
2392         }
2393         for (i = 0; i < num_comp; i++) {
2394                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2395                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2396                         continue;
2397                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2398                         (!be_flash_redboot(adapter, fw->data,
2399                          pflashcomp[i].offset, pflashcomp[i].size,
2400                          filehdr_size)))
2401                         continue;
2402                 p = fw->data;
2403                 p += filehdr_size + pflashcomp[i].offset
2404                         + (num_of_images * sizeof(struct image_hdr));
2405         if (p + pflashcomp[i].size > fw->data + fw->size)
2406                 return -1;
2407         total_bytes = pflashcomp[i].size;
2408                 while (total_bytes) {
2409                         if (total_bytes > 32*1024)
2410                                 num_bytes = 32*1024;
2411                         else
2412                                 num_bytes = total_bytes;
2413                         total_bytes -= num_bytes;
2414
2415                         if (!total_bytes)
2416                                 flash_op = FLASHROM_OPER_FLASH;
2417                         else
2418                                 flash_op = FLASHROM_OPER_SAVE;
2419                         memcpy(req->params.data_buf, p, num_bytes);
2420                         p += num_bytes;
2421                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2422                                 pflashcomp[i].optype, flash_op, num_bytes);
2423                         if (status) {
2424                                 dev_err(&adapter->pdev->dev,
2425                                         "cmd to write to flash rom failed.\n");
2426                                 return -1;
2427                         }
2428                         yield();
2429                 }
2430         }
2431         return 0;
2432 }
2433
2434 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2435 {
2436         if (fhdr == NULL)
2437                 return 0;
2438         if (fhdr->build[0] == '3')
2439                 return BE_GEN3;
2440         else if (fhdr->build[0] == '2')
2441                 return BE_GEN2;
2442         else
2443                 return 0;
2444 }
2445
/* Load and flash a firmware (UFI) file named by @func (from ethtool).
 * Verifies the file's generation matches the adapter's, then flashes
 * each image via be_flash_data(). Returns 0 on success.
 */
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	/* func is ethtool's filename, bounded by the same max length */
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* DMA buffer large enough for the cmd header plus one 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		/* Gen3 UFI: walk the image headers, flash imageid 1 */
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	/* safe even when request_firmware() failed (fw is NULL then) */
	release_firmware(fw);
	return status;
}
2511
/* Entry points the network stack invokes on this interface, including
 * VLAN acceleration hooks and the SR-IOV VF configuration ops. */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
2528
2529 static void be_netdev_init(struct net_device *netdev)
2530 {
2531         struct be_adapter *adapter = netdev_priv(netdev);
2532         struct be_rx_obj *rxo;
2533         int i;
2534
2535         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2536                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2537                 NETIF_F_GRO | NETIF_F_TSO6;
2538
2539         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2540
2541         netdev->flags |= IFF_MULTICAST;
2542
2543         adapter->rx_csum = true;
2544
2545         /* Default settings for Rx and Tx flow control */
2546         adapter->rx_fc = true;
2547         adapter->tx_fc = true;
2548
2549         netif_set_gso_max_size(netdev, 65535);
2550
2551         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2552
2553         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2554
2555         for_all_rx_queues(adapter, rxo, i)
2556                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2557                                 BE_NAPI_WEIGHT);
2558
2559         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2560                 BE_NAPI_WEIGHT);
2561
2562         netif_carrier_off(netdev);
2563         netif_stop_queue(netdev);
2564 }
2565
/* Undo be_map_pci_bars(): release whichever BAR mappings were created. */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	/* On VFs, pcicfg is an offset into the db mapping (see
	 * be_map_pci_bars()), so only the PF has a mapping of its own. */
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
2575
2576 static int be_map_pci_bars(struct be_adapter *adapter)
2577 {
2578         u8 __iomem *addr;
2579         int pcicfg_reg, db_reg;
2580
2581         if (be_physfn(adapter)) {
2582                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2583                                 pci_resource_len(adapter->pdev, 2));
2584                 if (addr == NULL)
2585                         return -ENOMEM;
2586                 adapter->csr = addr;
2587         }
2588
2589         if (adapter->generation == BE_GEN2) {
2590                 pcicfg_reg = 1;
2591                 db_reg = 4;
2592         } else {
2593                 pcicfg_reg = 0;
2594                 if (be_physfn(adapter))
2595                         db_reg = 4;
2596                 else
2597                         db_reg = 0;
2598         }
2599         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2600                                 pci_resource_len(adapter->pdev, db_reg));
2601         if (addr == NULL)
2602                 goto pci_map_err;
2603         adapter->db = addr;
2604
2605         if (be_physfn(adapter)) {
2606                 addr = ioremap_nocache(
2607                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2608                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2609                 if (addr == NULL)
2610                         goto pci_map_err;
2611                 adapter->pcicfg = addr;
2612         } else
2613                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2614
2615         return 0;
2616 pci_map_err:
2617         be_unmap_pci_bars(adapter);
2618         return -ENOMEM;
2619 }
2620
2621
2622 static void be_ctrl_cleanup(struct be_adapter *adapter)
2623 {
2624         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2625
2626         be_unmap_pci_bars(adapter);
2627
2628         if (mem->va)
2629                 pci_free_consistent(adapter->pdev, mem->size,
2630                         mem->va, mem->dma);
2631
2632         mem = &adapter->mc_cmd_mem;
2633         if (mem->va)
2634                 pci_free_consistent(adapter->pdev, mem->size,
2635                         mem->va, mem->dma);
2636 }
2637
/*
 * Set up the control path: map PCI BARs, allocate the 16-byte-aligned
 * mailbox used for bootstrap firmware commands, allocate the multicast
 * command buffer, and initialize the locks protecting mailbox/MCC use.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released through the goto chain below.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox itself can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	/* The aligned view is what the mailbox code actually uses */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
2689
2690 static void be_stats_cleanup(struct be_adapter *adapter)
2691 {
2692         struct be_dma_mem *cmd = &adapter->stats_cmd;
2693
2694         if (cmd->va)
2695                 pci_free_consistent(adapter->pdev, cmd->size,
2696                         cmd->va, cmd->dma);
2697 }
2698
2699 static int be_stats_init(struct be_adapter *adapter)
2700 {
2701         struct be_dma_mem *cmd = &adapter->stats_cmd;
2702
2703         cmd->size = sizeof(struct be_cmd_req_get_stats);
2704         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2705         if (cmd->va == NULL)
2706                 return -1;
2707         memset(cmd->va, 0, cmd->size);
2708         return 0;
2709 }
2710
/*
 * PCI remove callback.  Tears down in roughly the reverse order of
 * be_probe(): unregister from the net stack first so no new traffic
 * arrives, then release HW queues, stats/control DMA memory, SR-IOV
 * and MSI-X, and finally the PCI device itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the adapter (netdev priv) itself, so this must come last */
	free_netdev(adapter->netdev);
}
2736
/*
 * Query static configuration from firmware: FW version, port number,
 * function mode/capabilities and, on the PF only, the permanent MAC
 * address (copied into the netdev's dev_addr/perm_addr).
 * Returns 0 on success, the failing FW command's status, or
 * -EADDRNOTAVAIL if the queried MAC address is invalid.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* VFs do not query their own MAC; only the PF does */
	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* NOTE(review): 0x400 is a magic function_mode bit that quarters
	 * the per-function VLAN table — presumably a multi-channel mode
	 * flag; confirm against be_cmds.h and replace with a named bit. */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
2774
/*
 * PCI probe: bring up one adapter.  The order is significant — PCI
 * enable/regions, netdev allocation, DMA mask, SR-IOV, control path
 * (BARs + mailbox), firmware POST/init/reset, stats buffer, config
 * query, MSI-X, queue setup, and only then netdev registration.
 * Each failure unwinds via the label chain below in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	/* Device ID determines the ASIC generation (BAR layout, flashing) */
	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA, fall back to 32-bit */
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* start from a clean function state (PF only) */
	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

	/* error unwind: each label releases what the step above acquired */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
2895
/*
 * PM suspend callback: optionally arm wake-on-LAN, detach and close
 * the interface, refresh the cached flow-control settings from HW,
 * release HW resources and put the device into the requested
 * low-power state.  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* snapshot tx_fc/rx_fc from HW before tearing everything down */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
2918
2919 static int be_resume(struct pci_dev *pdev)
2920 {
2921         int status = 0;
2922         struct be_adapter *adapter = pci_get_drvdata(pdev);
2923         struct net_device *netdev =  adapter->netdev;
2924
2925         netif_device_detach(netdev);
2926
2927         status = pci_enable_device(pdev);
2928         if (status)
2929                 return status;
2930
2931         pci_set_power_state(pdev, 0);
2932         pci_restore_state(pdev);
2933
2934         /* tell fw we're ready to fire cmds */
2935         status = be_cmd_fw_init(adapter);
2936         if (status)
2937                 return status;
2938
2939         be_setup(adapter);
2940         if (netif_running(netdev)) {
2941                 rtnl_lock();
2942                 be_open(netdev);
2943                 rtnl_unlock();
2944         }
2945         netif_device_attach(netdev);
2946
2947         if (adapter->wol)
2948                 be_setup_wol(adapter, false);
2949         return 0;
2950 }
2951
/*
 * Shutdown callback (reboot/kexec).
 * An FLR (function-level reset) will stop BE from DMAing any data
 * while the OS goes down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	/* arm wake-on-LAN if the user configured it */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
2969
/*
 * EEH error_detected callback: the PCI channel is failing.  Quiesce
 * the interface, release HW resources, and tell the EEH core whether
 * a slot reset is worth attempting.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* NOTE(review): presumably gates FW command submission while the
	 * channel is down — confirm where eeh_err is checked in be_cmds */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
2996
/*
 * EEH slot_reset callback: re-enable the freshly reset device, restore
 * its PCI state and verify the firmware came back via POST.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* channel usable again; FW commands may be issued */
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3020
3021 static void be_eeh_resume(struct pci_dev *pdev)
3022 {
3023         int status = 0;
3024         struct be_adapter *adapter = pci_get_drvdata(pdev);
3025         struct net_device *netdev =  adapter->netdev;
3026
3027         dev_info(&adapter->pdev->dev, "EEH resume\n");
3028
3029         pci_save_state(pdev);
3030
3031         /* tell fw we're ready to fire cmds */
3032         status = be_cmd_fw_init(adapter);
3033         if (status)
3034                 goto err;
3035
3036         status = be_setup(adapter);
3037         if (status)
3038                 goto err;
3039
3040         if (netif_running(netdev)) {
3041                 status = be_open(netdev);
3042                 if (status)
3043                         goto err;
3044         }
3045         netif_device_attach(netdev);
3046         return;
3047 err:
3048         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3049 }
3050
/* EEH (PCI error recovery) callbacks: quiesce on error, re-POST after
 * slot reset, re-init and reattach on resume. */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3056
/* PCI driver glue: probe/remove, power management and shutdown hooks,
 * plus the EEH error handlers above. */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3067
3068 static int __init be_init_module(void)
3069 {
3070         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3071             rx_frag_size != 2048) {
3072                 printk(KERN_WARNING DRV_NAME
3073                         " : Module param rx_frag_size must be 2048/4096/8192."
3074                         " Using 2048\n");
3075                 rx_frag_size = 2048;
3076         }
3077
3078         if (num_vfs > 32) {
3079                 printk(KERN_WARNING DRV_NAME
3080                         " : Module param num_vfs must not be greater than 32."
3081                         "Using 32\n");
3082                 num_vfs = 32;
3083         }
3084
3085         return pci_register_driver(&be_driver);
3086 }
3087 module_init(be_init_module);
3088
/* Module exit: unregister the PCI driver (invokes be_remove() for each
 * bound device via be_driver.remove). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);