/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

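/* Example (a sketch, assuming the driver is built as the be2net module):
 * both parameters above are read-only module options and can only be set
 * at load time, e.g.
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */
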
static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

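/* Enable or disable host interrupt delivery by flipping the HOSTINTR bit
 * in the MEMBAR interrupt-control register; a no-op when the bit already
 * matches the requested state.
 */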
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg, enabled;

	/* Don't touch the device once an EEH error has been detected */
	if (adapter->eeh_err)
		return;

	reg = ioread32(addr);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

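/* Doorbell helpers: each composes a queue id and a count into a single
 * 32-bit doorbell write. The wmb() makes the queue entries visible to the
 * device before the doorbell hands them over.
 */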
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

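/* ndo_set_mac_address handler. On the PF the old pmac entry is deleted
 * from the interface and the new one added; VFs only update the netdev
 * address, as their MACs are programmed by the owning PF.
 */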
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

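/* Fold the adapter's hardware stats (per-port rxf counters, per-queue erx
 * drop counters) and the driver-maintained tx/rx counters into
 * netdev->stats.
 */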
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in Linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed per second */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

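/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * Callers sample at intervals of at least 2 * HZ, so ticks / HZ is never
 * zero here.
 */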
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

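/* Build the WRB chain for one skb: a header WRB first, then one fragment
 * WRB for the linear head (if any) and one per page fragment, plus an
 * optional dummy WRB so non-Lancer chips see an even WRB count. For
 * example, an skb with a linear head and two page frags needs
 * 1 (hdr) + 1 (head) + 2 (frags) = 4 WRBs and no dummy. On a DMA mapping
 * error everything mapped so far is unwound and 0 is returned so the
 * caller drops the skb.
 */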
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];

		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

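/* ndo_start_xmit: post the skb's WRBs, remember the skb for completion
 * processing, stop the queue early when fewer than BE_MAX_TX_FRAG_COUNT
 * entries would remain, and only then ring the tx doorbell.
 */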
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit, which will wake up the
		 * queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

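/* Sync the hardware rx filter with netdev flags: IFF_PROMISC maps to port
 * promiscuous mode, IFF_ALLMULTI (or an mc list longer than BE_MAX_MC)
 * to multicast-promiscuous, otherwise the exact mc list is programmed.
 */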
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

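/* Fetch the page_info for a completed rx fragment. The big page backing
 * several fragments is dma-unmapped only when its last fragment
 * (last_page_user) is consumed.
 */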
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
	rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
	rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
}

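/* Return the next valid rx completion (parsed into rxo->rxcp) or NULL.
 * BE3 native mode uses the v1 completion layout, other modes v0; the
 * entry's valid bit is cleared after parsing so it is consumed only once.
 */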
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	/* vlanf could be wrongly set in some cards. Ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !rxcp->vtm)
		rxcp->vlanf = 0;

	if (!lancer_chip(adapter))
		rxcp->vid = swab16(rxcp->vid);

	if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
		rxcp->vlanf = 0;

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

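/* Reclaim one transmitted skb: walk its WRBs from the queue tail up to
 * last_index, unmapping the header once and then each fragment, and
 * credit the freed WRBs back to txq->used.
 */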
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

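/* Drain the EQ, re-arm it via the doorbell (this also covers spurious
 * interrupts that arrive without events) and schedule napi if anything
 * was popped.
 */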
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

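/* Flush an rx queue during teardown: discard any pending completions,
 * then release posted buffers that never produced one, starting from the
 * oldest posted index.
 */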
1341 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1342 {
1343         struct be_rx_page_info *page_info;
1344         struct be_queue_info *rxq = &rxo->q;
1345         struct be_queue_info *rx_cq = &rxo->cq;
1346         struct be_rx_compl_info *rxcp;
1347         u16 tail;
1348
1349         /* First cleanup pending rx completions */
1350         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1351                 be_rx_compl_discard(adapter, rxo, rxcp);
1352                 be_cq_notify(adapter, rx_cq->id, false, 1);
1353         }
1354
1355         /* Then free posted rx buffer that were not used */
1356         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1357         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1358                 page_info = get_rx_page_info(adapter, rxo, tail);
1359                 put_page(page_info->page);
1360                 memset(page_info, 0, sizeof(*page_info));
1361         }
1362         BUG_ON(atomic_read(&rxq->used));
1363 }
1364
1365 static void be_tx_compl_clean(struct be_adapter *adapter)
1366 {
1367         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1368         struct be_queue_info *txq = &adapter->tx_obj.q;
1369         struct be_eth_tx_compl *txcp;
1370         u16 end_idx, cmpl = 0, timeo = 0;
1371         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1372         struct sk_buff *sent_skb;
1373         bool dummy_wrb;
1374
1375         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1376         do {
1377                 while ((txcp = be_tx_compl_get(tx_cq))) {
1378                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1379                                         wrb_index, txcp);
1380                         be_tx_compl_process(adapter, end_idx);
1381                         cmpl++;
1382                 }
1383                 if (cmpl) {
1384                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1385                         cmpl = 0;
1386                 }
1387
1388                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1389                         break;
1390
1391                 mdelay(1);
1392         } while (true);
1393
1394         if (atomic_read(&txq->used))
1395                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1396                         atomic_read(&txq->used));
1397
1398         /* free posted tx for which compls will never arrive */
1399         while (atomic_read(&txq->used)) {
1400                 sent_skb = sent_skbs[txq->tail];
1401                 end_idx = txq->tail;
1402                 index_adv(&end_idx,
1403                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1404                         txq->len);
1405                 be_tx_compl_process(adapter, end_idx);
1406         }
1407 }
1408
1409 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1410 {
1411         struct be_queue_info *q;
1412
1413         q = &adapter->mcc_obj.q;
1414         if (q->created)
1415                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1416         be_queue_free(adapter, q);
1417
1418         q = &adapter->mcc_obj.cq;
1419         if (q->created)
1420                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1421         be_queue_free(adapter, q);
1422 }
1423
1424 /* Must be called only after TX qs are created as MCC shares TX EQ */
1425 static int be_mcc_queues_create(struct be_adapter *adapter)
1426 {
1427         struct be_queue_info *q, *cq;
1428
1429         /* Alloc MCC compl queue */
1430         cq = &adapter->mcc_obj.cq;
1431         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1432                         sizeof(struct be_mcc_compl)))
1433                 goto err;
1434
1435         /* Ask BE to create MCC compl queue; share TX's eq */
1436         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1437                 goto mcc_cq_free;
1438
1439         /* Alloc MCC queue */
1440         q = &adapter->mcc_obj.q;
1441         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1442                 goto mcc_cq_destroy;
1443
1444         /* Ask BE to create MCC queue */
1445         if (be_cmd_mccq_create(adapter, q, cq))
1446                 goto mcc_q_free;
1447
1448         return 0;
1449
1450 mcc_q_free:
1451         be_queue_free(adapter, q);
1452 mcc_cq_destroy:
1453         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1454 mcc_cq_free:
1455         be_queue_free(adapter, cq);
1456 err:
1457         return -1;
1458 }
1459
1460 static void be_tx_queues_destroy(struct be_adapter *adapter)
1461 {
1462         struct be_queue_info *q;
1463
1464         q = &adapter->tx_obj.q;
1465         if (q->created)
1466                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1467         be_queue_free(adapter, q);
1468
1469         q = &adapter->tx_obj.cq;
1470         if (q->created)
1471                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1472         be_queue_free(adapter, q);
1473
1474         /* Clear any residual events */
1475         be_eq_clean(adapter, &adapter->tx_eq);
1476
1477         q = &adapter->tx_eq.q;
1478         if (q->created)
1479                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1480         be_queue_free(adapter, q);
1481 }
1482
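/* The TX EQ uses a fixed event-queue delay with adaptive interrupt
 * coalescing (AIC) disabled; this EQ is later shared with the MCC queues.
 */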
1483 static int be_tx_queues_create(struct be_adapter *adapter)
1484 {
1485         struct be_queue_info *eq, *q, *cq;
1486
1487         adapter->tx_eq.max_eqd = 0;
1488         adapter->tx_eq.min_eqd = 0;
1489         adapter->tx_eq.cur_eqd = 96;
1490         adapter->tx_eq.enable_aic = false;
1491         /* Alloc Tx Event queue */
1492         eq = &adapter->tx_eq.q;
1493         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1494                 return -1;
1495
1496         /* Ask BE to create Tx Event queue */
1497         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1498                 goto tx_eq_free;
1499
1500         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1501
1503         /* Alloc TX eth compl queue */
1504         cq = &adapter->tx_obj.cq;
1505         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1506                         sizeof(struct be_eth_tx_compl)))
1507                 goto tx_eq_destroy;
1508
1509         /* Ask BE to create Tx eth compl queue */
1510         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1511                 goto tx_cq_free;
1512
1513         /* Alloc TX eth queue */
1514         q = &adapter->tx_obj.q;
1515         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1516                 goto tx_cq_destroy;
1517
1518         /* Ask BE to create Tx eth queue */
1519         if (be_cmd_txq_create(adapter, q, cq))
1520                 goto tx_q_free;
1521         return 0;
1522
1523 tx_q_free:
1524         be_queue_free(adapter, q);
1525 tx_cq_destroy:
1526         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1527 tx_cq_free:
1528         be_queue_free(adapter, cq);
1529 tx_eq_destroy:
1530         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1531 tx_eq_free:
1532         be_queue_free(adapter, eq);
1533         return -1;
1534 }
1535
1536 static void be_rx_queues_destroy(struct be_adapter *adapter)
1537 {
1538         struct be_queue_info *q;
1539         struct be_rx_obj *rxo;
1540         int i;
1541
1542         for_all_rx_queues(adapter, rxo, i) {
1543                 q = &rxo->q;
1544                 if (q->created) {
1545                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1546                         /* After the rxq is invalidated, wait for a grace time
1547                          * of 1ms for all dma to end and the flush compl to
1548                          * arrive
1549                          */
1550                         mdelay(1);
1551                         be_rx_q_clean(adapter, rxo);
1552                 }
1553                 be_queue_free(adapter, q);
1554
1555                 q = &rxo->cq;
1556                 if (q->created)
1557                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1558                 be_queue_free(adapter, q);
1559
1560                 /* Clear any residual events */
1561                 q = &rxo->rx_eq.q;
1562                 if (q->created) {
1563                         be_eq_clean(adapter, &rxo->rx_eq);
1564                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1565                 }
1566                 be_queue_free(adapter, q);
1567         }
1568 }
1569
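/* Create an EQ, a CQ and an RX queue per rx_obj. Queue 0 is the default
 * (non-RSS) queue; the remaining queues are created with RSS enabled and
 * their rss_ids are programmed into the RSS indirection table below.
 */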
1570 static int be_rx_queues_create(struct be_adapter *adapter)
1571 {
1572         struct be_queue_info *eq, *q, *cq;
1573         struct be_rx_obj *rxo;
1574         int rc, i;
1575
1576         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577         for_all_rx_queues(adapter, rxo, i) {
1578                 rxo->adapter = adapter;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
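/* Peek at the EQ tail without consuming anything: a pending, unprocessed
 * entry has a non-zero 'evt' word.
 */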
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640         return eqe->evt != 0;
1644 }
1645
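/* INTx handler. Lancer has no CEV_ISR register, so pending work is found
 * by peeking at the EQs directly; on BE2/BE3 the CEV_ISR0-based register
 * reports which EQs (identified by eq_idx) raised the interrupt.
 */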
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
1650         int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
1669                 if ((1 << adapter->tx_eq.eq_idx & isr))
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
1673                         if ((1 << rxo->rx_eq.eq_idx & isr))
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
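/* GRO is attempted only for error-free TCP completions */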
1700 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1701 {
1702         return rxcp->tcpf && !rxcp->err;
1703 }
1704
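/* NAPI poll handler for one RX queue: consume up to @budget completions
 * (flush completions carry num_rcvd == 0 and are skipped), replenish the
 * RX ring when it falls below the watermark, and re-arm the CQ only once
 * all pending completions have been consumed.
 */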
1705 static int be_poll_rx(struct napi_struct *napi, int budget)
1706 {
1707         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1708         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1709         struct be_adapter *adapter = rxo->adapter;
1710         struct be_queue_info *rx_cq = &rxo->cq;
1711         struct be_rx_compl_info *rxcp;
1712         u32 work_done;
1713
1714         rxo->stats.rx_polls++;
1715         for (work_done = 0; work_done < budget; work_done++) {
1716                 rxcp = be_rx_compl_get(rxo);
1717                 if (!rxcp)
1718                         break;
1719
1720                 /* Ignore flush completions */
1721                 if (rxcp->num_rcvd) {
1722                         if (do_gro(rxcp))
1723                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1724                         else
1725                                 be_rx_compl_process(adapter, rxo, rxcp);
1726                 }
1727                 be_rx_stats_update(rxo, rxcp);
1728         }
1729
1730         /* Refill the queue */
1731         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1732                 be_post_rx_frags(rxo, GFP_ATOMIC);
1733
1734         /* All consumed */
1735         if (work_done < budget) {
1736                 napi_complete(napi);
1737                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1738         } else {
1739                 /* More to be consumed; continue with interrupts disabled */
1740                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1741         }
1742         return work_done;
1743 }
1744
1745 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1746  * For TX/MCC we don't honour budget; consume everything
1747  */
1748 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1749 {
1750         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1751         struct be_adapter *adapter =
1752                 container_of(tx_eq, struct be_adapter, tx_eq);
1753         struct be_queue_info *txq = &adapter->tx_obj.q;
1754         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1755         struct be_eth_tx_compl *txcp;
1756         int tx_compl = 0, mcc_compl, status = 0;
1757         u16 end_idx;
1758
1759         while ((txcp = be_tx_compl_get(tx_cq))) {
1760                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1761                                 wrb_index, txcp);
1762                 be_tx_compl_process(adapter, end_idx);
1763                 tx_compl++;
1764         }
1765
1766         mcc_compl = be_process_mcc(adapter, &status);
1767
1768         napi_complete(napi);
1769
1770         if (mcc_compl) {
1771                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1772                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1773         }
1774
1775         if (tx_compl) {
1776                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1777
1778                 /* As Tx wrbs have been freed up, wake up netdev queue if
1779                  * it was stopped due to lack of tx wrbs.
1780                  */
1781                 if (netif_queue_stopped(adapter->netdev) &&
1782                         atomic_read(&txq->used) < txq->len / 2) {
1783                         netif_wake_queue(adapter->netdev);
1784                 }
1785
1786                 tx_stats(adapter)->be_tx_events++;
1787                 tx_stats(adapter)->be_tx_compl += tx_compl;
1788         }
1789
1790         return 1;
1791 }
1792
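/* Read the UE (unrecoverable error) status registers, drop the masked
 * bits and log a description of every error bit that remains set.
 */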
1793 void be_detect_dump_ue(struct be_adapter *adapter)
1794 {
1795         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1796         u32 i;
1797
1798         pci_read_config_dword(adapter->pdev,
1799                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1800         pci_read_config_dword(adapter->pdev,
1801                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1802         pci_read_config_dword(adapter->pdev,
1803                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1804         pci_read_config_dword(adapter->pdev,
1805                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1806
1807         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1808         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1809
1810         if (ue_status_lo || ue_status_hi) {
1811                 adapter->ue_detected = true;
1812                 adapter->eeh_err = true;
1813                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1814         }
1815
1816         if (ue_status_lo) {
1817                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1818                         if (ue_status_lo & 1)
1819                                 dev_err(&adapter->pdev->dev,
1820                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1821                 }
1822         }
1823         if (ue_status_hi) {
1824                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1825                         if (ue_status_hi & 1)
1826                                 dev_err(&adapter->pdev->dev,
1827                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1828                 }
1829         }
1831 }
1832
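/* Periodic (1 sec) housekeeping: fire the stats command, update TX/RX
 * rates and RX EQ delays, replenish starved RX rings and check for UEs.
 * While the interface is down, only pending MCC completions are reaped.
 */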
1833 static void be_worker(struct work_struct *work)
1834 {
1835         struct be_adapter *adapter =
1836                 container_of(work, struct be_adapter, work.work);
1837         struct be_rx_obj *rxo;
1838         int i;
1839
1840         /* When interrupts are not yet enabled, just reap any pending
1841          * mcc completions */
1842         if (!netif_running(adapter->netdev)) {
1843                 int mcc_compl, status = 0;
1844
1845                 mcc_compl = be_process_mcc(adapter, &status);
1846
1847                 if (mcc_compl) {
1848                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1849                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1850                 }
1851
1852                 if (!adapter->ue_detected && !lancer_chip(adapter))
1853                         be_detect_dump_ue(adapter);
1854
1855                 goto reschedule;
1856         }
1857
1858         if (!adapter->stats_cmd_sent)
1859                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1860
1861         be_tx_rate_update(adapter);
1862
1863         for_all_rx_queues(adapter, rxo, i) {
1864                 be_rx_rate_update(rxo);
1865                 be_rx_eqd_update(adapter, rxo);
1866
1867                 if (rxo->rx_post_starved) {
1868                         rxo->rx_post_starved = false;
1869                         be_post_rx_frags(rxo, GFP_KERNEL);
1870                 }
1871         }
1872         if (!adapter->ue_detected && !lancer_chip(adapter))
1873                 be_detect_dump_ue(adapter);
1874
1875 reschedule:
1876         adapter->work_counter++;
1877         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1878 }
1879
1880 static void be_msix_disable(struct be_adapter *adapter)
1881 {
1882         if (adapter->msix_enabled) {
1883                 pci_disable_msix(adapter->pdev);
1884                 adapter->msix_enabled = false;
1885         }
1886 }
1887
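/* Multiple RX queues (1 default + MAX_RSS_QS RSS queues) are used only if
 * the module param allows it, the function is RSS-capable, SR-IOV is off
 * and function_mode bit 0x400 (presumably the multi-channel/FLEX10 mode
 * bit) is clear.
 */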
1888 static int be_num_rxqs_get(struct be_adapter *adapter)
1889 {
1890         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1891                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1892                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1893         } else {
1894                 dev_warn(&adapter->pdev->dev,
1895                         "No support for multiple RX queues\n");
1896                 return 1;
1897         }
1898 }
1899
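/* Try to get one MSI-x vector per RX queue plus one for TX/MCC. If fewer
 * vectors are available, retry with what the system offers and shrink
 * num_rx_qs to match; below BE_MIN_MSIX_VECTORS, MSI-x stays disabled and
 * INTx is used instead.
 */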
1900 static void be_msix_enable(struct be_adapter *adapter)
1901 {
1902 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1903         int i, status;
1904
1905         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1906
1907         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1908                 adapter->msix_entries[i].entry = i;
1909
1910         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1911                         adapter->num_rx_qs + 1);
1912         if (status == 0) {
1913                 goto done;
1914         } else if (status >= BE_MIN_MSIX_VECTORS) {
1915                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1916                                 status) == 0) {
1917                         adapter->num_rx_qs = status - 1;
1918                         dev_warn(&adapter->pdev->dev,
1919                                 "Could allocate only %d MSIx vectors. "
1920                                 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1921                         goto done;
1922                 }
1923         }
1924         return;
1925 done:
1926         adapter->msix_enabled = true;
1927 }
1928
1929 static void be_sriov_enable(struct be_adapter *adapter)
1930 {
1931         be_check_sriov_fn_type(adapter);
1932 #ifdef CONFIG_PCI_IOV
1933         if (be_physfn(adapter) && num_vfs) {
1934                 int status;
1935
1936                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1937                 adapter->sriov_enabled = status ? false : true;
1938         }
1939 #endif
1940 }
1941
1942 static void be_sriov_disable(struct be_adapter *adapter)
1943 {
1944 #ifdef CONFIG_PCI_IOV
1945         if (adapter->sriov_enabled) {
1946                 pci_disable_sriov(adapter->pdev);
1947                 adapter->sriov_enabled = false;
1948         }
1949 #endif
1950 }
1951
1952 static inline int be_msix_vec_get(struct be_adapter *adapter,
1953                                         struct be_eq_obj *eq_obj)
1954 {
1955         return adapter->msix_entries[eq_obj->eq_idx].vector;
1956 }
1957
1958 static int be_request_irq(struct be_adapter *adapter,
1959                 struct be_eq_obj *eq_obj,
1960                 void *handler, char *desc, void *context)
1961 {
1962         struct net_device *netdev = adapter->netdev;
1963         int vec;
1964
1965         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1966         vec = be_msix_vec_get(adapter, eq_obj);
1967         return request_irq(vec, handler, 0, eq_obj->desc, context);
1968 }
1969
1970 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1971                         void *context)
1972 {
1973         int vec = be_msix_vec_get(adapter, eq_obj);
1974         free_irq(vec, context);
1975 }
1976
1977 static int be_msix_register(struct be_adapter *adapter)
1978 {
1979         struct be_rx_obj *rxo;
1980         int status, i;
1981         char qname[10];
1982
1983         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1984                                 adapter);
1985         if (status)
1986                 goto err;
1987
1988         for_all_rx_queues(adapter, rxo, i) {
1989                 sprintf(qname, "rxq%d", i);
1990                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1991                                 qname, rxo);
1992                 if (status)
1993                         goto err_msix;
1994         }
1995
1996         return 0;
1997
1998 err_msix:
1999         be_free_irq(adapter, &adapter->tx_eq, adapter);
2000
2001         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2002                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2003
2004 err:
2005         dev_warn(&adapter->pdev->dev,
2006                 "MSIX Request IRQ failed - err %d\n", status);
2007         pci_disable_msix(adapter->pdev);
2008         adapter->msix_enabled = false;
2009         return status;
2010 }
2011
2012 static int be_irq_register(struct be_adapter *adapter)
2013 {
2014         struct net_device *netdev = adapter->netdev;
2015         int status;
2016
2017         if (adapter->msix_enabled) {
2018                 status = be_msix_register(adapter);
2019                 if (status == 0)
2020                         goto done;
2021                 /* INTx is not supported for VF */
2022                 if (!be_physfn(adapter))
2023                         return status;
2024         }
2025
2026         /* INTx */
2027         netdev->irq = adapter->pdev->irq;
2028         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2029                         adapter);
2030         if (status) {
2031                 dev_err(&adapter->pdev->dev,
2032                         "INTx request IRQ failed - err %d\n", status);
2033                 return status;
2034         }
2035 done:
2036         adapter->isr_registered = true;
2037         return 0;
2038 }
2039
2040 static void be_irq_unregister(struct be_adapter *adapter)
2041 {
2042         struct net_device *netdev = adapter->netdev;
2043         struct be_rx_obj *rxo;
2044         int i;
2045
2046         if (!adapter->isr_registered)
2047                 return;
2048
2049         /* INTx */
2050         if (!adapter->msix_enabled) {
2051                 free_irq(netdev->irq, adapter);
2052                 goto done;
2053         }
2054
2055         /* MSIx */
2056         be_free_irq(adapter, &adapter->tx_eq, adapter);
2057
2058         for_all_rx_queues(adapter, rxo, i)
2059                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2060
2061 done:
2062         adapter->isr_registered = false;
2063 }
2064
2065 static int be_close(struct net_device *netdev)
2066 {
2067         struct be_adapter *adapter = netdev_priv(netdev);
2068         struct be_rx_obj *rxo;
2069         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2070         int vec, i;
2071
2072         be_async_mcc_disable(adapter);
2073
2074         netif_carrier_off(netdev);
2075         adapter->link_up = false;
2076
2077         if (!lancer_chip(adapter))
2078                 be_intr_set(adapter, false);
2079
2080         for_all_rx_queues(adapter, rxo, i)
2081                 napi_disable(&rxo->rx_eq.napi);
2082
2083         napi_disable(&tx_eq->napi);
2084
2085         if (lancer_chip(adapter)) {
2086                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2087                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2088                 for_all_rx_queues(adapter, rxo, i)
2089                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2090         }
2091
2092         if (adapter->msix_enabled) {
2093                 vec = be_msix_vec_get(adapter, tx_eq);
2094                 synchronize_irq(vec);
2095
2096                 for_all_rx_queues(adapter, rxo, i) {
2097                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2098                         synchronize_irq(vec);
2099                 }
2100         } else {
2101                 synchronize_irq(netdev->irq);
2102         }
2103         be_irq_unregister(adapter);
2104
2105         /* Wait for all pending tx completions to arrive so that
2106          * all tx skbs are freed.
2107          */
2108         be_tx_compl_clean(adapter);
2109
2110         return 0;
2111 }
2112
2113 static int be_open(struct net_device *netdev)
2114 {
2115         struct be_adapter *adapter = netdev_priv(netdev);
2116         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2117         struct be_rx_obj *rxo;
2118         bool link_up;
2119         int status, i;
2120         u8 mac_speed;
2121         u16 link_speed;
2122
2123         for_all_rx_queues(adapter, rxo, i) {
2124                 be_post_rx_frags(rxo, GFP_KERNEL);
2125                 napi_enable(&rxo->rx_eq.napi);
2126         }
2127         napi_enable(&tx_eq->napi);
2128
2129         be_irq_register(adapter);
2130
2131         if (!lancer_chip(adapter))
2132                 be_intr_set(adapter, true);
2133
2134         /* The evt queues are created in unarmed state; arm them */
2135         for_all_rx_queues(adapter, rxo, i) {
2136                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2137                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2138         }
2139         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2140
2141         /* Now that interrupts are on we can process async mcc */
2142         be_async_mcc_enable(adapter);
2143
2144         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2145                         &link_speed);
2146         if (status)
2147                 goto err;
2148         be_link_status_update(adapter, link_up);
2149
2150         if (be_physfn(adapter)) {
2151                 status = be_vid_config(adapter, false, 0);
2152                 if (status)
2153                         goto err;
2154
2155                 status = be_cmd_set_flow_control(adapter,
2156                                 adapter->tx_fc, adapter->rx_fc);
2157                 if (status)
2158                         goto err;
2159         }
2160
2161         return 0;
2162 err:
2163         be_close(adapter->netdev);
2164         return -EIO;
2165 }
2166
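/* Program the magic-packet WoL filter in FW (or clear it by passing a
 * zeroed MAC) and toggle PME wake-up for D3hot/D3cold to match.
 */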
2167 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2168 {
2169         struct be_dma_mem cmd;
2170         int status = 0;
2171         u8 mac[ETH_ALEN];
2172
2173         memset(mac, 0, ETH_ALEN);
2174
2175         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2176         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2177                                     GFP_KERNEL);
2178         if (cmd.va == NULL)
2179                 return -1;
2180         memset(cmd.va, 0, cmd.size);
2181
2182         if (enable) {
2183                 status = pci_write_config_dword(adapter->pdev,
2184                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2185                 if (status) {
2186                         dev_err(&adapter->pdev->dev,
2187                                 "Could not enable Wake-on-lan\n");
2188                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2189                                           cmd.dma);
2190                         return status;
2191                 }
2192                 status = be_cmd_enable_magic_wol(adapter,
2193                                 adapter->netdev->dev_addr, &cmd);
2194                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2195                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2196         } else {
2197                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2198                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2199                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2200         }
2201
2202         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2203         return status;
2204 }
2205
2206 /*
2207  * Generate a seed MAC address from the PF MAC Address using jhash.
2208  * MAC addresses for VFs are assigned incrementally starting from the seed.
2209  * These addresses are programmed in the ASIC by the PF and the VF driver
2210  * queries for the MAC address during its probe.
2211  */
2212 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2213 {
2214         u32 vf = 0;
2215         int status = 0;
2216         u8 mac[ETH_ALEN];
2217
2218         be_vf_eth_addr_generate(adapter, mac);
2219
2220         for (vf = 0; vf < num_vfs; vf++) {
2221                 status = be_cmd_pmac_add(adapter, mac,
2222                                         adapter->vf_cfg[vf].vf_if_handle,
2223                                         &adapter->vf_cfg[vf].vf_pmac_id,
2224                                         vf + 1);
2225                 if (status)
2226                         dev_err(&adapter->pdev->dev,
2227                                 "Mac address add failed for VF %d\n", vf);
2228                 else
2229                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2230
2231                 mac[5] += 1;
2232         }
2233         return status;
2234 }
2235
2236 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2237 {
2238         u32 vf;
2239
2240         for (vf = 0; vf < num_vfs; vf++) {
2241                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2242                         be_cmd_pmac_del(adapter,
2243                                         adapter->vf_cfg[vf].vf_if_handle,
2244                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2245         }
2246 }
2247
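/* Create the interface (plus one interface per VF when SR-IOV is on) and
 * then the TX, RX and MCC queues; any failure unwinds in reverse order.
 */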
2248 static int be_setup(struct be_adapter *adapter)
2249 {
2250         struct net_device *netdev = adapter->netdev;
2251         u32 cap_flags, en_flags, vf = 0;
2252         int status;
2253         u8 mac[ETH_ALEN];
2254
2255         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2256                                 BE_IF_FLAGS_BROADCAST |
2257                                 BE_IF_FLAGS_MULTICAST;
2258
2259         if (be_physfn(adapter)) {
2260                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2261                                 BE_IF_FLAGS_PROMISCUOUS |
2262                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2263                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2264
2265                 if (be_multi_rxq(adapter)) {
2266                         cap_flags |= BE_IF_FLAGS_RSS;
2267                         en_flags |= BE_IF_FLAGS_RSS;
2268                 }
2269         }
2270
2271         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2272                         netdev->dev_addr, false /* pmac_invalid */,
2273                         &adapter->if_handle, &adapter->pmac_id, 0);
2274         if (status != 0)
2275                 goto do_none;
2276
2277         if (be_physfn(adapter)) {
2278                 if (adapter->sriov_enabled) {
2279                         while (vf < num_vfs) {
2280                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2281                                                         BE_IF_FLAGS_BROADCAST;
2282                                 status = be_cmd_if_create(adapter, cap_flags,
2283                                         en_flags, mac, true,
2284                                         &adapter->vf_cfg[vf].vf_if_handle,
2285                                         NULL, vf+1);
2286                                 if (status) {
2287                                         dev_err(&adapter->pdev->dev,
2288                                         "Interface Create failed for VF %d\n",
2289                                         vf);
2290                                         goto if_destroy;
2291                                 }
2292                                 adapter->vf_cfg[vf].vf_pmac_id =
2293                                                         BE_INVALID_PMAC_ID;
2294                                 vf++;
2295                         }
2296                 }
2297         } else {
2298                 status = be_cmd_mac_addr_query(adapter, mac,
2299                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2300                 if (!status) {
2301                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2302                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2303                 }
2304         }
2305
2306         status = be_tx_queues_create(adapter);
2307         if (status != 0)
2308                 goto if_destroy;
2309
2310         status = be_rx_queues_create(adapter);
2311         if (status != 0)
2312                 goto tx_qs_destroy;
2313
2314         status = be_mcc_queues_create(adapter);
2315         if (status != 0)
2316                 goto rx_qs_destroy;
2317
2318         adapter->link_speed = -1;
2319
2320         return 0;
2321
2323 rx_qs_destroy:
2324         be_rx_queues_destroy(adapter);
2325 tx_qs_destroy:
2326         be_tx_queues_destroy(adapter);
2327 if_destroy:
2328         if (be_physfn(adapter) && adapter->sriov_enabled)
2329                 for (vf = 0; vf < num_vfs; vf++)
2330                         if (adapter->vf_cfg[vf].vf_if_handle)
2331                                 be_cmd_if_destroy(adapter,
2332                                         adapter->vf_cfg[vf].vf_if_handle,
2333                                         vf + 1);
2334         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2335 do_none:
2336         return status;
2337 }
2338
2339 static int be_clear(struct be_adapter *adapter)
2340 {
2341         int vf;
2342
2343         if (be_physfn(adapter) && adapter->sriov_enabled)
2344                 be_vf_eth_addr_rem(adapter);
2345
2346         be_mcc_queues_destroy(adapter);
2347         be_rx_queues_destroy(adapter);
2348         be_tx_queues_destroy(adapter);
2349         adapter->eq_next_idx = 0;
2350
2351         if (be_physfn(adapter) && adapter->sriov_enabled)
2352                 for (vf = 0; vf < num_vfs; vf++)
2353                         if (adapter->vf_cfg[vf].vf_if_handle)
2354                                 be_cmd_if_destroy(adapter,
2355                                         adapter->vf_cfg[vf].vf_if_handle,
2356                                         vf + 1);
2357
2358         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2359
2360         /* tell fw we're done with firing cmds */
2361         be_cmd_fw_clean(adapter);
2362         return 0;
2363 }
2364
2366 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
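/* Ask FW for the CRC of the redboot image currently in flash and compare
 * it against the CRC (last 4 bytes) of the new image; return true (i.e.
 * flash it) only on a mismatch.
 */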
2367 static bool be_flash_redboot(struct be_adapter *adapter,
2368                         const u8 *p, u32 img_start, int image_size,
2369                         int hdr_size)
2370 {
2371         u32 crc_offset;
2372         u8 flashed_crc[4];
2373         int status;
2374
2375         crc_offset = hdr_size + img_start + image_size - 4;
2376
2377         p += crc_offset;
2378
2379         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2380                         (image_size - 4));
2381         if (status) {
2382                 dev_err(&adapter->pdev->dev,
2383                 "could not get crc from flash, not flashing redboot\n");
2384                 return false;
2385         }
2386
2387         /* update redboot only if crc does not match */
2388         if (!memcmp(flashed_crc, p, 4))
2389                 return false;
2390         else
2391                 return true;
2392 }
2393
2394 static int be_flash_data(struct be_adapter *adapter,
2395                         const struct firmware *fw,
2396                         struct be_dma_mem *flash_cmd, int num_of_images)
2398 {
2399         int status = 0, i, filehdr_size = 0;
2400         u32 total_bytes = 0, flash_op;
2401         int num_bytes;
2402         const u8 *p = fw->data;
2403         struct be_cmd_write_flashrom *req = flash_cmd->va;
2404         const struct flash_comp *pflashcomp;
2405         int num_comp;
2406
2407         static const struct flash_comp gen3_flash_types[9] = {
2408                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2409                         FLASH_IMAGE_MAX_SIZE_g3},
2410                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2411                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2412                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2413                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2414                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2415                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2416                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2417                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2418                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2419                         FLASH_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2421                         FLASH_IMAGE_MAX_SIZE_g3},
2422                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2423                         FLASH_IMAGE_MAX_SIZE_g3},
2424                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2425                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2426         };
2427         static const struct flash_comp gen2_flash_types[8] = {
2428                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2429                         FLASH_IMAGE_MAX_SIZE_g2},
2430                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2431                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2432                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2433                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2434                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2435                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2436                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2437                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2438                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2439                         FLASH_IMAGE_MAX_SIZE_g2},
2440                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2441                         FLASH_IMAGE_MAX_SIZE_g2},
2442                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2443                          FLASH_IMAGE_MAX_SIZE_g2}
2444         };
2445
2446         if (adapter->generation == BE_GEN3) {
2447                 pflashcomp = gen3_flash_types;
2448                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2449                 num_comp = ARRAY_SIZE(gen3_flash_types);
2450         } else {
2451                 pflashcomp = gen2_flash_types;
2452                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2453                 num_comp = ARRAY_SIZE(gen2_flash_types);
2454         }
2455         for (i = 0; i < num_comp; i++) {
2456                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2457                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2458                         continue;
2459                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2460                         (!be_flash_redboot(adapter, fw->data,
2461                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2462                         (num_of_images * sizeof(struct image_hdr)))))
2463                         continue;
2464                 p = fw->data;
2465                 p += filehdr_size + pflashcomp[i].offset
2466                         + (num_of_images * sizeof(struct image_hdr));
2467                 if (p + pflashcomp[i].size > fw->data + fw->size)
2468                         return -1;
2469                 total_bytes = pflashcomp[i].size;
2470                 while (total_bytes) {
2471                         if (total_bytes > 32*1024)
2472                                 num_bytes = 32*1024;
2473                         else
2474                                 num_bytes = total_bytes;
2475                         total_bytes -= num_bytes;
2476
2477                         if (!total_bytes)
2478                                 flash_op = FLASHROM_OPER_FLASH;
2479                         else
2480                                 flash_op = FLASHROM_OPER_SAVE;
2481                         memcpy(req->params.data_buf, p, num_bytes);
2482                         p += num_bytes;
2483                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2484                                 pflashcomp[i].optype, flash_op, num_bytes);
2485                         if (status) {
2486                                 dev_err(&adapter->pdev->dev,
2487                                         "cmd to write to flash rom failed.\n");
2488                                 return -1;
2489                         }
2490                         yield();
2491                 }
2492         }
2493         return 0;
2494 }
2495
2496 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2497 {
2498         if (fhdr == NULL)
2499                 return 0;
2500         if (fhdr->build[0] == '3')
2501                 return BE_GEN3;
2502         else if (fhdr->build[0] == '2')
2503                 return BE_GEN2;
2504         else
2505                 return 0;
2506 }
2507
2508 int be_load_fw(struct be_adapter *adapter, u8 *func)
2509 {
2510         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2511         const struct firmware *fw;
2512         struct flash_file_hdr_g2 *fhdr;
2513         struct flash_file_hdr_g3 *fhdr3;
2514         struct image_hdr *img_hdr_ptr = NULL;
2515         struct be_dma_mem flash_cmd;
2516         int status, i = 0, num_imgs = 0;
2517         const u8 *p;
2518
2519         if (!netif_running(adapter->netdev)) {
2520                 dev_err(&adapter->pdev->dev,
2521                         "Firmware load not allowed (interface is down)\n");
2522                 return -EPERM;
2523         }
2524
2525         strcpy(fw_file, func);
2526
2527         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2528         if (status)
2529                 goto fw_exit;
2530
2531         p = fw->data;
2532         fhdr = (struct flash_file_hdr_g2 *) p;
2533         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2534
2535         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2536         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2537                                           &flash_cmd.dma, GFP_KERNEL);
2538         if (!flash_cmd.va) {
2539                 status = -ENOMEM;
2540                 dev_err(&adapter->pdev->dev,
2541                         "Memory allocation failure while flashing\n");
2542                 goto fw_exit;
2543         }
2544
2545         if ((adapter->generation == BE_GEN3) &&
2546                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2547                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2548                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2549                 for (i = 0; i < num_imgs; i++) {
2550                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2551                                         (sizeof(struct flash_file_hdr_g3) +
2552                                          i * sizeof(struct image_hdr)));
2553                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2554                                 status = be_flash_data(adapter, fw, &flash_cmd,
2555                                                         num_imgs);
2556                 }
2557         } else if ((adapter->generation == BE_GEN2) &&
2558                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2559                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2560         } else {
2561                 dev_err(&adapter->pdev->dev,
2562                         "UFI and Interface are not compatible for flashing\n");
2563                 status = -1;
2564         }
2565
2566         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2567                           flash_cmd.dma);
2568         if (status) {
2569                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2570                 goto fw_exit;
2571         }
2572
2573         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2574
2575 fw_exit:
2576         release_firmware(fw);
2577         return status;
2578 }
2579
2580 static struct net_device_ops be_netdev_ops = {
2581         .ndo_open               = be_open,
2582         .ndo_stop               = be_close,
2583         .ndo_start_xmit         = be_xmit,
2584         .ndo_set_rx_mode        = be_set_multicast_list,
2585         .ndo_set_mac_address    = be_mac_addr_set,
2586         .ndo_change_mtu         = be_change_mtu,
2587         .ndo_validate_addr      = eth_validate_addr,
2588         .ndo_vlan_rx_register   = be_vlan_register,
2589         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2590         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2591         .ndo_set_vf_mac         = be_set_vf_mac,
2592         .ndo_set_vf_vlan        = be_set_vf_vlan,
2593         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2594         .ndo_get_vf_config      = be_get_vf_config
2595 };
2596
2597 static void be_netdev_init(struct net_device *netdev)
2598 {
2599         struct be_adapter *adapter = netdev_priv(netdev);
2600         struct be_rx_obj *rxo;
2601         int i;
2602
2603         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2604                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2605                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2606                 NETIF_F_GRO | NETIF_F_TSO6;
2607
2608         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2609                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2610
2611         if (lancer_chip(adapter))
2612                 netdev->vlan_features |= NETIF_F_TSO6;
2613
2614         netdev->flags |= IFF_MULTICAST;
2615
2616         adapter->rx_csum = true;
2617
2618         /* Default settings for Rx and Tx flow control */
2619         adapter->rx_fc = true;
2620         adapter->tx_fc = true;
2621
2622         netif_set_gso_max_size(netdev, 65535);
2623
2624         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2625
2626         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2627
2628         for_all_rx_queues(adapter, rxo, i)
2629                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2630                                 BE_NAPI_WEIGHT);
2631
2632         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2633                 BE_NAPI_WEIGHT);
2634 }
2635
2636 static void be_unmap_pci_bars(struct be_adapter *adapter)
2637 {
2638         if (adapter->csr)
2639                 iounmap(adapter->csr);
2640         if (adapter->db)
2641                 iounmap(adapter->db);
2642         if (adapter->pcicfg && be_physfn(adapter))
2643                 iounmap(adapter->pcicfg);
2644 }
2645
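/* BAR layout differs by chip. Lancer exposes everything via BAR 0 (db).
 * On BE2/BE3 the PF maps CSR from BAR 2; pcicfg/db come from BARs 1/4 on
 * GEN2 and BARs 0/4 on GEN3. VFs use BAR 0 for db and reach pcicfg at a
 * fixed offset within it.
 */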
2646 static int be_map_pci_bars(struct be_adapter *adapter)
2647 {
2648         u8 __iomem *addr;
2649         int pcicfg_reg, db_reg;
2650
2651         if (lancer_chip(adapter)) {
2652                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2653                         pci_resource_len(adapter->pdev, 0));
2654                 if (addr == NULL)
2655                         return -ENOMEM;
2656                 adapter->db = addr;
2657                 return 0;
2658         }
2659
2660         if (be_physfn(adapter)) {
2661                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2662                                 pci_resource_len(adapter->pdev, 2));
2663                 if (addr == NULL)
2664                         return -ENOMEM;
2665                 adapter->csr = addr;
2666         }
2667
2668         if (adapter->generation == BE_GEN2) {
2669                 pcicfg_reg = 1;
2670                 db_reg = 4;
2671         } else {
2672                 pcicfg_reg = 0;
2673                 if (be_physfn(adapter))
2674                         db_reg = 4;
2675                 else
2676                         db_reg = 0;
2677         }
2678         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2679                                 pci_resource_len(adapter->pdev, db_reg));
2680         if (addr == NULL)
2681                 goto pci_map_err;
2682         adapter->db = addr;
2683
2684         if (be_physfn(adapter)) {
2685                 addr = ioremap_nocache(
2686                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2687                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2688                 if (addr == NULL)
2689                         goto pci_map_err;
2690                 adapter->pcicfg = addr;
2691         } else
2692                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2693
2694         return 0;
2695 pci_map_err:
2696         be_unmap_pci_bars(adapter);
2697         return -ENOMEM;
2698 }
2699
2701 static void be_ctrl_cleanup(struct be_adapter *adapter)
2702 {
2703         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2704
2705         be_unmap_pci_bars(adapter);
2706
2707         if (mem->va)
2708                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2709                                   mem->dma);
2710
2711         mem = &adapter->mc_cmd_mem;
2712         if (mem->va)
2713                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2714                                   mem->dma);
2715 }
2716
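/* Map the PCI BARs and set up control structures. The mailbox DMA buffer
 * is over-allocated by 16 bytes so that both its VA and bus address can
 * be aligned to a 16-byte boundary.
 */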
2717 static int be_ctrl_init(struct be_adapter *adapter)
2718 {
2719         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2720         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2721         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2722         int status;
2723
2724         status = be_map_pci_bars(adapter);
2725         if (status)
2726                 goto done;
2727
2728         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2729         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2730                                                 mbox_mem_alloc->size,
2731                                                 &mbox_mem_alloc->dma,
2732                                                 GFP_KERNEL);
2733         if (!mbox_mem_alloc->va) {
2734                 status = -ENOMEM;
2735                 goto unmap_pci_bars;
2736         }
2737
2738         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2739         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2740         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2741         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2742
2743         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2744         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2745                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2746                                             GFP_KERNEL);
2747         if (mc_cmd_mem->va == NULL) {
2748                 status = -ENOMEM;
2749                 goto free_mbox;
2750         }
2751         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2752
2753         mutex_init(&adapter->mbox_lock);
2754         spin_lock_init(&adapter->mcc_lock);
2755         spin_lock_init(&adapter->mcc_cq_lock);
2756
2757         init_completion(&adapter->flash_compl);
2758         pci_save_state(adapter->pdev);
2759         return 0;
2760
2761 free_mbox:
2762         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2763                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2764
2765 unmap_pci_bars:
2766         be_unmap_pci_bars(adapter);
2767
2768 done:
2769         return status;
2770 }
2771
2772 static void be_stats_cleanup(struct be_adapter *adapter)
2773 {
2774         struct be_dma_mem *cmd = &adapter->stats_cmd;
2775
2776         if (cmd->va)
2777                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2778                                   cmd->va, cmd->dma);
2779 }
2780
2781 static int be_stats_init(struct be_adapter *adapter)
2782 {
2783         struct be_dma_mem *cmd = &adapter->stats_cmd;
2784
2785         cmd->size = sizeof(struct be_cmd_req_get_stats);
2786         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2787                                      GFP_KERNEL);
2788         if (cmd->va == NULL)
2789                 return -1;
2790         memset(cmd->va, 0, cmd->size);
2791         return 0;
2792 }
2793
2794 static void __devexit be_remove(struct pci_dev *pdev)
2795 {
2796         struct be_adapter *adapter = pci_get_drvdata(pdev);
2797
2798         if (!adapter)
2799                 return;
2800
2801         cancel_delayed_work_sync(&adapter->work);
2802
2803         unregister_netdev(adapter->netdev);
2804
2805         be_clear(adapter);
2806
2807         be_stats_cleanup(adapter);
2808
2809         be_ctrl_cleanup(adapter);
2810
2811         be_sriov_disable(adapter);
2812
2813         be_msix_disable(adapter);
2814
2815         pci_set_drvdata(pdev, NULL);
2816         pci_release_regions(pdev);
2817         pci_disable_device(pdev);
2818
2819         free_netdev(adapter->netdev);
2820 }
2821
2822 static int be_get_config(struct be_adapter *adapter)
2823 {
2824         int status;
2825         u8 mac[ETH_ALEN];
2826
2827         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2828         if (status)
2829                 return status;
2830
2831         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2832                         &adapter->function_mode, &adapter->function_caps);
2833         if (status)
2834                 return status;
2835
2836         memset(mac, 0, ETH_ALEN);
2837
2838         if (be_physfn(adapter)) {
2839                 status = be_cmd_mac_addr_query(adapter, mac,
2840                         MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
2841
2842                 if (status)
2843                         return status;
2844
2845                 if (!is_valid_ether_addr(mac))
2846                         return -EADDRNOTAVAIL;
2847
2848                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2849                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2850         }
2851
2852         if (adapter->function_mode & 0x400)
2853                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2854         else
2855                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2856
2857         status = be_cmd_get_cntl_attributes(adapter);
2858         if (status)
2859                 return status;
2860
2861         be_cmd_check_native_mode(adapter);
2862         return 0;
2863 }
2864
2865 static int be_dev_family_check(struct be_adapter *adapter)
2866 {
2867         struct pci_dev *pdev = adapter->pdev;
2868         u32 sli_intf = 0, if_type;
2869
2870         switch (pdev->device) {
2871         case BE_DEVICE_ID1:
2872         case OC_DEVICE_ID1:
2873                 adapter->generation = BE_GEN2;
2874                 break;
2875         case BE_DEVICE_ID2:
2876         case OC_DEVICE_ID2:
2877                 adapter->generation = BE_GEN3;
2878                 break;
2879         case OC_DEVICE_ID3:
2880                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2881                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2882                                                 SLI_INTF_IF_TYPE_SHIFT;
2883
2884                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2885                         if_type != 0x02) {
2886                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2887                         return -EINVAL;
2888                 }
2889                 if (num_vfs > 0) {
2890                         dev_err(&pdev->dev, "VFs not supported\n");
2891                         return -EINVAL;
2892                 }
2893                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2894                                          SLI_INTF_FAMILY_SHIFT);
2895                 adapter->generation = BE_GEN3;
2896                 break;
2897         default:
2898                 adapter->generation = 0;
2899         }
2900         return 0;
2901 }
2902
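/* Poll the SLIPORT status register until the RDY bit is set, for up to
 * 500 * 20ms = 10 seconds.
 */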
2903 static int lancer_wait_ready(struct be_adapter *adapter)
2904 {
2905 #define SLIPORT_READY_TIMEOUT 500
2906         u32 sliport_status;
2907         int status = 0, i;
2908
2909         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2910                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2911                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2912                         break;
2913
2914                 msleep(20);
2915         }
2916
2917         if (i == SLIPORT_READY_TIMEOUT)
2918                 status = -1;
2919
2920         return status;
2921 }
2922
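/* If the port reports an error that requires a reset, initiate one via
 * the SLIPORT control register, then re-check that the port comes back
 * ready and error-free.
 */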
2923 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2924 {
2925         int status;
2926         u32 sliport_status, err, reset_needed;
2927         status = lancer_wait_ready(adapter);
2928         if (!status) {
2929                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2930                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2931                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2932                 if (err && reset_needed) {
2933                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
2934                                         adapter->db + SLIPORT_CONTROL_OFFSET);
2935
2936                         /* check if adapter has corrected the error */
2937                         status = lancer_wait_ready(adapter);
2938                         sliport_status = ioread32(adapter->db +
2939                                                         SLIPORT_STATUS_OFFSET);
2940                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2941                                                 SLIPORT_STATUS_RN_MASK);
2942                         if (status || sliport_status)
2943                                 status = -1;
2944                 } else if (err || reset_needed) {
2945                         status = -1;
2946                 }
2947         }
2948         return status;
2949 }
2950
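/*
 * PCI probe: bring up the PCI resources, allocate the net_device,
 * initialize the controller and firmware command interface, and
 * register the netdev.  Each failure unwinds through the labels at the
 * bottom in reverse order of setup.
 */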
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			/* be_ctrl_init() has already succeeded, so unwind
			 * through ctrl_clean to release its resources */
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

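/*
 * Legacy PM suspend: arm WoL if configured, quiesce the interface,
 * tear down the rings and interrupts, and put the device into the
 * requested low-power state.
 */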
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

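/*
 * Legacy PM resume: restore PCI state, re-initialize the firmware
 * command interface, rebuild the rings, and re-open the interface if
 * it was running at suspend time.
 */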
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* The worker is scheduled from probe regardless of the interface
	 * state, so cancel it unconditionally */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

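/*
 * PCI EEH (Extended Error Handling) callbacks.  On an error the device
 * is quiesced and torn down; slot_reset re-enables the device and
 * waits for firmware POST; resume rebuilds the rings and restarts the
 * netdev.
 */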
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers,
};

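/*
 * Module init: validate the module parameters before registering the
 * PCI driver, falling back to safe defaults on out-of-range values.
 */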
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);