/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

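/* Every ring (EQ/CQ/RXQ/TXQ/MCC) is backed by one DMA-coherent region of
 * len * entry_size bytes; e.g. a 256-entry ring of 4-byte entries is a
 * single 1KB coherent allocation (example sizes, not taken from be.h).
 */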
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

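/* The notify helpers below ring hardware doorbells through adapter->db:
 * each builds a single 32-bit word whose low bits select the ring id and
 * whose remaining fields carry the number of entries posted or popped
 * plus, for EQs/CQs, re-arm and clear-interrupt flags (see the DB_*
 * masks and shifts). The wmb() orders ring-memory writes ahead of the
 * doorbell write.
 */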
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
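/* e.g. a measured rate of 1,100,000 frags/sec gives an eqd of
 * (1100000 / 110000) << 3 = 80; the result is clamped to the EQ's
 * [min_eqd, max_eqd] range, forced to 0 below 10, and pushed to the
 * device via be_cmd_modify_eqd() only when it actually changes.
 */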
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

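/* Average rate in Mbits/sec over an interval given in jiffies; e.g.
 * 500,000,000 bytes over 2 * HZ ticks -> 250,000,000 bytes/sec,
 * << 3 -> 2,000,000,000 bits/sec, / 10^6 -> 2000 Mbits/sec.
 */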
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
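/* e.g. an skb with a linear head and two page frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs and, being even, no dummy;
 * a head plus one frag (3 WRBs) gets a zero-length dummy wrb on
 * non-Lancer chips so the per-packet WRB count comes out even.
 */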
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

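/* Build the wrb chain for an skb in the tx ring: a hdr wrb is reserved
 * first, then one fragment wrb per mapped piece (the linear head via
 * dma_map_single(), each page frag via dma_map_page()) and optionally a
 * zero-length dummy wrb. Returns the bytes queued, or 0 after unwinding
 * every mapping if a DMA mapping fails.
 */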
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
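/* Once vlans_added crosses max_vlans the constructed table is abandoned
 * and the final be_cmd_vlan_config() call (NULL table; the last argument
 * presumably selects vlan promiscuity) switches the interface to vlan
 * promiscuous mode instead.
 */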
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non-TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

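/* Look up the page_info backing a completed rx frag and decrement the
 * rxq usage count. The dma mapping spans the whole compound page, so it
 * is torn down only when the frag marked last_page_user is consumed.
 */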
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
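/* Frags carved out of the same compound page arrive as consecutive
 * completions with a non-zero page_offset; they are folded into the
 * current skb frag slot (dropping the extra page reference), so
 * nr_frags counts distinct pages rather than rx descriptors.
 */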
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
        rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
        rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
}

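/* Returns NULL while the valid bit of the entry at the CQ tail is still
 * 0; otherwise parses the v1 or v0 completion layout (selected by
 * adapter->be3_native) into rxo->rxcp and clears the valid dword so the
 * entry is never consumed twice.
 */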
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        /* vlanf could be wrongly set in some cards. ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                rxcp->vlanf = 0;

        if (!lancer_chip(adapter))
                rxcp->vid = swab16(rxcp->vid);

        if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
                rxcp->vlanf = 0;

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
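/* e.g. with 2048-byte frags and a 16K big_page_size (example figures),
 * one compound page is carved into 8 rx buffers that share a single dma
 * mapping; the frag that exhausts the page is tagged last_page_user so
 * the mapping is unmapped exactly once on completion.
 */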
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

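/* Reclaim one transmitted skb: starting at the hdr wrb at txq->tail,
 * walk and unmap every fragment wrb up to last_index (the first data
 * wrb holds the dma_map_single()'d linear head, the rest are pages),
 * then return the wrbs to the queue and free the skb.
 */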
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

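/* Drain every pending entry from the EQ and re-arm it; notifying even
 * when num == 0 keeps spurious interrupts from leaving the EQ dead, and
 * NAPI is scheduled only when real events were popped.
 */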
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

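/* Poll the tx CQ for up to 200ms (mdelay(1) per pass) so in-flight
 * completions can drain; any wrbs still outstanding afterwards are
 * reclaimed by hand by walking sent_skb_list from txq->tail.
 */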
1365 static void be_tx_compl_clean(struct be_adapter *adapter)
1366 {
1367         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1368         struct be_queue_info *txq = &adapter->tx_obj.q;
1369         struct be_eth_tx_compl *txcp;
1370         u16 end_idx, cmpl = 0, timeo = 0;
1371         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1372         struct sk_buff *sent_skb;
1373         bool dummy_wrb;
1374
1375         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1376         do {
1377                 while ((txcp = be_tx_compl_get(tx_cq))) {
1378                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1379                                         wrb_index, txcp);
1380                         be_tx_compl_process(adapter, end_idx);
1381                         cmpl++;
1382                 }
1383                 if (cmpl) {
1384                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1385                         cmpl = 0;
1386                 }
1387
1388                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1389                         break;
1390
1391                 mdelay(1);
1392         } while (true);
1393
1394         if (atomic_read(&txq->used))
1395                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1396                         atomic_read(&txq->used));
1397
1398         /* free posted tx for which compls will never arrive */
1399         while (atomic_read(&txq->used)) {
1400                 sent_skb = sent_skbs[txq->tail];
1401                 end_idx = txq->tail;
1402                 index_adv(&end_idx,
1403                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1404                         txq->len);
1405                 be_tx_compl_process(adapter, end_idx);
1406         }
1407 }
1408
1409 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1410 {
1411         struct be_queue_info *q;
1412
1413         q = &adapter->mcc_obj.q;
1414         if (q->created)
1415                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1416         be_queue_free(adapter, q);
1417
1418         q = &adapter->mcc_obj.cq;
1419         if (q->created)
1420                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1421         be_queue_free(adapter, q);
1422 }
1423
1424 /* Must be called only after TX qs are created as MCC shares TX EQ */
1425 static int be_mcc_queues_create(struct be_adapter *adapter)
1426 {
1427         struct be_queue_info *q, *cq;
1428
1429         /* Alloc MCC compl queue */
1430         cq = &adapter->mcc_obj.cq;
1431         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1432                         sizeof(struct be_mcc_compl)))
1433                 goto err;
1434
1435         /* Ask BE to create MCC compl queue; share TX's eq */
1436         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1437                 goto mcc_cq_free;
1438
1439         /* Alloc MCC queue */
1440         q = &adapter->mcc_obj.q;
1441         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1442                 goto mcc_cq_destroy;
1443
1444         /* Ask BE to create MCC queue */
1445         if (be_cmd_mccq_create(adapter, q, cq))
1446                 goto mcc_q_free;
1447
1448         return 0;
1449
1450 mcc_q_free:
1451         be_queue_free(adapter, q);
1452 mcc_cq_destroy:
1453         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1454 mcc_cq_free:
1455         be_queue_free(adapter, cq);
1456 err:
1457         return -1;
1458 }
1459
1460 static void be_tx_queues_destroy(struct be_adapter *adapter)
1461 {
1462         struct be_queue_info *q;
1463
1464         q = &adapter->tx_obj.q;
1465         if (q->created)
1466                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1467         be_queue_free(adapter, q);
1468
1469         q = &adapter->tx_obj.cq;
1470         if (q->created)
1471                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1472         be_queue_free(adapter, q);
1473
1474         /* Clear any residual events */
1475         be_eq_clean(adapter, &adapter->tx_eq);
1476
1477         q = &adapter->tx_eq.q;
1478         if (q->created)
1479                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1480         be_queue_free(adapter, q);
1481 }
1482
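/* Create EQ -> CQ -> Q for the TX path. The error labels below form the
 * usual kernel unwind ladder: each label undoes exactly the steps that
 * succeeded before the failure, in reverse order. A minimal sketch of the
 * same idiom (illustrative only; create_a/create_b/destroy_a are
 * hypothetical helpers, not driver code):
 */
#if 0
static int create_a_b(void)
{
        int err;

        err = create_a();
        if (err)
                return err;

        err = create_b();
        if (err)
                goto undo_a;

        return 0;

undo_a:
        destroy_a();
        return err;
}
#endif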
1483 static int be_tx_queues_create(struct be_adapter *adapter)
1484 {
1485         struct be_queue_info *eq, *q, *cq;
1486
1487         adapter->tx_eq.max_eqd = 0;
1488         adapter->tx_eq.min_eqd = 0;
1489         adapter->tx_eq.cur_eqd = 96;
1490         adapter->tx_eq.enable_aic = false;
1491         /* Alloc Tx Event queue */
1492         eq = &adapter->tx_eq.q;
1493         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1494                 return -1;
1495
1496         /* Ask BE to create Tx Event queue */
1497         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1498                 goto tx_eq_free;
1499
1500         adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1501
1503         /* Alloc TX eth compl queue */
1504         cq = &adapter->tx_obj.cq;
1505         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1506                         sizeof(struct be_eth_tx_compl)))
1507                 goto tx_eq_destroy;
1508
1509         /* Ask BE to create Tx eth compl queue */
1510         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1511                 goto tx_cq_free;
1512
1513         /* Alloc TX eth queue */
1514         q = &adapter->tx_obj.q;
1515         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1516                 goto tx_cq_destroy;
1517
1518         /* Ask BE to create Tx eth queue */
1519         if (be_cmd_txq_create(adapter, q, cq))
1520                 goto tx_q_free;
1521         return 0;
1522
1523 tx_q_free:
1524         be_queue_free(adapter, q);
1525 tx_cq_destroy:
1526         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1527 tx_cq_free:
1528         be_queue_free(adapter, cq);
1529 tx_eq_destroy:
1530         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1531 tx_eq_free:
1532         be_queue_free(adapter, eq);
1533         return -1;
1534 }
1535
1536 static void be_rx_queues_destroy(struct be_adapter *adapter)
1537 {
1538         struct be_queue_info *q;
1539         struct be_rx_obj *rxo;
1540         int i;
1541
1542         for_all_rx_queues(adapter, rxo, i) {
1543                 q = &rxo->q;
1544                 if (q->created) {
1545                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1546                         /* After the rxq is invalidated, wait for a grace time
1547                          * of 1ms for all dma to end and the flush compl to
1548                          * arrive
1549                          */
1550                         mdelay(1);
1551                         be_rx_q_clean(adapter, rxo);
1552                 }
1553                 be_queue_free(adapter, q);
1554
1555                 q = &rxo->cq;
1556                 if (q->created)
1557                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1558                 be_queue_free(adapter, q);
1559
1560                 /* Clear any residual events */
1561                 q = &rxo->rx_eq.q;
1562                 if (q->created) {
1563                         be_eq_clean(adapter, &rxo->rx_eq);
1564                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1565                 }
1566                 be_queue_free(adapter, q);
1567         }
1568 }
1569
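/* Create one EQ/CQ/RXQ triplet per RX ring. Ring 0 is the default
 * (non-RSS) queue; rings 1..N-1 are created with RSS enabled and their
 * rss_ids are then loaded into the indirection table via
 * be_cmd_rss_config(). On failure, everything built so far is torn down
 * through be_rx_queues_destroy().
 */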
1570 static int be_rx_queues_create(struct be_adapter *adapter)
1571 {
1572         struct be_queue_info *eq, *q, *cq;
1573         struct be_rx_obj *rxo;
1574         int rc, i;
1575
1576         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577         for_all_rx_queues(adapter, rxo, i) {
1578                 rxo->adapter = adapter;
1579                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580                 rxo->rx_eq.enable_aic = true;
1581
1582                 /* EQ */
1583                 eq = &rxo->rx_eq.q;
1584                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1585                                         sizeof(struct be_eq_entry));
1586                 if (rc)
1587                         goto err;
1588
1589                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1590                 if (rc)
1591                         goto err;
1592
1593                 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1595                 /* CQ */
1596                 cq = &rxo->cq;
1597                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1598                                 sizeof(struct be_eth_rx_compl));
1599                 if (rc)
1600                         goto err;
1601
1602                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1603                 if (rc)
1604                         goto err;
1605                 /* Rx Q */
1606                 q = &rxo->q;
1607                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1608                                 sizeof(struct be_eth_rx_d));
1609                 if (rc)
1610                         goto err;
1611
1612                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1613                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1614                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1615                 if (rc)
1616                         goto err;
1617         }
1618
1619         if (be_multi_rxq(adapter)) {
1620                 u8 rsstable[MAX_RSS_QS];
1621
1622                 for_all_rss_queues(adapter, rxo, i)
1623                         rsstable[i] = rxo->rss_id;
1624
1625                 rc = be_cmd_rss_config(adapter, rsstable,
1626                         adapter->num_rx_qs - 1);
1627                 if (rc)
1628                         goto err;
1629         }
1630
1631         return 0;
1632 err:
1633         be_rx_queues_destroy(adapter);
1634         return -1;
1635 }
1636
1637 static bool event_peek(struct be_eq_obj *eq_obj)
1638 {
1639         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640         return eqe->evt != 0;
1644 }
1645
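/* Legacy (INTx) interrupt handler. Lancer has no CEV ISR register, so the
 * EQs are peeked directly for pending entries; on BE2/BE3 the ISR register
 * is read and every EQ whose vector bit is set gets serviced. Returning
 * IRQ_NONE lets the core handle shared/spurious interrupts correctly.
 */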
1646 static irqreturn_t be_intx(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649         struct be_rx_obj *rxo;
1650         int isr, i, tx = 0, rx = 0;
1651
1652         if (lancer_chip(adapter)) {
1653                 if (event_peek(&adapter->tx_eq))
1654                         tx = event_handle(adapter, &adapter->tx_eq);
1655                 for_all_rx_queues(adapter, rxo, i) {
1656                         if (event_peek(&rxo->rx_eq))
1657                                 rx |= event_handle(adapter, &rxo->rx_eq);
1658                 }
1659
1660                 if (!(tx || rx))
1661                         return IRQ_NONE;
1662
1663         } else {
1664                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1665                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666                 if (!isr)
1667                         return IRQ_NONE;
1668
1669                 if (isr & (1 << adapter->tx_eq.msix_vec_idx))
1670                         event_handle(adapter, &adapter->tx_eq);
1671
1672                 for_all_rx_queues(adapter, rxo, i) {
1673                         if (isr & (1 << rxo->rx_eq.msix_vec_idx))
1674                                 event_handle(adapter, &rxo->rx_eq);
1675                 }
1676         }
1677
1678         return IRQ_HANDLED;
1679 }
1680
1681 static irqreturn_t be_msix_rx(int irq, void *dev)
1682 {
1683         struct be_rx_obj *rxo = dev;
1684         struct be_adapter *adapter = rxo->adapter;
1685
1686         event_handle(adapter, &rxo->rx_eq);
1687
1688         return IRQ_HANDLED;
1689 }
1690
1691 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1692 {
1693         struct be_adapter *adapter = dev;
1694
1695         event_handle(adapter, &adapter->tx_eq);
1696
1697         return IRQ_HANDLED;
1698 }
1699
1700 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1701 {
1702         return rxcp->tcpf && !rxcp->err;
1703 }
1704
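/* NAPI poll for an RX ring. Per the NAPI contract, napi_complete() is
 * called and the CQ re-armed only when fewer than 'budget' completions
 * were consumed; otherwise the completions are acked without re-arming
 * and the core will poll again.
 */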
1705 static int be_poll_rx(struct napi_struct *napi, int budget)
1706 {
1707         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1708         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1709         struct be_adapter *adapter = rxo->adapter;
1710         struct be_queue_info *rx_cq = &rxo->cq;
1711         struct be_rx_compl_info *rxcp;
1712         u32 work_done;
1713
1714         rxo->stats.rx_polls++;
1715         for (work_done = 0; work_done < budget; work_done++) {
1716                 rxcp = be_rx_compl_get(rxo);
1717                 if (!rxcp)
1718                         break;
1719
1720                 /* Ignore flush completions */
1721                 if (rxcp->num_rcvd) {
1722                         if (do_gro(rxcp))
1723                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1724                         else
1725                                 be_rx_compl_process(adapter, rxo, rxcp);
1726                 }
1727                 be_rx_stats_update(rxo, rxcp);
1728         }
1729
1730         /* Refill the queue */
1731         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1732                 be_post_rx_frags(rxo, GFP_ATOMIC);
1733
1734         /* All consumed */
1735         if (work_done < budget) {
1736                 napi_complete(napi);
1737                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1738         } else {
1739                 /* More to be consumed; continue with interrupts disabled */
1740                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1741         }
1742         return work_done;
1743 }
1744
1745 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1746  * For TX/MCC we don't honour the budget; consume everything.
1747  */
1748 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1749 {
1750         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1751         struct be_adapter *adapter =
1752                 container_of(tx_eq, struct be_adapter, tx_eq);
1753         struct be_queue_info *txq = &adapter->tx_obj.q;
1754         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1755         struct be_eth_tx_compl *txcp;
1756         int tx_compl = 0, mcc_compl, status = 0;
1757         u16 end_idx;
1758
1759         while ((txcp = be_tx_compl_get(tx_cq))) {
1760                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1761                                 wrb_index, txcp);
1762                 be_tx_compl_process(adapter, end_idx);
1763                 tx_compl++;
1764         }
1765
1766         mcc_compl = be_process_mcc(adapter, &status);
1767
1768         napi_complete(napi);
1769
1770         if (mcc_compl) {
1771                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1772                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1773         }
1774
1775         if (tx_compl) {
1776                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1777
1778                 /* As Tx wrbs have been freed up, wake up netdev queue if
1779                  * it was stopped due to lack of tx wrbs.
1780                  */
1781                 if (netif_queue_stopped(adapter->netdev) &&
1782                         atomic_read(&txq->used) < txq->len / 2) {
1783                         netif_wake_queue(adapter->netdev);
1784                 }
1785
1786                 tx_stats(adapter)->be_tx_events++;
1787                 tx_stats(adapter)->be_tx_compl += tx_compl;
1788         }
1789
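        /* Not budget-bound: everything available was consumed above, so
         * report a single unit of work to the NAPI core.
         */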
1790         return 1;
1791 }
1792
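/* Detect an Unrecoverable Error (UE): read the UE status CSRs from PCI
 * config space, drop the masked bits, and if anything remains latch
 * ue_detected/eeh_err and log the name of each offending bit via the
 * ue_status_low_desc/ue_status_hi_desc tables.
 */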
1793 void be_detect_dump_ue(struct be_adapter *adapter)
1794 {
1795         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1796         u32 i;
1797
1798         pci_read_config_dword(adapter->pdev,
1799                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1800         pci_read_config_dword(adapter->pdev,
1801                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1802         pci_read_config_dword(adapter->pdev,
1803                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1804         pci_read_config_dword(adapter->pdev,
1805                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1806
1807         ue_status_lo &= ~ue_status_lo_mask;
1808         ue_status_hi &= ~ue_status_hi_mask;
1809
1810         if (ue_status_lo || ue_status_hi) {
1811                 adapter->ue_detected = true;
1812                 adapter->eeh_err = true;
1813                 dev_err(&adapter->pdev->dev, "UE detected\n");
1814         }
1815
1816         if (ue_status_lo) {
1817                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1818                         if (ue_status_lo & 1)
1819                                 dev_err(&adapter->pdev->dev,
1820                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1821                 }
1822         }
1823         if (ue_status_hi) {
1824                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1825                         if (ue_status_hi & 1)
1826                                 dev_err(&adapter->pdev->dev,
1827                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1828                 }
1829         }
1831 }
1832
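/* Housekeeping worker, re-armed every second: fires the async stats cmd,
 * refreshes tx/rx rate estimates and adaptive EQ delays, replenishes any
 * RX ring that has starved of buffers and checks for UEs. Before the
 * interface is up, it only reaps pending MCC completions.
 */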
1833 static void be_worker(struct work_struct *work)
1834 {
1835         struct be_adapter *adapter =
1836                 container_of(work, struct be_adapter, work.work);
1837         struct be_rx_obj *rxo;
1838         int i;
1839
1840         /* when interrupts are not yet enabled, just reap any pending
1841          * mcc completions */
1842         if (!netif_running(adapter->netdev)) {
1843                 int mcc_compl, status = 0;
1844
1845                 mcc_compl = be_process_mcc(adapter, &status);
1846
1847                 if (mcc_compl) {
1848                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1849                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1850                 }
1851
1852                 if (!adapter->ue_detected && !lancer_chip(adapter))
1853                         be_detect_dump_ue(adapter);
1854
1855                 goto reschedule;
1856         }
1857
1858         if (!adapter->stats_cmd_sent)
1859                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1860
1861         be_tx_rate_update(adapter);
1862
1863         for_all_rx_queues(adapter, rxo, i) {
1864                 be_rx_rate_update(rxo);
1865                 be_rx_eqd_update(adapter, rxo);
1866
1867                 if (rxo->rx_post_starved) {
1868                         rxo->rx_post_starved = false;
1869                         be_post_rx_frags(rxo, GFP_KERNEL);
1870                 }
1871         }
1872         if (!adapter->ue_detected && !lancer_chip(adapter))
1873                 be_detect_dump_ue(adapter);
1874
1875 reschedule:
1876         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1877 }
1878
1879 static void be_msix_disable(struct be_adapter *adapter)
1880 {
1881         if (adapter->msix_enabled) {
1882                 pci_disable_msix(adapter->pdev);
1883                 adapter->msix_enabled = false;
1884         }
1885 }
1886
1887 static int be_num_rxqs_get(struct be_adapter *adapter)
1888 {
1889         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1890                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1891                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1892         } else {
1893                 dev_warn(&adapter->pdev->dev,
1894                         "No support for multiple RX queues\n");
1895                 return 1;
1896         }
1897 }
1898
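/* Try for one MSI-x vector per RX queue plus one shared TX/MCC vector.
 * A positive return from pci_enable_msix() is the number of vectors that
 * could have been allocated, so the request is retried with that smaller
 * count and the number of RX queues is trimmed to match.
 */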
1899 static void be_msix_enable(struct be_adapter *adapter)
1900 {
1901 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1902         int i, status;
1903
1904         adapter->num_rx_qs = be_num_rxqs_get(adapter);
1905
1906         for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1907                 adapter->msix_entries[i].entry = i;
1908
1909         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1910                         adapter->num_rx_qs + 1);
1911         if (status == 0) {
1912                 goto done;
1913         } else if (status >= BE_MIN_MSIX_VECTORS) {
1914                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1915                                 status) == 0) {
1916                         adapter->num_rx_qs = status - 1;
1917                         dev_warn(&adapter->pdev->dev,
1918                                 "Could allocate only %d MSI-x vectors. "
1919                                 "Using %d RX queues\n", status, adapter->num_rx_qs);
1920                         goto done;
1921                 }
1922         }
1923         return;
1924 done:
1925         adapter->msix_enabled = true;
1926 }
1927
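/* Enable SR-IOV when running as a PF and the num_vfs module parameter is
 * non-zero, e.g. (hypothetical VF count):
 *
 *      modprobe be2net num_vfs=4
 */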
1928 static void be_sriov_enable(struct be_adapter *adapter)
1929 {
1930         be_check_sriov_fn_type(adapter);
1931 #ifdef CONFIG_PCI_IOV
1932         if (be_physfn(adapter) && num_vfs) {
1933                 int status;
1934
1935                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1936                 adapter->sriov_enabled = status ? false : true;
1937         }
1938 #endif
1939 }
1940
1941 static void be_sriov_disable(struct be_adapter *adapter)
1942 {
1943 #ifdef CONFIG_PCI_IOV
1944         if (adapter->sriov_enabled) {
1945                 pci_disable_sriov(adapter->pdev);
1946                 adapter->sriov_enabled = false;
1947         }
1948 #endif
1949 }
1950
1951 static inline int be_msix_vec_get(struct be_adapter *adapter,
1952                                         struct be_eq_obj *eq_obj)
1953 {
1954         return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1955 }
1956
1957 static int be_request_irq(struct be_adapter *adapter,
1958                 struct be_eq_obj *eq_obj,
1959                 void *handler, char *desc, void *context)
1960 {
1961         struct net_device *netdev = adapter->netdev;
1962         int vec;
1963
1964         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1965         vec = be_msix_vec_get(adapter, eq_obj);
1966         return request_irq(vec, handler, 0, eq_obj->desc, context);
1967 }
1968
1969 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1970                         void *context)
1971 {
1972         int vec = be_msix_vec_get(adapter, eq_obj);
1973         free_irq(vec, context);
1974 }
1975
1976 static int be_msix_register(struct be_adapter *adapter)
1977 {
1978         struct be_rx_obj *rxo;
1979         int status, i;
1980         char qname[10];
1981
1982         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1983                                 adapter);
1984         if (status)
1985                 goto err;
1986
1987         for_all_rx_queues(adapter, rxo, i) {
1988                 sprintf(qname, "rxq%d", i);
1989                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1990                                 qname, rxo);
1991                 if (status)
1992                         goto err_msix;
1993         }
1994
1995         return 0;
1996
1997 err_msix:
1998         be_free_irq(adapter, &adapter->tx_eq, adapter);
1999
2000         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2001                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2002
2003 err:
2004         dev_warn(&adapter->pdev->dev,
2005                 "MSIX Request IRQ failed - err %d\n", status);
2006         pci_disable_msix(adapter->pdev);
2007         adapter->msix_enabled = false;
2008         return status;
2009 }
2010
2011 static int be_irq_register(struct be_adapter *adapter)
2012 {
2013         struct net_device *netdev = adapter->netdev;
2014         int status;
2015
2016         if (adapter->msix_enabled) {
2017                 status = be_msix_register(adapter);
2018                 if (status == 0)
2019                         goto done;
2020                 /* INTx is not supported for VF */
2021                 if (!be_physfn(adapter))
2022                         return status;
2023         }
2024
2025         /* INTx */
2026         netdev->irq = adapter->pdev->irq;
2027         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2028                         adapter);
2029         if (status) {
2030                 dev_err(&adapter->pdev->dev,
2031                         "INTx request IRQ failed - err %d\n", status);
2032                 return status;
2033         }
2034 done:
2035         adapter->isr_registered = true;
2036         return 0;
2037 }
2038
2039 static void be_irq_unregister(struct be_adapter *adapter)
2040 {
2041         struct net_device *netdev = adapter->netdev;
2042         struct be_rx_obj *rxo;
2043         int i;
2044
2045         if (!adapter->isr_registered)
2046                 return;
2047
2048         /* INTx */
2049         if (!adapter->msix_enabled) {
2050                 free_irq(netdev->irq, adapter);
2051                 goto done;
2052         }
2053
2054         /* MSIx */
2055         be_free_irq(adapter, &adapter->tx_eq, adapter);
2056
2057         for_all_rx_queues(adapter, rxo, i)
2058                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2059
2060 done:
2061         adapter->isr_registered = false;
2062 }
2063
2064 static int be_close(struct net_device *netdev)
2065 {
2066         struct be_adapter *adapter = netdev_priv(netdev);
2067         struct be_rx_obj *rxo;
2068         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2069         int vec, i;
2070
2071         be_async_mcc_disable(adapter);
2072
2073         netif_carrier_off(netdev);
2074         adapter->link_up = false;
2075
2076         if (!lancer_chip(adapter))
2077                 be_intr_set(adapter, false);
2078
2079         for_all_rx_queues(adapter, rxo, i)
2080                 napi_disable(&rxo->rx_eq.napi);
2081
2082         napi_disable(&tx_eq->napi);
2083
2084         if (lancer_chip(adapter)) {
2085                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2086                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2087                 for_all_rx_queues(adapter, rxo, i)
2088                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2089         }
2090
2091         if (adapter->msix_enabled) {
2092                 vec = be_msix_vec_get(adapter, tx_eq);
2093                 synchronize_irq(vec);
2094
2095                 for_all_rx_queues(adapter, rxo, i) {
2096                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2097                         synchronize_irq(vec);
2098                 }
2099         } else {
2100                 synchronize_irq(netdev->irq);
2101         }
2102         be_irq_unregister(adapter);
2103
2104         /* Wait for all pending tx completions to arrive so that
2105          * all tx skbs are freed.
2106          */
2107         be_tx_compl_clean(adapter);
2108
2109         return 0;
2110 }
2111
2112 static int be_open(struct net_device *netdev)
2113 {
2114         struct be_adapter *adapter = netdev_priv(netdev);
2115         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2116         struct be_rx_obj *rxo;
2117         bool link_up;
2118         int status, i;
2119         u8 mac_speed;
2120         u16 link_speed;
2121
2122         for_all_rx_queues(adapter, rxo, i) {
2123                 be_post_rx_frags(rxo, GFP_KERNEL);
2124                 napi_enable(&rxo->rx_eq.napi);
2125         }
2126         napi_enable(&tx_eq->napi);
2127
2128         be_irq_register(adapter);
2129
2130         if (!lancer_chip(adapter))
2131                 be_intr_set(adapter, true);
2132
2133         /* The evt queues are created in unarmed state; arm them */
2134         for_all_rx_queues(adapter, rxo, i) {
2135                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2136                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2137         }
2138         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2139
2140         /* Now that interrupts are on we can process async mcc */
2141         be_async_mcc_enable(adapter);
2142
2143         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2144                         &link_speed);
2145         if (status)
2146                 goto err;
2147         be_link_status_update(adapter, link_up);
2148
2149         if (be_physfn(adapter)) {
2150                 status = be_vid_config(adapter, false, 0);
2151                 if (status)
2152                         goto err;
2153
2154                 status = be_cmd_set_flow_control(adapter,
2155                                 adapter->tx_fc, adapter->rx_fc);
2156                 if (status)
2157                         goto err;
2158         }
2159
2160         return 0;
2161 err:
2162         be_close(adapter->netdev);
2163         return -EIO;
2164 }
2165
2166 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2167 {
2168         struct be_dma_mem cmd;
2169         int status = 0;
2170         u8 mac[ETH_ALEN];
2171
2172         memset(mac, 0, ETH_ALEN);
2173
2174         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2175         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2176                                     GFP_KERNEL);
2177         if (cmd.va == NULL)
2178                 return -1;
2179         memset(cmd.va, 0, cmd.size);
2180
2181         if (enable) {
2182                 status = pci_write_config_dword(adapter->pdev,
2183                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2184                 if (status) {
2185                         dev_err(&adapter->pdev->dev,
2186                                 "Could not enable Wake-on-lan\n");
2187                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2188                                           cmd.dma);
2189                         return status;
2190                 }
2191                 status = be_cmd_enable_magic_wol(adapter,
2192                                 adapter->netdev->dev_addr, &cmd);
2193                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2194                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2195         } else {
2196                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2197                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2198                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2199         }
2200
2201         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2202         return status;
2203 }
2204
2205 /*
2206  * Generate a seed MAC address from the PF MAC address using jhash.
2207  * MAC addresses for the VFs are assigned incrementally, starting from
2208  * the seed. These addresses are programmed into the ASIC by the PF; each
2209  * VF driver queries for its MAC address during its probe.
2210  */
2211 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2212 {
2213         u32 vf = 0;
2214         int status = 0;
2215         u8 mac[ETH_ALEN];
2216
2217         be_vf_eth_addr_generate(adapter, mac);
2218
2219         for (vf = 0; vf < num_vfs; vf++) {
2220                 status = be_cmd_pmac_add(adapter, mac,
2221                                         adapter->vf_cfg[vf].vf_if_handle,
2222                                         &adapter->vf_cfg[vf].vf_pmac_id,
2223                                         vf + 1);
2224                 if (status)
2225                         dev_err(&adapter->pdev->dev,
2226                                 "Mac address add failed for VF %d\n", vf);
2227                 else
2228                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2229
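                /* Only the last octet is bumped; no carry into mac[4] is
                 * handled, which is fine for the small VF counts used here.
                 */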
2230                 mac[5] += 1;
2231         }
2232         return status;
2233 }
2234
2235 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2236 {
2237         u32 vf;
2238
2239         for (vf = 0; vf < num_vfs; vf++) {
2240                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2241                         be_cmd_pmac_del(adapter,
2242                                         adapter->vf_cfg[vf].vf_if_handle,
2243                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2244         }
2245 }
2246
2247 static int be_setup(struct be_adapter *adapter)
2248 {
2249         struct net_device *netdev = adapter->netdev;
2250         u32 cap_flags, en_flags, vf = 0;
2251         int status;
2252         u8 mac[ETH_ALEN];
2253
2254         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2255                                 BE_IF_FLAGS_BROADCAST |
2256                                 BE_IF_FLAGS_MULTICAST;
2257
2258         if (be_physfn(adapter)) {
2259                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2260                                 BE_IF_FLAGS_PROMISCUOUS |
2261                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2262                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2263
2264                 if (be_multi_rxq(adapter)) {
2265                         cap_flags |= BE_IF_FLAGS_RSS;
2266                         en_flags |= BE_IF_FLAGS_RSS;
2267                 }
2268         }
2269
2270         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2271                         netdev->dev_addr, false/* pmac_invalid */,
2272                         &adapter->if_handle, &adapter->pmac_id, 0);
2273         if (status != 0)
2274                 goto do_none;
2275
2276         if (be_physfn(adapter)) {
2277                 if (adapter->sriov_enabled) {
2278                         while (vf < num_vfs) {
2279                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2280                                                         BE_IF_FLAGS_BROADCAST;
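                                /* mac is not initialized here; with
                                 * pmac_invalid == true the fw ignores it.
                                 * VF MAC addresses are programmed
                                 * separately, presumably via
                                 * be_vf_eth_addr_config().
                                 */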
2281                                 status = be_cmd_if_create(adapter, cap_flags,
2282                                         en_flags, mac, true,
2283                                         &adapter->vf_cfg[vf].vf_if_handle,
2284                                         NULL, vf+1);
2285                                 if (status) {
2286                                         dev_err(&adapter->pdev->dev,
2287                                         "Interface Create failed for VF %d\n",
2288                                         vf);
2289                                         goto if_destroy;
2290                                 }
2291                                 adapter->vf_cfg[vf].vf_pmac_id =
2292                                                         BE_INVALID_PMAC_ID;
2293                                 vf++;
2294                         }
2295                 }
2296         } else {
2297                 status = be_cmd_mac_addr_query(adapter, mac,
2298                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2299                 if (!status) {
2300                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2301                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2302                 }
2303         }
2304
2305         status = be_tx_queues_create(adapter);
2306         if (status != 0)
2307                 goto if_destroy;
2308
2309         status = be_rx_queues_create(adapter);
2310         if (status != 0)
2311                 goto tx_qs_destroy;
2312
2313         status = be_mcc_queues_create(adapter);
2314         if (status != 0)
2315                 goto rx_qs_destroy;
2316
2317         adapter->link_speed = -1;
2318
2319         return 0;
2320
2322 rx_qs_destroy:
2323         be_rx_queues_destroy(adapter);
2324 tx_qs_destroy:
2325         be_tx_queues_destroy(adapter);
2326 if_destroy:
2327         if (be_physfn(adapter) && adapter->sriov_enabled)
2328                 for (vf = 0; vf < num_vfs; vf++)
2329                         if (adapter->vf_cfg[vf].vf_if_handle)
2330                                 be_cmd_if_destroy(adapter,
2331                                         adapter->vf_cfg[vf].vf_if_handle,
2332                                         vf + 1);
2333         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2334 do_none:
2335         return status;
2336 }
2337
2338 static int be_clear(struct be_adapter *adapter)
2339 {
2340         int vf;
2341
2342         if (be_physfn(adapter) && adapter->sriov_enabled)
2343                 be_vf_eth_addr_rem(adapter);
2344
2345         be_mcc_queues_destroy(adapter);
2346         be_rx_queues_destroy(adapter);
2347         be_tx_queues_destroy(adapter);
2348
2349         if (be_physfn(adapter) && adapter->sriov_enabled)
2350                 for (vf = 0; vf < num_vfs; vf++)
2351                         if (adapter->vf_cfg[vf].vf_if_handle)
2352                                 be_cmd_if_destroy(adapter,
2353                                         adapter->vf_cfg[vf].vf_if_handle,
2354                                         vf + 1);
2355
2356         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2357
2358         /* tell fw we're done with firing cmds */
2359         be_cmd_fw_clean(adapter);
2360         return 0;
2361 }
2362
2364 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2365 static bool be_flash_redboot(struct be_adapter *adapter,
2366                         const u8 *p, u32 img_start, int image_size,
2367                         int hdr_size)
2368 {
2369         u32 crc_offset;
2370         u8 flashed_crc[4];
2371         int status;
2372
2373         crc_offset = hdr_size + img_start + image_size - 4;
2374
2375         p += crc_offset;
2376
2377         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2378                         (image_size - 4));
2379         if (status) {
2380                 dev_err(&adapter->pdev->dev,
2381                 "could not get crc from flash, not flashing redboot\n");
2382                 return false;
2383         }
2384
2385         /* update redboot only if crc does not match */
2386         return memcmp(flashed_crc, p, 4) != 0;
2390 }
2391
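/* Walk the per-generation component table and flash each image section.
 * Data is pushed to the fw in 32KB chunks: every chunk but the last uses
 * FLASHROM_OPER_SAVE and the final chunk uses FLASHROM_OPER_FLASH, which
 * commits the component. E.g. a 40KB section goes down as one 32KB SAVE
 * op followed by an 8KB FLASH op.
 */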
2392 static int be_flash_data(struct be_adapter *adapter,
2393                         const struct firmware *fw,
2394                         struct be_dma_mem *flash_cmd, int num_of_images)
2396 {
2397         int status = 0, i, filehdr_size = 0;
2398         u32 total_bytes = 0, flash_op;
2399         int num_bytes;
2400         const u8 *p = fw->data;
2401         struct be_cmd_write_flashrom *req = flash_cmd->va;
2402         const struct flash_comp *pflashcomp;
2403         int num_comp;
2404
2405         static const struct flash_comp gen3_flash_types[9] = {
2406                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2407                         FLASH_IMAGE_MAX_SIZE_g3},
2408                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2409                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2410                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2411                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2412                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2413                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2414                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2415                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2416                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2417                         FLASH_IMAGE_MAX_SIZE_g3},
2418                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2419                         FLASH_IMAGE_MAX_SIZE_g3},
2420                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2421                         FLASH_IMAGE_MAX_SIZE_g3},
2422                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2423                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2424         };
2425         static const struct flash_comp gen2_flash_types[8] = {
2426                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2427                         FLASH_IMAGE_MAX_SIZE_g2},
2428                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2429                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2430                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2431                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2432                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2433                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2434                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2435                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2436                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2437                         FLASH_IMAGE_MAX_SIZE_g2},
2438                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2439                         FLASH_IMAGE_MAX_SIZE_g2},
2440                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2441                          FLASH_IMAGE_MAX_SIZE_g2}
2442         };
2443
2444         if (adapter->generation == BE_GEN3) {
2445                 pflashcomp = gen3_flash_types;
2446                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2447                 num_comp = ARRAY_SIZE(gen3_flash_types);
2448         } else {
2449                 pflashcomp = gen2_flash_types;
2450                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2451                 num_comp = ARRAY_SIZE(gen2_flash_types);
2452         }
2453         for (i = 0; i < num_comp; i++) {
2454                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2455                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2456                         continue;
2457                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2458                         (!be_flash_redboot(adapter, fw->data,
2459                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2460                         (num_of_images * sizeof(struct image_hdr)))))
2461                         continue;
2462                 p = fw->data;
2463                 p += filehdr_size + pflashcomp[i].offset
2464                         + (num_of_images * sizeof(struct image_hdr));
2465                 if (p + pflashcomp[i].size > fw->data + fw->size)
2466                         return -1;
2467                 total_bytes = pflashcomp[i].size;
2468                 while (total_bytes) {
2469                         if (total_bytes > 32*1024)
2470                                 num_bytes = 32*1024;
2471                         else
2472                                 num_bytes = total_bytes;
2473                         total_bytes -= num_bytes;
2474
2475                         if (!total_bytes)
2476                                 flash_op = FLASHROM_OPER_FLASH;
2477                         else
2478                                 flash_op = FLASHROM_OPER_SAVE;
2479                         memcpy(req->params.data_buf, p, num_bytes);
2480                         p += num_bytes;
2481                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2482                                 pflashcomp[i].optype, flash_op, num_bytes);
2483                         if (status) {
2484                                 dev_err(&adapter->pdev->dev,
2485                                         "cmd to write to flash rom failed.\n");
2486                                 return -1;
2487                         }
2488                         yield();
2489                 }
2490         }
2491         return 0;
2492 }
2493
2494 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2495 {
2496         if (fhdr == NULL)
2497                 return 0;
2498         if (fhdr->build[0] == '3')
2499                 return BE_GEN3;
2500         else if (fhdr->build[0] == '2')
2501                 return BE_GEN2;
2502         else
2503                 return 0;
2504 }
2505
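/* Entry point for firmware flashing, typically reached via ethtool's
 * ETHTOOL_FLASH ioctl (e.g. "ethtool -f ethX <file>.ufi"). The UFI file
 * generation must match the adapter generation; for gen3 UFIs each image
 * header is inspected and only image id 1 is flashed.
 */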
2506 int be_load_fw(struct be_adapter *adapter, u8 *func)
2507 {
2508         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2509         const struct firmware *fw;
2510         struct flash_file_hdr_g2 *fhdr;
2511         struct flash_file_hdr_g3 *fhdr3;
2512         struct image_hdr *img_hdr_ptr = NULL;
2513         struct be_dma_mem flash_cmd;
2514         int status, i = 0, num_imgs = 0;
2515         const u8 *p;
2516
2517         if (!netif_running(adapter->netdev)) {
2518                 dev_err(&adapter->pdev->dev,
2519                         "Firmware load not allowed (interface is down)\n");
2520                 return -EPERM;
2521         }
2522
2523         strlcpy(fw_file, func, sizeof(fw_file));
2524
2525         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2526         if (status)
2527                 goto fw_exit;
2528
2529         p = fw->data;
2530         fhdr = (struct flash_file_hdr_g2 *) p;
2531         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2532
2533         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2534         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2535                                           &flash_cmd.dma, GFP_KERNEL);
2536         if (!flash_cmd.va) {
2537                 status = -ENOMEM;
2538                 dev_err(&adapter->pdev->dev,
2539                         "Memory allocation failure while flashing\n");
2540                 goto fw_exit;
2541         }
2542
2543         if ((adapter->generation == BE_GEN3) &&
2544                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2545                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2546                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2547                 for (i = 0; i < num_imgs; i++) {
2548                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2549                                         (sizeof(struct flash_file_hdr_g3) +
2550                                          i * sizeof(struct image_hdr)));
2551                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2552                                 status = be_flash_data(adapter, fw, &flash_cmd,
2553                                                         num_imgs);
2554                 }
2555         } else if ((adapter->generation == BE_GEN2) &&
2556                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2557                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2558         } else {
2559                 dev_err(&adapter->pdev->dev,
2560                         "UFI and Interface are not compatible for flashing\n");
2561                 status = -1;
2562         }
2563
2564         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2565                           flash_cmd.dma);
2566         if (status) {
2567                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2568                 goto fw_exit;
2569         }
2570
2571         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2572
2573 fw_exit:
2574         release_firmware(fw);
2575         return status;
2576 }
2577
2578 static struct net_device_ops be_netdev_ops = {
2579         .ndo_open               = be_open,
2580         .ndo_stop               = be_close,
2581         .ndo_start_xmit         = be_xmit,
2582         .ndo_set_rx_mode        = be_set_multicast_list,
2583         .ndo_set_mac_address    = be_mac_addr_set,
2584         .ndo_change_mtu         = be_change_mtu,
2585         .ndo_validate_addr      = eth_validate_addr,
2586         .ndo_vlan_rx_register   = be_vlan_register,
2587         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2588         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2589         .ndo_set_vf_mac         = be_set_vf_mac,
2590         .ndo_set_vf_vlan        = be_set_vf_vlan,
2591         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2592         .ndo_get_vf_config      = be_get_vf_config,
2593 };
2594
2595 static void be_netdev_init(struct net_device *netdev)
2596 {
2597         struct be_adapter *adapter = netdev_priv(netdev);
2598         struct be_rx_obj *rxo;
2599         int i;
2600
2601         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2602                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2603                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2604                 NETIF_F_GRO | NETIF_F_TSO6;
2605
2606         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2607                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2608
2609         if (lancer_chip(adapter))
2610                 netdev->vlan_features |= NETIF_F_TSO6;
2611
2612         netdev->flags |= IFF_MULTICAST;
2613
2614         adapter->rx_csum = true;
2615
2616         /* Default settings for Rx and Tx flow control */
2617         adapter->rx_fc = true;
2618         adapter->tx_fc = true;
2619
2620         netif_set_gso_max_size(netdev, 65535);
2621
2622         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2623
2624         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2625
2626         for_all_rx_queues(adapter, rxo, i)
2627                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2628                                 BE_NAPI_WEIGHT);
2629
2630         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2631                 BE_NAPI_WEIGHT);
2632 }
2633
2634 static void be_unmap_pci_bars(struct be_adapter *adapter)
2635 {
2636         if (adapter->csr)
2637                 iounmap(adapter->csr);
2638         if (adapter->db)
2639                 iounmap(adapter->db);
2640         if (adapter->pcicfg && be_physfn(adapter))
2641                 iounmap(adapter->pcicfg);
2642 }
2643
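/* Map the PCI BARs used by the driver. As encoded below: Lancer exposes
 * only a doorbell area, in BAR 0. Other chips map the CSR window from
 * BAR 2 on PFs; gen2 then uses BAR 4 for doorbells and BAR 1 for pcicfg,
 * gen3 PFs use BAR 4 for doorbells and BAR 0 for pcicfg, and gen3 VFs get
 * doorbells in BAR 0 with pcicfg at a fixed offset inside that mapping.
 */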
2644 static int be_map_pci_bars(struct be_adapter *adapter)
2645 {
2646         u8 __iomem *addr;
2647         int pcicfg_reg, db_reg;
2648
2649         if (lancer_chip(adapter)) {
2650                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2651                         pci_resource_len(adapter->pdev, 0));
2652                 if (addr == NULL)
2653                         return -ENOMEM;
2654                 adapter->db = addr;
2655                 return 0;
2656         }
2657
2658         if (be_physfn(adapter)) {
2659                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2660                                 pci_resource_len(adapter->pdev, 2));
2661                 if (addr == NULL)
2662                         return -ENOMEM;
2663                 adapter->csr = addr;
2664         }
2665
2666         if (adapter->generation == BE_GEN2) {
2667                 pcicfg_reg = 1;
2668                 db_reg = 4;
2669         } else {
2670                 pcicfg_reg = 0;
2671                 if (be_physfn(adapter))
2672                         db_reg = 4;
2673                 else
2674                         db_reg = 0;
2675         }
2676         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2677                                 pci_resource_len(adapter->pdev, db_reg));
2678         if (addr == NULL)
2679                 goto pci_map_err;
2680         adapter->db = addr;
2681
2682         if (be_physfn(adapter)) {
2683                 addr = ioremap_nocache(
2684                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2685                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2686                 if (addr == NULL)
2687                         goto pci_map_err;
2688                 adapter->pcicfg = addr;
2689         } else
2690                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2691
2692         return 0;
2693 pci_map_err:
2694         be_unmap_pci_bars(adapter);
2695         return -ENOMEM;
2696 }
2697
2699 static void be_ctrl_cleanup(struct be_adapter *adapter)
2700 {
2701         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2702
2703         be_unmap_pci_bars(adapter);
2704
2705         if (mem->va)
2706                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2707                                   mem->dma);
2708
2709         mem = &adapter->mc_cmd_mem;
2710         if (mem->va)
2711                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2712                                   mem->dma);
2713 }
2714
2715 static int be_ctrl_init(struct be_adapter *adapter)
2716 {
2717         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2718         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2719         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2720         int status;
2721
2722         status = be_map_pci_bars(adapter);
2723         if (status)
2724                 goto done;
2725
2726         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2727         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2728                                                 mbox_mem_alloc->size,
2729                                                 &mbox_mem_alloc->dma,
2730                                                 GFP_KERNEL);
2731         if (!mbox_mem_alloc->va) {
2732                 status = -ENOMEM;
2733                 goto unmap_pci_bars;
2734         }
2735
2736         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2737         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2738         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2739         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2740
2741         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2742         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2743                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2744                                             GFP_KERNEL);
2745         if (mc_cmd_mem->va == NULL) {
2746                 status = -ENOMEM;
2747                 goto free_mbox;
2748         }
2749         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2750
2751         mutex_init(&adapter->mbox_lock);
2752         spin_lock_init(&adapter->mcc_lock);
2753         spin_lock_init(&adapter->mcc_cq_lock);
2754
2755         init_completion(&adapter->flash_compl);
2756         pci_save_state(adapter->pdev);
2757         return 0;
2758
2759 free_mbox:
2760         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2761                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2762
2763 unmap_pci_bars:
2764         be_unmap_pci_bars(adapter);
2765
2766 done:
2767         return status;
2768 }
2769
2770 static void be_stats_cleanup(struct be_adapter *adapter)
2771 {
2772         struct be_dma_mem *cmd = &adapter->stats_cmd;
2773
2774         if (cmd->va)
2775                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2776                                   cmd->va, cmd->dma);
2777 }
2778
2779 static int be_stats_init(struct be_adapter *adapter)
2780 {
2781         struct be_dma_mem *cmd = &adapter->stats_cmd;
2782
2783         cmd->size = sizeof(struct be_cmd_req_get_stats);
2784         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2785                                      GFP_KERNEL);
2786         if (cmd->va == NULL)
2787                 return -1;
2788         memset(cmd->va, 0, cmd->size);
2789         return 0;
2790 }
2791
2792 static void __devexit be_remove(struct pci_dev *pdev)
2793 {
2794         struct be_adapter *adapter = pci_get_drvdata(pdev);
2795
2796         if (!adapter)
2797                 return;
2798
2799         cancel_delayed_work_sync(&adapter->work);
2800
2801         unregister_netdev(adapter->netdev);
2802
2803         be_clear(adapter);
2804
2805         be_stats_cleanup(adapter);
2806
2807         be_ctrl_cleanup(adapter);
2808
2809         be_sriov_disable(adapter);
2810
2811         be_msix_disable(adapter);
2812
2813         pci_set_drvdata(pdev, NULL);
2814         pci_release_regions(pdev);
2815         pci_disable_device(pdev);
2816
2817         free_netdev(adapter->netdev);
2818 }
2819
2820 static int be_get_config(struct be_adapter *adapter)
2821 {
2822         int status;
2823         u8 mac[ETH_ALEN];
2824
2825         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2826         if (status)
2827                 return status;
2828
2829         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2830                         &adapter->function_mode, &adapter->function_caps);
2831         if (status)
2832                 return status;
2833
2834         memset(mac, 0, ETH_ALEN);
2835
2836         if (be_physfn(adapter)) {
2837                 status = be_cmd_mac_addr_query(adapter, mac,
2838                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2839
2840                 if (status)
2841                         return status;
2842
2843                 if (!is_valid_ether_addr(mac))
2844                         return -EADDRNOTAVAIL;
2845
2846                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2847                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2848         }
2849
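        /* function_mode bit 0x400 appears to select multi-channel (FLEX10)
         * operation, where the VLAN table is shared four ways.
         */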
2850         if (adapter->function_mode & 0x400)
2851                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
2852         else
2853                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2854
2855         status = be_cmd_get_cntl_attributes(adapter);
2856         if (status)
2857                 return status;
2858
2859         be_cmd_check_native_mode(adapter);
2860         return 0;
2861 }
2862
2863 static int be_dev_family_check(struct be_adapter *adapter)
2864 {
2865         struct pci_dev *pdev = adapter->pdev;
2866         u32 sli_intf = 0, if_type;
2867
2868         switch (pdev->device) {
2869         case BE_DEVICE_ID1:
2870         case OC_DEVICE_ID1:
2871                 adapter->generation = BE_GEN2;
2872                 break;
2873         case BE_DEVICE_ID2:
2874         case OC_DEVICE_ID2:
2875                 adapter->generation = BE_GEN3;
2876                 break;
2877         case OC_DEVICE_ID3:
2878                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2879                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2880                                                 SLI_INTF_IF_TYPE_SHIFT;
2881
2882                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2883                         if_type != 0x02) {
2884                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2885                         return -EINVAL;
2886                 }
2887                 if (num_vfs > 0) {
2888                         dev_err(&pdev->dev, "VFs not supported\n");
2889                         return -EINVAL;
2890                 }
2891                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2892                                          SLI_INTF_FAMILY_SHIFT);
2893                 adapter->generation = BE_GEN3;
2894                 break;
2895         default:
2896                 adapter->generation = 0;
2897         }
2898         return 0;
2899 }
2900
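/* Poll the SLIPORT status register until the fw reports ready: up to 500
 * iterations with msleep(20), i.e. roughly 10 seconds.
 */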
2901 static int lancer_wait_ready(struct be_adapter *adapter)
2902 {
2903 #define SLIPORT_READY_TIMEOUT 500
2904         u32 sliport_status;
2905         int status = 0, i;
2906
2907         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2908                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2909                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2910                         break;
2911
2912                 msleep(20);
2913         }
2914
2915         if (i == SLIPORT_READY_TIMEOUT)
2916                 status = -1;
2917
2918         return status;
2919 }
2920
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if the adapter has cleared the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -EIO;
                } else if (err || reset_needed) {
                        status = -EIO;
                }
        }
        return status;
}

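/*
 * PCI probe: the bring-up order below matters -- enable the device,
 * map its control structures, wait for POST, issue FW_INIT, reset the
 * function, and only then allocate stats, queues and the netdev.
 * Failures unwind through the labels at the bottom in reverse order.
 */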
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* Prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        if (lancer_chip(adapter)) {
                status = lancer_test_and_set_rdy_state(adapter);
                if (status) {
                        dev_err(&pdev->dev,
                                "Adapter in non-recoverable error state\n");
                        /* be_ctrl_init() succeeded, so unwind through
                         * ctrl_clean to avoid leaking its resources */
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status)
                goto unsetup;
        netif_carrier_off(netdev);

        if (be_physfn(adapter) && adapter->sriov_enabled) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto unreg_netdev;
        }

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unreg_netdev:
        unregister_netdev(netdev);
unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

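/*
 * Legacy PM suspend: arm wake-on-LAN if configured, quiesce the
 * interface, tear down queues and MSI-X, and drop to the requested
 * low-power state.  be_resume() reverses these steps.
 */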
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

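/*
 * Legacy PM resume: restore PCI state, re-init the firmware, rebuild
 * the queues and reattach the netdev; WoL is disarmed last.
 */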
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * An FLR (triggered by the function reset below) stops BE from DMAing
 * any further data while the system shuts down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (netif_running(netdev))
                cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(netdev);

        be_cmd_reset_function(adapter);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        pci_disable_device(pdev);
}

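/*
 * EEH error handling: on a detected PCI channel error, detach the
 * netdev and free all HW resources so the slot can be reset safely.
 */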
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

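/*
 * Slot reset: re-enable the device, restore PCI state and verify the
 * card came back by waiting for POST to complete.
 */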
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

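/*
 * Resume after a successful slot reset: re-init the firmware, rebuild
 * the rings and reattach the netdev.
 */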
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers,
};

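/*
 * Module init: clamp invalid module parameters to safe defaults rather
 * than failing the load, then register the PCI driver.
 */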
3257 static int __init be_init_module(void)
3258 {
3259         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3260             rx_frag_size != 2048) {
3261                 printk(KERN_WARNING DRV_NAME
3262                         " : Module param rx_frag_size must be 2048/4096/8192."
3263                         " Using 2048\n");
3264                 rx_frag_size = 2048;
3265         }
3266
3267         if (num_vfs > 32) {
3268                 printk(KERN_WARNING DRV_NAME
3269                         " : Module param num_vfs must not be greater than 32."
3270                         "Using 32\n");
3271                 num_vfs = 32;
3272         }
3273
3274         return pci_register_driver(&be_driver);
3275 }
3276 module_init(be_init_module);
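/*
 * Example load (hypothetical parameter values; the module is built as
 * be2net, i.e. DRV_NAME):
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 */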

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);