/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}
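
/*
 * Note: every ring in this driver follows the same lifecycle (see, e.g.,
 * be_tx_queues_create()/be_tx_queues_destroy() below): be_queue_alloc()
 * reserves DMA-coherent memory for the ring, a be_cmd_*_create() call asks
 * the firmware to bind a hardware queue to it, and teardown runs in the
 * reverse order: be_cmd_q_destroy(), then be_queue_free().
 */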

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
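
/*
 * The doorbell helpers above share one encoding: the ring id sits in the
 * low bits of the 32-bit doorbell word and the posted/popped count is
 * shifted into higher bits, so a single iowrite32() both reports how many
 * entries the driver produced or consumed and, for EQs/CQs, optionally
 * re-arms the queue's interrupt via the REARM bit. The exact DB_*_SHIFT /
 * DB_*_MASK field positions come from the driver's hardware headers.
 */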

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = port_stats->rx_total_frames;
        dev_stats->tx_packets = port_stats->tx_unicastframes +
                port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
        dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
                                (u64) port_stats->rx_bytes_lsd;
        dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
                                (u64) port_stats->tx_bytes_lsd;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /*  no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped =
                erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /*  packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
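
/*
 * Worked example of the adaptive coalescing math above: at 440,000 rx
 * frags/sec, eqd = (440000 / 110000) << 3 = 32, so the EQ delay is set to
 * 32 (subject to the [min_eqd, max_eqd] clamp). Below 220,000 frags/sec
 * the computed value (0 or 8) falls under 10 and the delay is forced to
 * 0, i.e. interrupt coalescing is effectively disabled for light traffic.
 */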

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
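
/*
 * Example: 250,000,000 bytes over 2*HZ ticks is 125,000,000 bytes/sec;
 * << 3 gives 1,000,000,000 bits/sec, and the final division yields
 * 1000 Mbits/sec. Both callers invoke this only after at least two
 * seconds have elapsed, so the ticks/HZ divisor is never zero.
 */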

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
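
/*
 * Example: an skb with linear data and two page fragments needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 entries and no dummy; with a
 * single fragment the count would be 3, so a dummy wrb is added to keep
 * the number of entries posted per packet even.
 */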

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}


static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        u64 busaddr;
        u32 i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;

        hdr = queue_head_node(txq);
        atomic_add(wrb_cnt, &txq->used);
        queue_head_inc(txq);

        if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_err(&pdev->dev, "TX DMA mapping failed\n");
                /* Undo the wrb accounting done above: the caller rewinds
                 * txq->head on a 0 return, but not txq->used.
                 */
                atomic_sub(wrb_cnt, &txq->used);
                return 0;
        }

        if (skb->len > skb->data_len) {
                int len = skb->len - skb->data_len;
                wrb = queue_head_node(txq);
                busaddr = skb_shinfo(skb)->dma_head;
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_shinfo(skb)->dma_maps[i];
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
}
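
/*
 * Ring layout produced above for one transmit, in order: the header wrb
 * (reserved first, filled in last once the total byte count is known),
 * one wrb for the linear part of the skb if present, one wrb per page
 * fragment, and an all-zero dummy wrb when needed to even out the count.
 */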

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue.
                 */
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * If BE_NUM_VLANS_SUPPORTED or fewer VLANs are configured, program them
 * in BE.  If more than BE_NUM_VLANS_SUPPORTED are configured, set the BE
 * in promiscuous VLAN mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status;

        if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED)  {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }
        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans++;
        adapter->vlan_tag[vid] = 1;

        be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans--;
        adapter->vlan_tag[vid] = 0;

        vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        be_vid_config(adapter);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
                netdev->mc_count, &adapter->mc_cmd_mem);
done:
        return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
}

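/*
 * Returns true when the stack must verify the checksum itself, i.e. when
 * rx checksum offload (cso) is off or the completion does not report
 * valid L4 and IP checksums for a TCP/UDP frame; returns false when the
 * checksum can be trusted (CHECKSUM_UNNECESSARY). Note that ipv6_chk,
 * despite its name, only tests that the frame is TCP or UDP over IP.
 */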
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user)
                pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        memset(page_info, 0, sizeof(*page_info));

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd);
        return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u8 vtm;

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->cap == 0x400) && !vtm)
                vlanf = 0;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (!skb) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, skb, rxcp);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->dev = adapter->netdev;

        if (vlanf) {
                if (!adapter->vlan_grp || adapter->num_vlans == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }

        return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj =  &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->cap == 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);

                if (!adapter->vlan_grp || adapter->num_vlans == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd);
        return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return  alloc_pages(alloc_flags, order);
}
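
/*
 * Note: __GFP_COMP marks order > 0 allocations as compound pages, so the
 * per-fragment get_page()/put_page() references taken in
 * be_post_rx_frags() below pin the whole multi-page block as one
 * refcounted unit until the last fragment is consumed.
 */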

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                pci_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
                queue_head_inc(rxq);

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }

        return;
}
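
/*
 * Buffer sharing scheme used above: one "big page" (sized to hold several
 * rx_frag_size chunks) backs multiple rx descriptors. Every chunk after
 * the first takes an extra page reference via get_page(), and
 * last_page_user marks the final chunk so that get_rx_page_info() tears
 * down the pci mapping exactly once per big page.
 */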

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 0;

        cur_index = txq->tail;
        sent_skb = sent_skbs[cur_index];
        BUG_ON(!sent_skb);
        sent_skbs[cur_index] = NULL;

        do {
                cur_index = txq->tail;
                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);
        skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}
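
/*
 * The notify above re-arms the EQ and clears the interrupt even when no
 * events were popped (num == 0), which acknowledges spurious interrupts;
 * NAPI is scheduled only when real events were found.
 */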

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->rx_obj.q;
        if (q->created) {
                be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                be_rx_q_clean(adapter);
        }
        be_queue_free(adapter, q);

        q = &adapter->rx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->rx_eq);

        q = &adapter->rx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        int rc;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        adapter->rx_eq.max_eqd = BE_MAX_EQD;
        adapter->rx_eq.min_eqd = 0;
        adapter->rx_eq.cur_eqd = 0;
        adapter->rx_eq.enable_aic = true;

        /* Alloc Rx Event queue */
        eq = &adapter->rx_eq.q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
        if (rc)
                return rc;

        /* Ask BE to create Rx Event queue */
        rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
        if (rc)
                goto rx_eq_free;

        /* Alloc RX eth compl queue */
        cq = &adapter->rx_obj.cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                        sizeof(struct be_eth_rx_compl));
        if (rc)
                goto rx_eq_destroy;

        /* Ask BE to create Rx eth compl queue */
        rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
        if (rc)
                goto rx_cq_free;

        /* Alloc RX eth queue */
        q = &adapter->rx_obj.q;
        rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
        if (rc)
                goto rx_cq_destroy;

        /* Ask BE to create Rx eth queue */
        rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
        if (rc)
                goto rx_q_free;

        return 0;
rx_q_free:
        be_queue_free(adapter, q);
rx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
        be_queue_free(adapter, cq);
rx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
        be_queue_free(adapter, eq);
        return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
        return eq_id - 8 * be_pci_func(adapter);
}
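
/*
 * Example: with 8 event-queue ids per PCI function, function 2 owns ids
 * 16-23, so eq_id 19 maps to bit 3 of that function's ISR register (see
 * the CEV_ISR0_OFFSET read in be_intx() below).
 */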

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        int isr;

        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                        be_pci_func(adapter) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE;

        event_handle(adapter, &adapter->tx_eq);
        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                drvr_stats(adapter)->be_rxcp_err++;

        return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(rx_eq, struct be_adapter, rx_eq);
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        adapter->stats.drvr_stats.be_rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(adapter);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxcp))
                        be_rx_compl_process_gro(adapter, rxcp);
                else
                        be_rx_compl_process(adapter, rxcp);

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(adapter);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
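
/*
 * This follows the standard NAPI contract: if fewer completions than
 * 'budget' were processed, the poll is done, so napi_complete() is called
 * and the CQ is re-armed (arm == true); otherwise the CQ is only
 * acknowledged and the softirq will poll again with the rx interrupt
 * still unarmed.
 */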

void be_process_tx(struct be_adapter *adapter)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        u32 num_cmpl = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                num_cmpl++;
        }

        if (num_cmpl) {
                be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                drvr_stats(adapter)->be_tx_events++;
                drvr_stats(adapter)->be_tx_compl += num_cmpl;
        }
}
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);

        napi_complete(napi);

        be_process_tx(adapter);

        be_process_mcc(adapter);

        return 1;
}
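
/*
 * Unlike be_poll_rx(), this handler deliberately ignores 'budget': it
 * drains all TX and MCC completions in one pass (presumably because they
 * are cheap to process compared to rx work) and always reports one unit
 * of work after completing the napi poll.
 */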
1461
1462 static void be_worker(struct work_struct *work)
1463 {
1464         struct be_adapter *adapter =
1465                 container_of(work, struct be_adapter, work.work);
1466
1467         be_cmd_get_stats(adapter, &adapter->stats.cmd);
1468
1469         /* Set EQ delay */
1470         be_rx_eqd_update(adapter);
1471
1472         be_tx_rate_update(adapter);
1473         be_rx_rate_update(adapter);
1474
1475         if (adapter->rx_post_starved) {
1476                 adapter->rx_post_starved = false;
1477                 be_post_rx_frags(adapter);
1478         }
1479
1480         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1481 }
1482
1483 static void be_msix_disable(struct be_adapter *adapter)
1484 {
1485         if (adapter->msix_enabled) {
1486                 pci_disable_msix(adapter->pdev);
1487                 adapter->msix_enabled = false;
1488         }
1489 }
1490
static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

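/* Register IRQ handlers: MSI-X vectors for the Tx/MCC and Rx event
 * queues when available, otherwise a single shared INTx handler.
 */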
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}

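/* ndo_open: post Rx buffers, enable NAPI, register and enable
 * interrupts, arm the event/completion queues, then query link state
 * and push the VLAN and flow-control config to the firmware.
 */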
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
					adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}

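/* Create the firmware-side interface and the Tx, Rx and MCC queue
 * sets; on any failure unwind whatever was created so far.
 */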
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

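/* ndo_stop: quiesce in roughly the reverse order of be_open() - stop
 * the netdev queue, mask interrupts, unregister handlers, disable NAPI
 * and finally drain outstanding Tx completions.
 */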
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static const char flash_cookie[2][16] = {"*** SE FLAS",
					 "H DIRECTORY *** "};

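/* Decide whether the redboot section needs (re)flashing: read back the
 * CRC of the image currently in flash and compare it against the CRC
 * bytes at the corresponding offset in the firmware file; skip the
 * flash when they match.
 */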
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
			+ sizeof(struct flash_file_hdr) - 32*1024;
	p += crc_offset;
	status = be_cmd_get_flash_crc(adapter, flashed_crc);
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

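/* Flash one image section: locate it in the firmware file by type,
 * then feed it to the adapter in 32KB chunks via the flashrom cmd,
 * using OPER_SAVE for all chunks but the last, which uses OPER_FLASH
 * to commit.
 */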
static int be_flash_image(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, u32 flash_type)
{
	int status;
	u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	switch (flash_type) {
	case FLASHROM_TYPE_ISCSI_ACTIVE:
		image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_ISCSI_BACKUP:
		image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_ACTIVE:
		image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_BACKUP:
		image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_BIOS:
		image_offset = FLASH_iSCSI_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_BIOS:
		image_offset = FLASH_FCoE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_PXE_BIOS:
		image_offset = FLASH_PXE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_REDBOOT:
		if (!be_flash_redboot(adapter, fw->data))
			return 0;
		image_offset = FLASH_REDBOOT_ISM_START;
		image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
		break;
	default:
		return 0;
	}

	p += sizeof(struct flash_file_hdr) + image_offset;
	if (p + image_size > fw->data + fw->size)
		return -1;

	total_bytes = image_size;

	while (total_bytes) {
		if (total_bytes > 32*1024)
			num_bytes = 32*1024;
		else
			num_bytes = total_bytes;
		total_bytes -= num_bytes;

		if (!total_bytes)
			flash_op = FLASHROM_OPER_FLASH;
		else
			flash_op = FLASHROM_OPER_SAVE;
		memcpy(req->params.data_buf, p, num_bytes);
		p += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd,
				flash_type, flash_op, num_bytes);
		if (status) {
			dev_err(&adapter->pdev->dev,
			"cmd to write to flash rom failed. type/op %d/%d\n",
			flash_type, flash_op);
			return -1;
		}
		yield();
	}

	return 0;
}

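/* ethtool firmware flashing entry point: validate the file header
 * signature and the flash-directory cookie, then flash each supported
 * section in turn using a single reusable DMA buffer.
 */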
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr *fhdr;
	struct flash_section_info *fsec = NULL;
	struct be_dma_mem flash_cmd;
	int status;
	const u8 *p;
	bool entry_found = false;
	int flash_type;
	char fw_ver[FW_VER_LEN];
	char fw_cfg;

	status = be_cmd_get_fw_ver(adapter, fw_ver);
	if (status)
		return status;

	fw_cfg = *(fw_ver + 2);
	if (fw_cfg == '0')
		fw_cfg = '1';
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr *) p;
	if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
		dev_err(&adapter->pdev->dev,
			"Firmware(%s) load error (signature did not match)\n",
				fw_file);
		status = -1;
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	p += sizeof(struct flash_file_hdr);
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
			entry_found = true;
			break;
		}
		p += 32;
	}

	if (!entry_found) {
		status = -1;
		dev_err(&adapter->pdev->dev,
			"Flash cookie not found in firmware image\n");
		goto fw_exit;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
		flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
		status = be_flash_image(adapter, fw, &flash_cmd,
				flash_type);
		if (status)
			break;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}
	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}

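/* Map the BARs used by the driver: BAR 2 for the CSR space, the first
 * 128KB of BAR 4 for the doorbells and BAR 1 for the pcicfg space.
 */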
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

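/* Map the PCI BARs and set up the mailbox and multicast command DMA
 * buffers. The mailbox is over-allocated by 16 bytes so that the
 * address handed to hardware can be aligned to a 16-byte boundary.
 */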
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
				&adapter->port_num, &adapter->cap);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
	if (status)
		return status;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	return 0;
}

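/* PCI probe: enable the device, map resources, bring the firmware to a
 * ready state (POST, fw_init, function reset), read the adapter config
 * and finally create the queues and register the netdev. Each failure
 * path unwinds only what was set up before it.
 */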
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	status = be_cmd_POST(adapter);
	if (status)
		goto ctrl_clean;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

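/* Legacy PM hooks: suspend reads back the current flow-control
 * settings (so be_open() can restore them) and tears the interface
 * down; resume re-inits the firmware, rebuilds the queues and reopens
 * the netdev if it was running.
 */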
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};

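/* Example (assumes the module is built under its mainline name,
 * be2net) of loading the driver with a non-default Rx fragment size:
 *
 *   # modprobe be2net rx_frag_size=4096
 *
 * Values other than 2048/4096/8192 are rejected below and 2048 is
 * used instead.
 */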
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096
		&& rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);