be2net: minor code optimizations
[pandora-kernel.git] / drivers / net / benet / be_main.c
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
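
/*
 * The notify helpers below ring a doorbell by packing the queue id and a
 * count into a single 32-bit register write. For example (illustrative
 * values only), posting 7 rx buffers to queue id 5 via be_rxq_notify()
 * writes (5 & DB_RQ_RING_ID_MASK) | (7 << DB_RQ_NUM_POSTED_SHIFT)
 * to the doorbell register at DB_RQ_OFFSET.
 */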

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = port_stats->rx_total_frames;
        dev_stats->tx_packets = port_stats->tx_unicastframes +
                port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
        dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
                                (u64) port_stats->rx_bytes_lsd;
        dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
                                (u64) port_stats->tx_bytes_lsd;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped =
                erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
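
/*
 * A worked example of the adaptive eqd computation above (illustrative
 * numbers only): at 1,400,000 rx frags/sec, eqd = (1400000 / 110000) << 3
 * = 12 << 3 = 96, then clamped to [min_eqd, max_eqd]. At a light load of
 * 275,000 frags/sec, eqd = 2 << 3 = 16; anything below 10 is forced to 0,
 * i.e. no interrupt delay at all.
 */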

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* Mbits/sec */

        return rate;
}
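
/*
 * Example (illustrative): 500,000,000 bytes moved over ticks == 2 * HZ
 * yields 500000000 / 2 = 250,000,000 bytes/sec, << 3 = 2,000,000,000
 * bits/sec, / 1000000 = 2000 Mbits/sec.
 */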

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
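
/*
 * Example (illustrative): an skb with linear data and two page frags needs
 * 1 + 2 data WRBs plus the header WRB, i.e. 4 in total -- already even, so
 * no dummy. With a single page frag the count would be 3, so a dummy WRB
 * is added to keep the per-packet WRB count even.
 */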

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}


static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        u64 busaddr;
        u32 i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;

        hdr = queue_head_node(txq);
        atomic_add(wrb_cnt, &txq->used);
        queue_head_inc(txq);

        if (skb->len > skb->data_len) {
                int len = skb->len - skb->data_len;
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE.  If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status;

        if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED)  {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }
        return status;
}
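
/*
 * Example (illustrative): with VLAN ids 5, 10 and 100 configured,
 * be_vid_config() builds vtag[] = { 5, 10, 100 } (each cpu_to_le16'd)
 * with ntags = 3. Once num_vlans exceeds BE_NUM_VLANS_SUPPORTED, the
 * table is dropped and the interface is put in VLAN-promiscuous mode
 * instead (the NULL/0/1/1 call above).
 */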

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans++;
        adapter->vlan_tag[vid] = 1;

        be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans--;
        adapter->vlan_tag[vid] = 0;

        vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        be_vid_config(adapter);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
                netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
        return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
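
/*
 * In other words (see the caller in be_rx_compl_process()): do_pkt_csum()
 * returns false -- so the skb is marked CHECKSUM_UNNECESSARY -- only when
 * rx checksum offload is enabled (cso) and the completion reports valid
 * IP and L4 checksums for a TCP or UDP frame; in every other case it
 * returns true and the packet goes up as CHECKSUM_NONE for the stack to
 * verify.
 */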

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
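
/*
 * Note: several rx fragments may share one "big" page (see
 * be_post_rx_frags() below), so the DMA mapping is torn down only when the
 * fragment flagged last_page_user is consumed.
 */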

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd);
        return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u8 vtm;

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->cap & 0x400) && !vtm)
                vlanf = 0;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (!skb) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, skb, rxcp);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->dev = adapter->netdev;

        if (vlanf) {
                if (!adapter->vlan_grp || adapter->num_vlans == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }

        return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj =  &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->cap & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);

                if (!adapter->vlan_grp || adapter->num_vlans == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd);
        return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}
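
/*
 * __GFP_COMP matters for order > 0 allocations: it makes the pages a
 * compound page, so the per-fragment get_page()/put_page() refcounting
 * used by the rx path operates on the page head correctly.
 */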

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                pci_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }

        return;
}
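
/*
 * Example (illustrative): with 4 KB pages and the default rx_frag_size of
 * 2048, big_page_size works out to 4096 and each page yields two
 * fragments -- offsets 0 and 2048 -- with the second one flagged
 * last_page_user so the page is unmapped when that fragment is consumed.
 */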

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u64 busaddr;
        u16 cur_index, num_wrbs = 0;

        cur_index = txq->tail;
        sent_skb = sent_skbs[cur_index];
        BUG_ON(!sent_skb);
        sent_skbs[cur_index] = NULL;
        wrb = queue_tail_node(txq);
        be_dws_le_to_cpu(wrb, sizeof(*wrb));
        busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
        if (busaddr != 0) {
                pci_unmap_single(adapter->pdev, busaddr,
                                 wrb->frag_len, PCI_DMA_TODEVICE);
        }
        num_wrbs++;
        queue_tail_inc(txq);

        while (cur_index != last_index) {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                be_dws_le_to_cpu(wrb, sizeof(*wrb));
                busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
                if (busaddr != 0) {
                        pci_unmap_page(adapter->pdev, busaddr,
                                       wrb->frag_len, PCI_DMA_TODEVICE);
                }
                num_wrbs++;
                queue_tail_inc(txq);
        }

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->rx_obj.q;
        if (q->created) {
                be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                be_rx_q_clean(adapter);
        }
        be_queue_free(adapter, q);

        q = &adapter->rx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->rx_eq);

        q = &adapter->rx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        int rc;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        adapter->rx_eq.max_eqd = BE_MAX_EQD;
        adapter->rx_eq.min_eqd = 0;
        adapter->rx_eq.cur_eqd = 0;
        adapter->rx_eq.enable_aic = true;

        /* Alloc Rx Event queue */
        eq = &adapter->rx_eq.q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
        if (rc)
                return rc;

        /* Ask BE to create Rx Event queue */
        rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
        if (rc)
                goto rx_eq_free;

        /* Alloc RX eth compl queue */
        cq = &adapter->rx_obj.cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                        sizeof(struct be_eth_rx_compl));
        if (rc)
                goto rx_eq_destroy;

        /* Ask BE to create Rx eth compl queue */
        rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
        if (rc)
                goto rx_cq_free;

        /* Alloc RX eth queue */
        q = &adapter->rx_obj.q;
        rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
        if (rc)
                goto rx_cq_destroy;

        /* Ask BE to create Rx eth queue */
        rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
        if (rc)
                goto rx_q_free;

        return 0;
rx_q_free:
        be_queue_free(adapter, q);
rx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
        be_queue_free(adapter, cq);
rx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
        be_queue_free(adapter, eq);
        return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
        return eq_id - 8 * be_pci_func(adapter);
}
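
/*
 * Example (illustrative): PCI function 2 owns event-queue ids 16..23, so
 * eq_id 19 maps to bit number 19 - 8 * 2 = 3 in that function's ISR.
 */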

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        int isr;

        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE;

        event_handle(adapter, &adapter->tx_eq);
        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                drvr_stats(adapter)->be_rxcp_err++;

        return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(rx_eq, struct be_adapter, rx_eq);
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        adapter->stats.drvr_stats.be_rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(adapter);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxcp))
                        be_rx_compl_process_gro(adapter, rxcp);
                else
                        be_rx_compl_process(adapter, rxcp);

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(adapter);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        u32 num_cmpl = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                num_cmpl++;
        }

        if (num_cmpl) {
                be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                drvr_stats(adapter)->be_tx_events++;
                drvr_stats(adapter)->be_tx_compl += num_cmpl;
        }
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);

        napi_complete(napi);

        be_process_tx(adapter);

        be_process_mcc(adapter);

        return 1;
}

static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);

        be_cmd_get_stats(adapter, &adapter->stats.cmd);

        /* Set EQ delay */
        be_rx_eqd_update(adapter);

        be_tx_rate_update(adapter);
        be_rx_rate_update(adapter);

        if (adapter->rx_post_starved) {
                adapter->rx_post_starved = false;
                be_post_rx_frags(adapter);
        }

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1504
1505 static void be_msix_disable(struct be_adapter *adapter)
1506 {
1507         if (adapter->msix_enabled) {
1508                 pci_disable_msix(adapter->pdev);
1509                 adapter->msix_enabled = false;
1510         }
1511 }
1512
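/* Try to enable MSI-X; if the request fails we simply stay on INTx,
 * which be_irq_register() falls back to.
 */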
static void be_msix_enable(struct be_adapter *adapter)
{
        int i, status;

        for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                BE_NUM_MSIX_VECTORS);
        if (status == 0)
                adapter->msix_enabled = true;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
        return adapter->msix_entries[
                        be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj,
                void *handler, char *desc)
{
        struct net_device *netdev = adapter->netdev;
        int vec;

        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
        vec = be_msix_vec_get(adapter, eq_obj->q.id);
        return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
        int vec = be_msix_vec_get(adapter, eq_obj->q.id);
        free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
        int status;

        status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
        if (status)
                goto err;

        status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
        if (status)
                goto free_tx_irq;

        return 0;

free_tx_irq:
        be_free_irq(adapter, &adapter->tx_eq);
err:
        dev_warn(&adapter->pdev->dev,
                "MSIX Request IRQ failed - err %d\n", status);
        pci_disable_msix(adapter->pdev);
        adapter->msix_enabled = false;
        return status;
}

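/* Register IRQ handlers: MSI-X vectors for the Tx/MCC and Rx event
 * queues when available, else a shared INTx line.
 */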
static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (adapter->msix_enabled) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!adapter->msix_enabled) {
                free_irq(netdev->irq, adapter);
                goto done;
        }

        /* MSIx */
        be_free_irq(adapter, &adapter->tx_eq);
        be_free_irq(adapter, &adapter->rx_eq);
done:
        adapter->isr_registered = false;
}

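/* netdev open entry point: post Rx buffers, enable NAPI, register and
 * enable interrupts, arm the event/completion queues and sync link,
 * VLAN and flow-control state with the firmware.
 */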
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        bool link_up;
        int status;
        u8 mac_speed;
        u16 link_speed;

        /* First time posting */
        be_post_rx_frags(adapter);

        napi_enable(&rx_eq->napi);
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        be_intr_set(adapter, true);

        /* The evt queues are created in unarmed state; arm them */
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

        /* Rx compl queue may be in unarmed state; rearm it */
        be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
                        &link_speed);
        if (status)
                goto ret_sts;
        be_link_status_update(adapter, link_up);

        status = be_vid_config(adapter);
        if (status)
                goto ret_sts;

        status = be_cmd_set_flow_control(adapter,
                                        adapter->tx_fc, adapter->rx_fc);
        if (status)
                goto ret_sts;

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
        return status;
}

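/* Program the firmware for magic-packet Wake-on-LAN (a zeroed MAC
 * address disables it) and set the PCI wake capability for
 * D3hot/D3cold to match.
 */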
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
        struct be_dma_mem cmd;
        int status = 0;
        u8 mac[ETH_ALEN];

        memset(mac, 0, ETH_ALEN);

        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (cmd.va == NULL)
                return -ENOMEM;
        memset(cmd.va, 0, cmd.size);

        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-LAN\n");
                        pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
                                        cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
                                adapter->netdev->dev_addr, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }

        pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
}

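/* Create the interface handle and the Tx, Rx and MCC queue sets; on
 * failure everything created so far is torn down in reverse order.
 */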
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags;
        int status;

        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_MCAST_PROMISCUOUS |
                        BE_IF_FLAGS_PROMISCUOUS |
                        BE_IF_FLAGS_PASS_L3L4_ERRORS;
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_PASS_L3L4_ERRORS;

        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false/* pmac_invalid */,
                        &adapter->if_handle, &adapter->pmac_id);
        if (status != 0)
                goto do_none;

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        adapter->link_speed = -1;

        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
        return status;
}

static int be_clear(struct be_adapter *adapter)
{
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);

        be_cmd_if_destroy(adapter, adapter->if_handle);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
}

static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec;

        cancel_delayed_work_sync(&adapter->work);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;

        be_intr_set(adapter, false);

        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq->q.id);
                synchronize_irq(vec);
                vec = be_msix_vec_get(adapter, rx_eq->q.id);
                synchronize_irq(vec);
        } else {
                synchronize_irq(netdev->irq);
        }
        be_irq_unregister(adapter);

        napi_disable(&rx_eq->napi);
        napi_disable(&tx_eq->napi);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        return 0;
}

#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
static const char flash_cookie[2][16] = {"*** SE FLAS",
                                         "H DIRECTORY *** "};

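/* Decide whether the redboot section needs flashing by comparing the
 * CRC stored in flash with the last 4 bytes of the new redboot image.
 */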
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (img_start + image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /* update redboot only if the crc does not match */
        return memcmp(flashed_crc, p, 4) != 0;
}

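/* Walk the per-generation flash layout table and write each image
 * section to the flashrom in 32KB chunks; intermediate chunks are
 * staged with FLASHROM_OPER_SAVE and only the final chunk of a section
 * is committed with FLASHROM_OPER_FLASH.
 */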
static int be_flash_data(struct be_adapter *adapter,
                        const struct firmware *fw,
                        struct be_dma_mem *flash_cmd, int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        u32 total_bytes = 0, flash_op;
        int num_bytes;
        const u8 *p = fw->data;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
        struct flash_comp *pflashcomp;

        struct flash_comp gen3_flash_types[8] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3}
        };
        struct flash_comp gen2_flash_types[8] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2}
        };

        if (adapter->generation == BE_GEN3) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
        }
        for (i = 0; i < 8; i++) {
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
                         pflashcomp[i].offset, pflashcomp[i].size,
                         filehdr_size)))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
                        + (num_of_images * sizeof(struct image_hdr));
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
                total_bytes = pflashcomp[i].size;
                while (total_bytes) {
                        if (total_bytes > 32*1024)
                                num_bytes = 32*1024;
                        else
                                num_bytes = total_bytes;
                        total_bytes -= num_bytes;

                        if (!total_bytes)
                                flash_op = FLASHROM_OPER_FLASH;
                        else
                                flash_op = FLASHROM_OPER_SAVE;
                        memcpy(req->params.data_buf, p, num_bytes);
                        p += num_bytes;
                        status = be_cmd_write_flashrom(adapter, flash_cmd,
                                pflashcomp[i].optype, flash_op, num_bytes);
                        if (status) {
                                dev_err(&adapter->pdev->dev,
                                        "cmd to write to flash rom failed.\n");
                                return -1;
                        }
                        yield();
                }
        }
        return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
        if (fhdr == NULL)
                return 0;
        if (fhdr->build[0] == '3')
                return BE_GEN3;
        else if (fhdr->build[0] == '2')
                return BE_GEN2;
        else
                return 0;
}

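/* Flash a UFI firmware image (the ethtool flashing path): verify that
 * the image generation matches the adapter before writing it out.
 */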
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
        char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
        const struct firmware *fw;
        struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        int status, i = 0;
        const u8 *p;
        char fw_ver[FW_VER_LEN];
        char fw_cfg;

        status = be_cmd_get_fw_ver(adapter, fw_ver);
        if (status)
                return status;

        fw_cfg = *(fw_ver + 2);
        if (fw_cfg == '0')
                fw_cfg = '1';
        strcpy(fw_file, func);

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        p = fw->data;
        fhdr = (struct flash_file_hdr_g2 *) p;
        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
        flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
                                        &flash_cmd.dma);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto fw_exit;
        }

        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
                for (i = 0; i < fhdr3->num_imgs; i++) {
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
                                        i * sizeof(struct image_hdr)));
                        if (img_hdr_ptr->imageid == 1) {
                                status = be_flash_data(adapter, fw,
                                                &flash_cmd, fhdr3->num_imgs);
                        }
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
                status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI and Interface are not compatible for flashing\n");
                status = -1;
        }

        pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
                                flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
        release_firmware(fw);
        return status;
}

static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_get_stats          = be_get_stats,
        .ndo_set_rx_mode        = be_set_multicast_list,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
                NETIF_F_GRO;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

        netdev->flags |= IFF_MULTICAST;

        adapter->rx_csum = true;

        /* Default settings for Rx and Tx flow control */
        adapter->rx_fc = true;
        adapter->tx_fc = true;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
                BE_NAPI_WEIGHT);
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg)
                iounmap(adapter->pcicfg);
}

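/* Map the PCI BARs: CSR space is BAR 2, the doorbell area is BAR 4 and
 * the config shadow is BAR 1 on GEN2 chips or BAR 0 otherwise.
 */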
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int pcicfg_reg;

        addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                        pci_resource_len(adapter->pdev, 2));
        if (addr == NULL)
                return -ENOMEM;
        adapter->csr = addr;

        addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
                        128 * 1024);
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        if (adapter->generation == BE_GEN2)
                pcicfg_reg = 1;
        else
                pcicfg_reg = 0;

        addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
                        pci_resource_len(adapter->pdev, pcicfg_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->pcicfg = addr;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);

        mem = &adapter->mc_cmd_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

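/* Map the BARs and allocate the DMA memory used for mailbox and
 * multicast-config commands.  The mailbox is kept 16-byte aligned by
 * over-allocating and rounding up: e.g. a va ending in 0x...08 becomes
 * 0x...10 after PTR_ALIGN(va, 16).
 */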
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
                                mbox_mem_alloc->size, &mbox_mem_alloc->dma);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
                        &mc_cmd_mem->dma);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        spin_lock_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        return 0;

free_mbox:
        pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
                mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_stats_obj *stats = &adapter->stats;
        struct be_dma_mem *cmd = &stats->cmd;

        if (cmd->va)
                pci_free_consistent(adapter->pdev, cmd->size,
                        cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
        struct be_stats_obj *stats = &adapter->stats;
        struct be_dma_mem *cmd = &stats->cmd;

        cmd->size = sizeof(struct be_cmd_req_get_stats);
        cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
        if (cmd->va == NULL)
                return -ENOMEM;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

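/* PCI remove: unregister the netdev, destroy the queues and interface,
 * free stats/control DMA memory and release the PCI resources.
 */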
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

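/* Query firmware version, port configuration and the permanent MAC
 * address, which becomes the netdev's initial hardware address.
 */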
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter,
                                &adapter->port_num, &adapter->cap);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);
        status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
        if (status)
                return status;

        if (!is_valid_ether_addr(mac))
                return -EADDRNOTAVAIL;

        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

        return 0;
}

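/* PCI probe: enable the device, derive the ASIC generation from the
 * device id, set the DMA mask (64-bit with a 32-bit fallback), bring
 * the firmware to a ready state, create the queues and register the
 * netdev.
 */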
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }

        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        be_netdev_init(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        be_msix_enable(adapter);

        status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        status = be_cmd_POST(adapter);
        if (status)
                goto ctrl_clean;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_msix_disable(adapter);
        free_netdev(adapter->netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

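/* PM suspend: optionally arm Wake-on-LAN, close the interface, save
 * config space and drop into the requested low-power state.
 */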
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);
        return 0;
}

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);