/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
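
/*
 * Usage sketch (illustrative, not part of this file): assuming the module
 * is built as be2net, the parameters above can be set at load time, e.g.:
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 * rx_frag_size and num_vfs are read-only at runtime (S_IRUGO); multi_rxq
 * is additionally writable by root (S_IWUSR).
 */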
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	/* per-bit description strings elided */
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	/* per-bit description strings elided */
};
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
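
/*
 * Worked example for the CQ doorbell above: qid = 5, arm = true,
 * num_popped = 3 produces
 *	val = (5 & DB_CQ_RING_ID_MASK) |
 *	      (1 << DB_CQ_REARM_SHIFT) |
 *	      (3 << DB_CQ_NUM_POPPED_SHIFT);
 * so a single 32-bit write both acks three completions and re-arms the CQ.
 * The EQ doorbell is composed the same way.
 */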
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
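
/*
 * Example: at 220000 rx frags/sec the raw value is 220000 / 110000 = 2,
 * which is then clamped to [min_eqd, max_eqd]; busier queues thus get a
 * larger event delay and coalesce more completions per interrupt.
 */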
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}
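
/*
 * Example: 500 MB moved in 2 * HZ ticks -> 500000000 / 2 = 250000000
 * bytes/sec, << 3 = 2000000000 bits/sec, / 1000000 = 2000 Mbits/sec.
 */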
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
				bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
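
/*
 * Example: a non-GSO skb with linear data and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is even, so no dummy
 * WRB is added. With one frag the count would be 3 (odd) and a dummy
 * WRB is appended on non-Lancer chips to keep it even.
 */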
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
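
/*
 * Example: assuming BE_MAX_JUMBO_FRAME_SIZE is 9018 (value taken from the
 * driver headers, not shown in this excerpt), the largest accepted MTU is
 * 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - 18 = 9000 bytes.
 */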
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
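
/*
 * Example: an IPv4/TCP frame is trusted only if both l4_csum and ip_csum
 * passed; an IPv6/TCP frame needs only l4_csum, since IPv6 has no header
 * checksum to verify.
 */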
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					compl);
	}
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
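
/*
 * Example: with rx_frag_size = 2048 and big_page_size = 4096 (one order-0
 * page on x86), each page is carved into two fragments; the second
 * fragment sets last_page_user so the page is unmapped only after both
 * fragments have been consumed.
 */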
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
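
/*
 * Note: pci_enable_msix() follows the old contract here: a positive
 * return value is the number of vectors the platform can actually
 * provide, so the code retries once with that smaller count before
 * falling back to INTx (see be_irq_register()).
 */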
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
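
/*
 * Example: if the generated seed is aa:bb:cc:dd:ee:10, VF 0 is given
 * ...:10, VF 1 gets ...:11 and so on; only mac[5] is incremented per VF,
 * so this scheme can seed at most 256 VFs before wrapping.
 */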
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2429 static int be_flash_data(struct be_adapter *adapter,
2430 const struct firmware *fw,
2431 struct be_dma_mem *flash_cmd, int num_of_images)
2434 int status = 0, i, filehdr_size = 0;
2435 u32 total_bytes = 0, flash_op;
2437 const u8 *p = fw->data;
2438 struct be_cmd_write_flashrom *req = flash_cmd->va;
2439 const struct flash_comp *pflashcomp;
	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};
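	/*
	 * The tables above describe the fixed flash layout per ASIC
	 * generation: where each image type (firmware, redboot, the BIOS
	 * variants, NCSI) starts and the maximum size it may occupy.
	 */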
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
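		/*
		 * Each section is streamed to the adapter in 32KB chunks,
		 * the payload limit of one FLASHROM command: OPER_SAVE
		 * buffers a chunk on the adapter, and the final chunk is
		 * sent with OPER_FLASH to commit the whole section.
		 */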
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;

	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
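/*
 * The first character of the UFI build string identifies the controller
 * generation the image was built for ('2' -> BE2, '3' -> BE3); it is
 * checked against the adapter's generation before flashing.
 */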
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status, i = 0, num_imgs = 0;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
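	/* The DMA buffer must hold the flashrom request header plus one
	 * 32KB data chunk -- the unit in which be_flash_data() streams
	 * each image section to the adapter.
	 */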
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}
	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
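	/*
	 * hw_features lists what the device can do and may be toggled via
	 * ethtool; features is the set currently active. VLAN stripping
	 * and filtering are kept always-on here, and vlan_features applies
	 * to packets that will carry a VLAN tag on transmit.
	 */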
	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
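/*
 * The BAR layout differs by family: Lancer exposes only a doorbell BAR
 * (BAR 0); BE2/BE3 PFs map CSR (BAR 2) plus doorbell and PCI-config
 * shadow BARs whose numbers depend on the generation, while VFs reach
 * their config shadow at a fixed offset within the doorbell BAR.
 */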
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
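	/*
	 * The mailbox must be 16-byte aligned: the allocation is padded by
	 * 16 bytes and both the CPU and bus addresses are rounded up with
	 * PTR_ALIGN. E.g. a buffer ending in 0x08 is used from 0x10 onward.
	 */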
	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					mc_cmd_mem->size, &mc_cmd_mem->dma,
					GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
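	/*
	 * Bit 0x400 in function_mode appears to indicate multi-channel
	 * (FLEX10) operation, where the port is shared by several functions
	 * and each function gets only a quarter of the VLAN table.
	 */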
	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
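/*
 * On Lancer, SLIPORT_STATUS reports an ERR bit (unrecoverable error
 * latched) and an RN bit (reset needed); when both are set, writing the
 * IP (initiate physical reset) bit to SLIPORT_CONTROL asks the adapter
 * to attempt self-recovery, which is then re-checked below.
 */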
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}
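	/*
	 * A 64-bit DMA mask is preferred (with NETIF_F_HIGHDMA advertised
	 * so the stack may hand over highmem buffers); 32-bit addressing
	 * is only a fallback for platforms that cannot provide it.
	 */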
	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
					sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}
	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);
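	/* The carrier is deliberately left off at this point; it is
	 * asserted later, once firmware reports link-up to the driver.
	 */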
	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
						&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				/* lnk_speed is in units of 10 Mbps */
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}
	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
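/*
 * EEH recovery flow: error_detected() quiesces and disables the device,
 * slot_reset() re-enables it and verifies firmware readiness via POST,
 * and resume() re-creates the control structures and reopens the netdev.
 */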
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);