/*
 * Copyright (C) 2005 - 2010 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
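
/* PCI IDs claimed below: BladeEngine2/3 (BE_DEVICE_ID*) and OneConnect
 * (OC_DEVICE_ID*) adapters; OC_DEVICE_ID3 is a Lancer-family device and
 * is registered under the Emulex vendor ID.
 */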
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
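
/* The be_*_notify() helpers below ring hardware doorbells: the ring id and
 * a count are packed into a single 32-bit write. For RQ/TXQ the count posts
 * new entries; for EQ/CQ it acknowledges consumed ("popped") entries and
 * optionally re-arms the queue.
 */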
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}
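
/* Fold the per-RX-queue software counters and the firmware-maintained
 * rxf/erx/port counters into the single net_device_stats structure that
 * the stack reads.
 */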
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                port_stats->rx_input_fifo_overflow +
                rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
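
/* Convert a byte count accumulated over 'ticks' jiffies into an approximate
 * rate in megabits/sec: e.g. 250MB moved in 2 seconds -> 125MB/s -> 1000Mbps.
 */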
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}
static void be_tx_stats_update(struct be_adapter *adapter,
                u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
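
/* Example: a non-GSO skb with linear data and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is already even, so no
 * dummy WRB is appended on BEx chips.
 */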
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                        DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}
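
/* Build the WRB chain for one skb: a header WRB describing the packet as a
 * whole, one data WRB per DMA-mapped piece (linear head first, then each
 * page frag), and possibly a zero-length dummy WRB for even alignment.
 * On a mapping failure the queue head is rewound and the pieces mapped so
 * far are unmapped.
 */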
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
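
/* Note that BE_MAX_JUMBO_FRAME_SIZE below counts the Ethernet header and
 * FCS, so the largest accepted MTU is that value minus (ETH_HLEN +
 * ETH_FCS_LEN) -- 9000 bytes, assuming the usual 9018-byte definition.
 */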
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                        &adapter->mc_cmd_mem);
done:
        return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                        adapter->vf_cfg[vf].vf_if_handle,
                        &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}
static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
        u8 l4_cksm, ipv6, ipcksm;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

        /* Ignore ipcksm for ipv6 pkts */
        return l4_cksm && (ipcksm || ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
        if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

                rxo->last_frag_index = rxq_idx;

                for (i = 0; i < num_rcvd; i++) {
                        page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                        put_page(page_info->page);
                        memset(page_info, 0, sizeof(*page_info));
                        index_inc(&rxq_idx, rxq->len);
                }
        }
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                if (!lancer_chip(adapter))
                        vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
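
/* Rx buffers are rx_frag_size slices of one large compound allocation (the
 * "big page"). Every posted fragment holds its own page reference (the
 * first via alloc_pages, later ones via get_page), so each completion can
 * put_page() its fragment independently; the last user of a page also owns
 * its DMA unmap.
 */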
static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);

        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
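
/* Completion and event entries are owned by HW until it sets their 'valid'
 * dword (or 'evt' word); the getters below test that flag first and issue
 * rmb() before reading the rest of the entry.
 */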
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;

        adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;

        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                /* Init last_frag_index so that the frag index in the first
                 * completion will never match */
                rxo->last_frag_index = 0xffff;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
        if (!eqe->evt)
                return false;
        else
                return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i, tx = 0 , rx = 0;

        if (lancer_chip(adapter)) {
                if (event_peek(&adapter->tx_eq))
                        tx = event_handle(adapter, &adapter->tx_eq);
                for_all_rx_queues(adapter, rxo, i) {
                        if (event_peek(&rxo->rx_eq))
                                rx |= event_handle(adapter, &rxo->rx_eq);
                }

                if (!(tx || rx))
                        return IRQ_NONE;

        } else {
                isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                        (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
                if (!isr)
                        return IRQ_NONE;

                if ((1 << adapter->tx_eq.msix_vec_idx & isr))
                        event_handle(adapter, &adapter->tx_eq);

                for_all_rx_queues(adapter, rxo, i) {
                        if ((1 << rxo->rx_eq.msix_vec_idx & isr))
                                event_handle(adapter, &rxo->rx_eq);
                }
        }

        return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_rx_obj *rxo = dev;
        struct be_adapter *adapter = rxo->adapter;

        event_handle(adapter, &rxo->rx_eq);

        return IRQ_HANDLED;
}
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp, u8 err)
{
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                rxo->stats.rxcp_err++;

        return (tcp_frame && !err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;
        u16 frag_index, num_rcvd;
        u8 err;

        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
                frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
                                                                rxcp);
                num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
                                                                rxcp);

                /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
                if (likely(frag_index != rxo->last_frag_index &&
                                num_rcvd != 0)) {
                        rxo->last_frag_index = frag_index;

                        if (do_gro(rxo, rxcp, err))
                                be_rx_compl_process_gro(adapter, rxo, rxcp);
                        else
                                be_rx_compl_process(adapter, rxo, rxcp);
                }

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        int tx_compl = 0, mcc_compl, status = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                tx_compl++;
        }

        mcc_compl = be_process_mcc(adapter, &status);

        napi_complete(napi);

        if (mcc_compl) {
                struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }

        if (tx_compl) {
                be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                tx_stats(adapter)->be_tx_events++;
                tx_stats(adapter)->be_tx_compl += tx_compl;
        }

        return 1;
}
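
/* UE = Unrecoverable Error. The two UE status CSRs are read via PCI config
 * space, masked with the bits firmware says to ignore, and any remaining
 * cause is logged using the descriptor tables at the top of this file.
 */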
void be_detect_dump_ue(struct be_adapter *adapter)
{
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;

        pci_read_config_dword(adapter->pdev,
                        PCICFG_UE_STATUS_LOW, &ue_status_lo);
        pci_read_config_dword(adapter->pdev,
                        PCICFG_UE_STATUS_HIGH, &ue_status_hi);
        pci_read_config_dword(adapter->pdev,
                        PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
        pci_read_config_dword(adapter->pdev,
                        PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

        if (ue_status_lo || ue_status_hi) {
                adapter->ue_detected = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }

        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
        if (ue_status_hi) {
                for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
                        if (ue_status_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }
}
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions */
        if (!netif_running(adapter->netdev)) {
                int mcc_compl, status = 0;

                mcc_compl = be_process_mcc(adapter, &status);

                if (mcc_compl) {
                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
                }

                goto reschedule;
        }

        if (!adapter->stats_ioctl_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);

        be_tx_rate_update(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                be_rx_rate_update(rxo);
                be_rx_eqd_update(adapter, rxo);

                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo);
                }
        }
        if (!adapter->ue_detected && !lancer_chip(adapter))
                be_detect_dump_ue(adapter);

reschedule:
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
        if (adapter->msix_enabled) {
                pci_disable_msix(adapter->pdev);
                adapter->msix_enabled = false;
        }
}
static int be_num_rxqs_get(struct be_adapter *adapter)
{
        if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
                !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
                        "No support for multiple RX queues\n");
                return 1;
        }
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
        int i, status;

        adapter->num_rx_qs = be_num_rxqs_get(adapter);

        for (i = 0; i < (adapter->num_rx_qs + 1); i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                        adapter->num_rx_qs + 1);
        if (status == 0) {
                goto done;
        } else if (status >= BE_MIN_MSIX_VECTORS) {
                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                status) == 0) {
                        adapter->num_rx_qs = status - 1;
                        dev_warn(&adapter->pdev->dev,
                                "Could alloc only %d MSIx vectors. "
                                "Using %d RX Qs\n", status, adapter->num_rx_qs);
                        goto done;
                }
        }
        return;
done:
        adapter->msix_enabled = true;
}
static void be_sriov_enable(struct be_adapter *adapter)
{
        be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status;

                status = pci_enable_sriov(adapter->pdev, num_vfs);
                adapter->sriov_enabled = status ? false : true;
        }
#endif
}
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        if (adapter->sriov_enabled) {
                pci_disable_sriov(adapter->pdev);
                adapter->sriov_enabled = false;
        }
#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj,
                void *handler, char *desc, void *context)
{
        struct net_device *netdev = adapter->netdev;
        int vec;

        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
        vec = be_msix_vec_get(adapter, eq_obj);
        return request_irq(vec, handler, 0, eq_obj->desc, context);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
                        void *context)
{
        int vec = be_msix_vec_get(adapter, eq_obj);
        free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int status, i;
        char qname[10];

        status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
                                adapter);
        if (status)
                goto err;

        for_all_rx_queues(adapter, rxo, i) {
                sprintf(qname, "rxq%d", i);
                status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
                                qname, rxo);
                if (status)
                        goto err_msix;
        }

        return 0;

err_msix:
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
        dev_warn(&adapter->pdev->dev,
                "MSIX Request IRQ failed - err %d\n", status);
        pci_disable_msix(adapter->pdev);
        adapter->msix_enabled = false;
        return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (adapter->msix_enabled) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
                if (!be_physfn(adapter))
                        return status;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return status;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_rx_obj *rxo;
        int i;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!adapter->msix_enabled) {
                free_irq(netdev->irq, adapter);
                goto done;
        }

        /* MSIx */
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for_all_rx_queues(adapter, rxo, i)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
        adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec, i;

        be_async_mcc_disable(adapter);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;

        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq);
                synchronize_irq(vec);

                for_all_rx_queues(adapter, rxo, i) {
                        vec = be_msix_vec_get(adapter, &rxo->rx_eq);
                        synchronize_irq(vec);
                }
        } else {
                synchronize_irq(netdev->irq);
        }
        be_irq_unregister(adapter);

        for_all_rx_queues(adapter, rxo, i)
                napi_disable(&rxo->rx_eq.napi);

        napi_disable(&tx_eq->napi);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        return 0;
}
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_rx_obj *rxo;
        bool link_up;
        int status, i;
        u8 mac_speed;
        u16 link_speed;

        for_all_rx_queues(adapter, rxo, i) {
                be_post_rx_frags(rxo);
                napi_enable(&rxo->rx_eq.napi);
        }
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, true);

        /* The evt queues are created in unarmed state; arm them */
        for_all_rx_queues(adapter, rxo, i) {
                be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
                be_cq_notify(adapter, rxo->cq.id, true, 0);
        }
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);

        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
                        &link_speed);
        if (status)
                goto err;
        be_link_status_update(adapter, link_up);

        if (be_physfn(adapter)) {
                status = be_vid_config(adapter, false, 0);
                if (status)
                        goto err;

                status = be_cmd_set_flow_control(adapter,
                                adapter->tx_fc, adapter->rx_fc);
                if (status)
                        goto err;
        }

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
        struct be_dma_mem cmd;
        int status = 0;
        u8 mac[ETH_ALEN];

        memset(mac, 0, ETH_ALEN);

        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                    GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;
        memset(cmd.va, 0, cmd.size);

        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
                        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                        cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
                                adapter->netdev->dev_addr, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }

        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf = 0;
        int status = 0;
        u8 mac[ETH_ALEN];

        be_vf_eth_addr_generate(adapter, mac);

        for (vf = 0; vf < num_vfs; vf++) {
                status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Mac address add failed for VF %d\n", vf);
                else
                        memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

                mac[5] += 1;
        }
        return status;
}
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
        u32 vf;

        for (vf = 0; vf < num_vfs; vf++) {
                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                        be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);
        }
}
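
/* Bring-up order in be_setup() matters: the interface (and any VF
 * interfaces) must exist before the TX/RX queues are created, and the MCC
 * queues are created last because the MCC CQ shares the TX event queue.
 */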
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags, vf = 0;
        int status;
        u8 mac[ETH_ALEN];

        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
                                BE_IF_FLAGS_PROMISCUOUS |
                                BE_IF_FLAGS_PASS_L3L4_ERRORS;
                en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

                if (be_multi_rxq(adapter)) {
                        cap_flags |= BE_IF_FLAGS_RSS;
                        en_flags |= BE_IF_FLAGS_RSS;
                }
        }

        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false/* pmac_invalid */,
                        &adapter->if_handle, &adapter->pmac_id, 0);
        if (status != 0)
                goto do_none;

        if (be_physfn(adapter)) {
                while (vf < num_vfs) {
                        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
                                        | BE_IF_FLAGS_BROADCAST;
                        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                        mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf+1);
                        if (status) {
                                dev_err(&adapter->pdev->dev,
                                "Interface Create failed for VF %d\n", vf);
                                goto if_destroy;
                        }
                        adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
                        vf++;
                }
        } else if (!be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
                }
        }

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        if (be_physfn(adapter)) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto mcc_q_destroy;
        }

        adapter->link_speed = -1;

        return 0;

mcc_q_destroy:
        if (be_physfn(adapter))
                be_vf_eth_addr_rem(adapter);
        be_mcc_queues_destroy(adapter);
rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        for (vf = 0; vf < num_vfs; vf++)
                if (adapter->vf_cfg[vf].vf_if_handle)
                        be_cmd_if_destroy(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle);
        be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
        return status;
}
static int be_clear(struct be_adapter *adapter)
{
        if (be_physfn(adapter))
                be_vf_eth_addr_rem(adapter);

        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);

        be_cmd_if_destroy(adapter, adapter->if_handle);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
}
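
/* Everything below implements the ethtool firmware-flash path: a UFI image
 * carries a gen2 or gen3 file header followed by per-component sections
 * (redboot, BIOS variants, iSCSI/FCoE firmware) that are pushed to the
 * flash ROM in 32KB chunks via MCC commands.
 */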
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
{
        u32 crc_offset;
        u8 flashed_crc[4];
        int status;

        crc_offset = hdr_size + img_start + image_size - 4;

        p += crc_offset;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                        (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                "could not get crc from flash, not flashing redboot\n");
                return false;
        }

        /*update redboot only if crc does not match*/
        if (!memcmp(flashed_crc, p, 4))
                return false;

        return true;
}
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* skip the NC-SI component when the running firmware
		 * predates 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* flash redboot only if its CRC differs from what's on chip */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			!be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size +
				(num_of_images * sizeof(struct image_hdr))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return -1;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	return -1;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);
	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* room for the flashrom command header plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw,
						&flash_cmd, num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
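
/* Entry points hooked into the net core. The ndo_set_vf_* and
 * ndo_get_vf_config ops are meaningful only with SR-IOV enabled on the
 * physical function; elsewhere they are expected to fail.
 */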
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
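
/* One-time netdev setup: advertise offloads (checksum, TSO/TSO6, GRO and
 * VLAN acceleration), default both directions of flow control to on, and
 * register one NAPI context per RX event queue plus one shared by TX and
 * the MCC (management) completion path.
 */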
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
			BE_NAPI_WEIGHT);

	netif_stop_queue(netdev);
}
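
/* BAR layout, as mapped below: the physical function maps CSRs from BAR 2;
 * doorbells come from BAR 4 (or BAR 0 for a gen3 VF); the PCI-config shadow
 * comes from BAR 1 on gen2 and BAR 0 on gen3. A VF has no config BAR of its
 * own and instead reaches its window at SRIOV_VF_PCICFG_OFFSET within the
 * doorbell mapping. Lancer exposes only the single doorbell BAR 0.
 */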
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		db_reg = be_physfn(adapter) ? 4 : 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
			pci_resource_start(adapter->pdev, pcicfg_reg),
			pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
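
/* Control-path setup/teardown. The bootstrap mailbox handed to hardware
 * must be 16-byte aligned, so be_ctrl_init() over-allocates by 16 bytes and
 * keeps two views of it: mbox_mem_alloced (what dma_alloc_coherent returned,
 * used only for freeing) and mbox_mem (the PTR_ALIGN'ed window actually
 * used), e.g. an allocation ending in 0x08 yields an aligned va/dma ending
 * in 0x10.
 */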
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					mc_cmd_mem->size, &mc_cmd_mem->dma,
					GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);
unmap_pci_bars:
	be_unmap_pci_bars(adapter);
done:
	return status;
}
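
/* Statistics are fetched over a DMA buffer that lives as long as the
 * device does; it is allocated once here instead of per query.
 */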
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	unregister_netdev(adapter->netdev);
	be_clear(adapter);
	be_stats_cleanup(adapter);
	be_ctrl_cleanup(adapter);
	be_sriov_disable(adapter);
	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
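
/* Pull the adapter's identity from firmware: FW version, port number and
 * function mode/capabilities. Only the physical function can query its
 * permanent MAC here; a VF picks its MAC up later in be_setup().
 */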
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
		if (status)
			return status;
		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* multi-channel mode: only a quarter of the VLAN table is ours */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
	return 0;
}
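
/* Map the PCI device ID to a controller generation. For OC_DEVICE_ID3
 * (Lancer) the SLI interface register is validated as well, the SLI family
 * is recorded, and VFs are rejected since they are not supported there yet.
 */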
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
				if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
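
/* Probe ordering matters: BARs and the mailbox must exist before the first
 * firmware command (POST/fw_init/reset_function); stats and config come
 * before MSI-X and queue setup; and the netdev is registered only after
 * be_setup() succeeds, so no entry point can see half-initialized state.
 * The error ladder below unwinds in exactly the reverse order.
 */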
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;
	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;
	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}
	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;
	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);
	INIT_DELAYED_WORK(&adapter->work, be_worker);
	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
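
/* Power management: suspend arms wake-on-LAN if configured, then tears the
 * whole data path down with be_clear(); resume rebuilds it from scratch.
 * No queue state survives the transition.
 */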
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);
	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);
	be_msix_disable(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	status = pci_enable_device(pdev);
	if (status)
		return status;
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	be_cmd_reset_function(adapter);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
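
/* PCI EEH (error recovery) callbacks: error_detected quiesces the driver
 * and reports whether a slot reset might help, slot_reset re-enables the
 * device and re-runs POST to confirm firmware is alive, and resume rebuilds
 * the queues and reopens the interface, mirroring be_resume().
 */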
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");
	adapter->eeh_err = true;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;
	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
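
/* Module entry: sanitize module parameters before touching the PCI core.
 * rx_frag_size must be 2048, 4096 or 8192 since RX buffers are carved into
 * fixed power-of-two fragments, and num_vfs is capped at the 32 VFs the
 * driver supports.
 */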
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);