/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the mac address of the guest OS interface in
 *	a virtualization environment.
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *
 * max_config_vpath:
 *	This configures the maximum number of VPATHs configured for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 ******************************************************************************/
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
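/*
 * Note (illustrative): vpath_selector[n - 1] is used as a mask when folding
 * a flow hash onto n configured vpaths; each entry is the largest
 * (2^k - 1) value not exceeding the vpath count. For example, with 6 vpaths
 * the mask is vpath_selector[5] == 7, and any masked result >= 6 is clamped
 * to the last vpath in vxge_get_vpath_no() below.
 */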
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
						NR_SKB_COMPLETED, &more);
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}
/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}

void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}

void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}

static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;

	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;
	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff*
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
	VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}
/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
static void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
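/*
 * Illustrative note on the batching above: descriptors are normally
 * re-posted with vxge_hw_ring_rxd_post_post(), which does not issue a
 * write memory barrier. Every VXGE_HW_RXSYNC_FREQ_CNT descriptors the
 * previously remembered *first_dtr is posted with the _wmb variant so
 * the NIC sees a consistent batch, and the current descriptor becomes
 * the new batch head. The tail of the final batch is flushed with
 * vxge_hw_ring_rxd_post_post_wmb() at the end of vxge_rx_1b_compl().
 */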
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {

			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {

				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}
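/*
 * Sketch of the RX buffer strategy above (a reading of the code, not
 * extra documented behaviour): frames longer than
 * VXGE_LL_RX_COPY_THRESHOLD keep their original DMA buffer while a
 * fresh skb is allocated and mapped for the descriptor ("buffer swap");
 * short frames are instead copied into a small new skb and the original
 * buffer is synced and re-posted unchanged ("copy break"). This trades
 * one memcpy on small frames for fewer large allocations in the fast
 * path.
 */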
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	vxge_wake_tx_queue(fifo, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
			int *do_lock)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP)
				*do_lock = 0;
		}
	}
	return counter;
}
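/*
 * Illustrative example of the steering hash above, assuming four
 * configured vpaths (vpath_selector[3] == 3): a flow with TCP source
 * port 33000 and destination port 80 yields (33000 + 80) & 3 == 0, so
 * the frame is queued on vpath 0. The clamp to queue_len - 1 only
 * matters when the vpath count is not a power of two.
 */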
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
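/*
 * Note on the catch-basin fallback above (a reading of the code, not
 * additional documented behaviour): when every vpath's DA table is
 * full, the first vpath is switched into catch-basin mode via the
 * mrpcim register write, so frames for addresses that could not be
 * offloaded still land somewhere deterministic. Such addresses are
 * tracked only in the software list (VXGE_LL_MAC_ADDR_IN_LIST) rather
 * than in the hardware DA table.
 */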
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt, just the trans_start variable will
 * not be updated.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		vxge_stop_tx_queue(fifo);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {

		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */

	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}
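/*
 * Locking note for vxge_xmit() (an observation of the code above): as a
 * NETIF_F_LLTX-style driver it takes a per-fifo tx_lock itself instead
 * of relying on the core xmit lock. For UDP flows vxge_get_vpath_no()
 * clears do_spin_tx_lock, so the lock is only trylock'd and the stack
 * is asked to requeue (NETDEV_TX_LOCKED) on contention rather than
 * spinning in the hot path.
 */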
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors.
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(mclist, dev) {
			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d not open", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return 0;
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 *
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);
	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
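/*
 * Illustrative RTH example (values assumed, not taken from a real
 * config): with rth_bkt_sz == 3 and 3 vpaths, the loop above builds an
 * 8-bucket table
 *	itable = {0, 1, 2, 3, 4, 5, 6, 7}
 *	mtable = {0, 1, 2, 0, 1, 2, 0, 1}
 * so the receive hash of a flow picks a bucket, and that bucket's
 * mtable entry selects the vpath (and therefore the ring) that will
 * receive the flow.
 */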
/* Add a mac address to the list */
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
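/*
 * Design note (inferred from the duplicate_mode selection above):
 * multicast entries may legitimately be programmed on several vpaths at
 * once, so duplicates are added; a unicast station address must resolve
 * to a single steering decision, so a duplicate unicast add replaces
 * the existing hardware entry instead.
 */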
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {

		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {

		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++)
		if (vdev->vpaths[i].handle) {
			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
					== VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}

	return status;
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	int i;
	for (i = index; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
			vxge_hw_vpath_close(vdev->vpaths[i].handle);
			vdev->stats.vpaths_open--;
		}
		vdev->vpaths[i].is_open = 0;
		vdev->vpaths[i].handle = NULL;
	}
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status;
	int i;
	u32 vp_id = 0;
	struct vxge_hw_vpath_attr attr;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_assert(vdev->vpaths[i].is_configured);
		attr.vp_id = vdev->vpaths[i].device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;

		vdev->vpaths[i].ring.ndev = vdev->ndev;
		vdev->vpaths[i].ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr,
				&(vdev->vpaths[i].handle));
		if (status == VXGE_HW_OK) {
			vdev->vpaths[i].fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vdev->vpaths[i].ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vdev->vpaths[i].fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vdev->vpaths[i].fifo.ndev = vdev->ndev;
			vdev->vpaths[i].fifo.pdev = vdev->pdev;
			vdev->vpaths[i].fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vdev->vpaths[i].ring.rx_vector_no = 0;
			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
			vdev->vpaths[i].is_open = 1;
			vdev->vp_handles[i] = vdev->vpaths[i].handle;
			vdev->vpaths[i].ring.gro_enable =
						vdev->config.gro_enable;
			vdev->vpaths[i].ring.vlan_tag_strip =
						vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vdev->vpaths[i].device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id =
		  ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
		  vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *) dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}

static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}

static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int alarm_msix_id =
		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
			alarm_msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {

			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
				alarm_msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
2248 static int vxge_alloc_msix(struct vxgedev *vdev)
2252 int alarm_msix_id = 0, msix_intr_vect = 0;
2255 /* Tx/Rx MSIX Vectors count */
2256 vdev->intr_cnt = vdev->no_of_vpath * 2;
2258 /* Alarm MSIX Vectors count */
2261 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2262 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2264 if (!vdev->entries) {
2265 vxge_debug_init(VXGE_ERR,
2266 "%s: memory allocation failed",
2271 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
2273 if (!vdev->vxge_entries) {
2274 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2276 kfree(vdev->entries);
2280 /* Last vector in the list is used for alarm */
2281 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2282 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2284 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2286 /* Initialize the fifo vector */
2287 vdev->entries[j].entry = msix_intr_vect;
2288 vdev->vxge_entries[j].entry = msix_intr_vect;
2289 vdev->vxge_entries[j].in_use = 0;
2292 /* Initialize the ring vector */
2293 vdev->entries[j].entry = msix_intr_vect + 1;
2294 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2295 vdev->vxge_entries[j].in_use = 0;
2299 /* Initialize the alarm vector */
2300 vdev->entries[j].entry = alarm_msix_id;
2301 vdev->vxge_entries[j].entry = alarm_msix_id;
2302 vdev->vxge_entries[j].in_use = 0;
2304 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2305 /* If the driver request exceeds the available IRQs, retry with a smaller number of vectors */
2309 vxge_debug_init(VXGE_ERR,
2310 "%s: MSI-X enable failed for %d vectors, available: %d",
2311 VXGE_DRIVER_NAME, intr_cnt, ret);
2312 vdev->max_vpath_supported = vdev->no_of_vpath;
2313 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2315 /* Reset the alarm vector setting */
2316 vdev->entries[j].entry = 0;
2317 vdev->vxge_entries[j].entry = 0;
2319 /* Initialize the alarm vector with new setting */
2320 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2321 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2322 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2324 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2326 vxge_debug_init(VXGE_ERR,
2327 "%s: MSI-X enabled for %d vectors",
2328 VXGE_DRIVER_NAME, intr_cnt);
2332 vxge_debug_init(VXGE_ERR,
2333 "%s: MSI-X enable failed for %d vectors, ret: %d",
2334 VXGE_DRIVER_NAME, intr_cnt, ret);
2335 kfree(vdev->entries);
2336 kfree(vdev->vxge_entries);
2337 vdev->entries = NULL;
2338 vdev->vxge_entries = NULL;
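/*
 * Minimal sketch of the pci_enable_msix() retry contract relied on
 * above: 0 means success, a negative value is a hard error, and a
 * positive value reports how many vectors are actually available so
 * the caller can shrink its request and try again. The helper name is
 * hypothetical.
 */
static int vxge_example_enable_msix(struct pci_dev *pdev,
				    struct msix_entry *entries, int want)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0)		/* only 'ret' vectors are available */
		ret = pci_enable_msix(pdev, entries, ret);

	return ret;		/* 0 on success, negative on failure */
}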
2344 static int vxge_enable_msix(struct vxgedev *vdev)
2348 enum vxge_hw_status status;
2349 /* 0 - Tx, 1 - Rx */
2351 int alarm_msix_id = 0, msix_intr_vect = 0;
2354 /* allocate msix vectors */
2355 ret = vxge_alloc_msix(vdev);
2357 /* Last vector in the list is used for alarm */
2359 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2360 for (i = 0; i < vdev->no_of_vpath; i++) {
2362 /* If the fifo or ring is not enabled,
2363 the MSI-X vector for it should be set to 0.
2364 Hence initializing this array to all 0s. */
2366 memset(tim_msix_id, 0, sizeof(tim_msix_id));
2367 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2368 tim_msix_id[0] = msix_intr_vect;
2370 tim_msix_id[1] = msix_intr_vect + 1;
2371 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
2373 status = vxge_hw_vpath_msix_set(
2374 vdev->vpaths[i].handle,
2375 tim_msix_id, alarm_msix_id);
2376 if (status != VXGE_HW_OK) {
2377 vxge_debug_init(VXGE_ERR,
2378 "vxge_hw_vpath_msix_set "
2379 "failed with status : %x", status);
2380 kfree(vdev->entries);
2381 kfree(vdev->vxge_entries);
2382 pci_disable_msix(vdev->pdev);
2391 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2395 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
2397 if (vdev->vxge_entries[intr_cnt].in_use) {
2398 synchronize_irq(vdev->entries[intr_cnt].vector);
2399 free_irq(vdev->entries[intr_cnt].vector,
2400 vdev->vxge_entries[intr_cnt].arg);
2401 vdev->vxge_entries[intr_cnt].in_use = 0;
2405 kfree(vdev->entries);
2406 kfree(vdev->vxge_entries);
2407 vdev->entries = NULL;
2408 vdev->vxge_entries = NULL;
2410 if (vdev->config.intr_type == MSI_X)
2411 pci_disable_msix(vdev->pdev);
2415 static void vxge_rem_isr(struct vxgedev *vdev)
2417 struct __vxge_hw_device *hldev;
2418 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2420 #ifdef CONFIG_PCI_MSI
2421 if (vdev->config.intr_type == MSI_X) {
2422 vxge_rem_msix_isr(vdev);
2425 if (vdev->config.intr_type == INTA) {
2426 synchronize_irq(vdev->pdev->irq);
2427 free_irq(vdev->pdev->irq, vdev);
2431 static int vxge_add_isr(struct vxgedev *vdev)
2434 #ifdef CONFIG_PCI_MSI
2435 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2436 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2438 if (vdev->config.intr_type == MSI_X)
2439 ret = vxge_enable_msix(vdev);
2442 vxge_debug_init(VXGE_ERR,
2443 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2444 vxge_debug_init(VXGE_ERR,
2445 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2446 vdev->config.intr_type = INTA;
2449 if (vdev->config.intr_type == MSI_X) {
2451 intr_idx < (vdev->no_of_vpath *
2452 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2454 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2459 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2460 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
2461 vdev->ndev->name, pci_fun, vp_idx,
2462 vdev->entries[intr_cnt].entry);
2464 vdev->entries[intr_cnt].vector,
2465 vxge_tx_msix_handle, 0,
2466 vdev->desc[intr_cnt],
2467 &vdev->vpaths[vp_idx].fifo);
2468 vdev->vxge_entries[intr_cnt].arg =
2469 &vdev->vpaths[vp_idx].fifo;
2473 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2474 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
2475 vdev->ndev->name, pci_fun, vp_idx,
2476 vdev->entries[intr_cnt].entry);
2478 vdev->entries[intr_cnt].vector,
2479 vxge_rx_msix_napi_handle,
2481 vdev->desc[intr_cnt],
2482 &vdev->vpaths[vp_idx].ring);
2483 vdev->vxge_entries[intr_cnt].arg =
2484 &vdev->vpaths[vp_idx].ring;
2490 vxge_debug_init(VXGE_ERR,
2491 "%s: MSIX - %d Registration failed",
2492 vdev->ndev->name, intr_cnt);
2493 vxge_rem_msix_isr(vdev);
2494 vdev->config.intr_type = INTA;
2495 vxge_debug_init(VXGE_ERR,
2496 "%s: Defaulting to INTA"
2497 , vdev->ndev->name);
2502 /* We requested this MSI-X interrupt */
2503 vdev->vxge_entries[intr_cnt].in_use = 1;
2504 vxge_hw_vpath_msix_unmask(
2505 vdev->vpaths[vp_idx].handle,
2510 /* Point to next vpath handler */
2511 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2512 (vp_idx < (vdev->no_of_vpath - 1)))
2516 intr_cnt = vdev->max_vpath_supported * 2;
2517 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2518 "%s:vxge Alarm fn: %d MSI-X: %d",
2519 vdev->ndev->name, pci_fun,
2520 vdev->entries[intr_cnt].entry);
2521 /* For Alarm interrupts */
2522 ret = request_irq(vdev->entries[intr_cnt].vector,
2523 vxge_alarm_msix_handle, 0,
2524 vdev->desc[intr_cnt],
2525 &vdev->vpaths[vp_idx]);
2527 vxge_debug_init(VXGE_ERR,
2528 "%s: MSIX - %d Registration failed",
2529 vdev->ndev->name, intr_cnt);
2530 vxge_rem_msix_isr(vdev);
2531 vdev->config.intr_type = INTA;
2532 vxge_debug_init(VXGE_ERR,
2533 "%s: Defaulting to INTA",
2538 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2540 vdev->vxge_entries[intr_cnt].in_use = 1;
2541 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
2545 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2547 if (vdev->config.intr_type == INTA) {
2548 vxge_hw_device_set_intr_type(vdev->devh,
2549 VXGE_HW_INTR_MODE_IRQLINE);
2550 vxge_hw_vpath_tti_ci_set(vdev->devh,
2551 vdev->vpaths[0].device_id);
2552 ret = request_irq((int) vdev->pdev->irq,
2554 IRQF_SHARED, vdev->desc[0], vdev);
2556 vxge_debug_init(VXGE_ERR,
2557 "%s %s-%d: ISR registration failed",
2558 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2561 vxge_debug_init(VXGE_TRACE,
2562 "new %s-%d line allocated",
2563 "IRQ", vdev->pdev->irq);
2569 static void vxge_poll_vp_reset(unsigned long data)
2571 struct vxgedev *vdev = (struct vxgedev *)data;
2574 for (i = 0; i < vdev->no_of_vpath; i++) {
2575 if (test_bit(i, &vdev->vp_reset)) {
2576 vxge_reset_vpath(vdev, i);
2580 if (j && (vdev->config.intr_type != MSI_X)) {
2581 vxge_hw_device_unmask_all(vdev->devh);
2582 vxge_hw_device_flush_io(vdev->devh);
2585 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2588 static void vxge_poll_vp_lockup(unsigned long data)
2590 struct vxgedev *vdev = (struct vxgedev *)data;
2592 struct vxge_ring *ring;
2593 enum vxge_hw_status status = VXGE_HW_OK;
2595 for (i = 0; i < vdev->no_of_vpath; i++) {
2596 ring = &vdev->vpaths[i].ring;
2597 /* Did this vpath receive any packets? */
2598 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2599 status = vxge_hw_vpath_check_leak(ring->handle);
2601 /* Did it receive any packets last time? */
2602 if ((VXGE_HW_FAIL == status) &&
2603 (VXGE_HW_FAIL == ring->last_status)) {
2605 /* schedule vpath reset */
2606 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2608 /* disable interrupts for this vpath */
2609 vxge_vpath_intr_disable(vdev, i);
2611 /* stop the queue for this vpath */
2612 vxge_stop_tx_queue(&vdev->vpaths[i].
2618 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2619 ring->last_status = status;
2622 /* Check every millisecond */
2623 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
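/*
 * Sketch of the lockup heuristic polled above (illustrative only): a
 * vpath is suspected hung when its Rx frame counter has not moved
 * since the previous poll and the hardware leak check has failed on
 * two consecutive polls; only then is a vpath reset scheduled.
 */
static inline int vxge_example_vpath_hung(u64 prev_rx_frms, u64 rx_frms,
					  enum vxge_hw_status last_status,
					  enum vxge_hw_status cur_status)
{
	return (prev_rx_frms == rx_frms) &&
	       (last_status == VXGE_HW_FAIL) &&
	       (cur_status == VXGE_HW_FAIL);
}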
2628 * @dev: pointer to the device structure.
2630 * This function is the open entry point of the driver. It mainly calls a
2631 * function to allocate Rx buffers and inserts them into the buffer
2632 * descriptors and then enables the Rx part of the NIC.
2633 * Return value: '0' on success and an appropriate (-)ve integer as
2634 * defined in errno.h file on failure.
2637 vxge_open(struct net_device *dev)
2639 enum vxge_hw_status status;
2640 struct vxgedev *vdev;
2641 struct __vxge_hw_device *hldev;
2644 u64 val64, function_mode;
2645 vxge_debug_entryexit(VXGE_TRACE,
2646 "%s: %s:%d", dev->name, __func__, __LINE__);
2648 vdev = (struct vxgedev *)netdev_priv(dev);
2649 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2650 function_mode = vdev->config.device_hw_info.function_mode;
2652 /* Make sure the link is off by default every time the NIC is brought up */
2654 netif_carrier_off(dev);
2657 status = vxge_open_vpaths(vdev);
2658 if (status != VXGE_HW_OK) {
2659 vxge_debug_init(VXGE_ERR,
2660 "%s: fatal: Vpath open failed", vdev->ndev->name);
2665 vdev->mtu = dev->mtu;
2667 status = vxge_add_isr(vdev);
2668 if (status != VXGE_HW_OK) {
2669 vxge_debug_init(VXGE_ERR,
2670 "%s: fatal: ISR add failed", dev->name);
2676 if (vdev->config.intr_type != MSI_X) {
2677 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2678 vdev->config.napi_weight);
2679 napi_enable(&vdev->napi);
2680 for (i = 0; i < vdev->no_of_vpath; i++)
2681 vdev->vpaths[i].ring.napi_p = &vdev->napi;
2683 for (i = 0; i < vdev->no_of_vpath; i++) {
2684 netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
2685 vxge_poll_msix, vdev->config.napi_weight);
2686 napi_enable(&vdev->vpaths[i].ring.napi);
2687 vdev->vpaths[i].ring.napi_p =
2688 &vdev->vpaths[i].ring.napi;
2693 if (vdev->config.rth_steering) {
2694 status = vxge_rth_configure(vdev);
2695 if (status != VXGE_HW_OK) {
2696 vxge_debug_init(VXGE_ERR,
2697 "%s: fatal: RTH configuration failed",
2704 for (i = 0; i < vdev->no_of_vpath; i++) {
2705 /* set initial mtu before enabling the device */
2706 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
2708 if (status != VXGE_HW_OK) {
2709 vxge_debug_init(VXGE_ERR,
2710 "%s: fatal: can not set new MTU", dev->name);
2716 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2717 vxge_debug_init(vdev->level_trace,
2718 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2719 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2721 /* Reprogram the DA table with populated mac addresses */
2722 for (i = 0; i < vdev->no_of_vpath; i++) {
2723 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
2724 vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
2727 /* Enable vpaths to sniff all unicast/multicast traffic that is not
2728 * addressed to them. We allow promiscuous mode for PF only. */
2732 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2733 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2735 vxge_hw_mgmt_reg_write(vdev->devh,
2736 vxge_hw_mgmt_reg_type_mrpcim,
2738 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2739 rxmac_authorize_all_addr),
2742 vxge_hw_mgmt_reg_write(vdev->devh,
2743 vxge_hw_mgmt_reg_type_mrpcim,
2745 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2746 rxmac_authorize_all_vid),
2749 vxge_set_multicast(dev);
2751 /* Enable broadcast and multicast for all vpaths */
2752 for (i = 0; i < vdev->no_of_vpath; i++) {
2753 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
2754 if (status != VXGE_HW_OK)
2755 vxge_debug_init(VXGE_ERR,
2756 "%s : cannot enable bcast for vpath "
2757 "id %d", dev->name, i);
2758 if (vdev->config.addr_learn_en) {
2760 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
2761 if (status != VXGE_HW_OK)
2762 vxge_debug_init(VXGE_ERR,
2763 "%s : cannot enable mcast for vpath "
2764 "id %d", dev->name, i);
2768 vxge_hw_device_setpause_data(vdev->devh, 0,
2769 vdev->config.tx_pause_enable,
2770 vdev->config.rx_pause_enable);
2772 if (vdev->vp_reset_timer.function == NULL)
2773 vxge_os_timer(vdev->vp_reset_timer,
2774 vxge_poll_vp_reset, vdev, (HZ/2));
2776 if (vdev->vp_lockup_timer.function == NULL)
2777 vxge_os_timer(vdev->vp_lockup_timer,
2778 vxge_poll_vp_lockup, vdev, (HZ/2));
2780 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2784 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2785 netif_carrier_on(vdev->ndev);
2786 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
2787 vdev->stats.link_up++;
2790 vxge_hw_device_intr_enable(vdev->devh);
2794 for (i = 0; i < vdev->no_of_vpath; i++) {
2795 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
2797 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
2800 vxge_start_all_tx_queue(vdev);
2807 if (vdev->config.intr_type != MSI_X)
2808 napi_disable(&vdev->napi);
2810 for (i = 0; i < vdev->no_of_vpath; i++)
2811 napi_disable(&vdev->vpaths[i].ring.napi);
2815 vxge_close_vpaths(vdev, 0);
2817 vxge_debug_entryexit(VXGE_TRACE,
2818 "%s: %s:%d Exiting...",
2819 dev->name, __func__, __LINE__);
2823 /* Loop through the mac address list and delete all the entries */
2824 void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2827 struct list_head *entry, *next;
2828 if (list_empty(&vpath->mac_addr_list))
2831 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2833 kfree((struct vxge_mac_addrs *)entry);
2837 static void vxge_napi_del_all(struct vxgedev *vdev)
2840 if (vdev->config.intr_type != MSI_X)
2841 netif_napi_del(&vdev->napi);
2843 for (i = 0; i < vdev->no_of_vpath; i++)
2844 netif_napi_del(&vdev->vpaths[i].ring.napi);
2849 int do_vxge_close(struct net_device *dev, int do_io)
2851 enum vxge_hw_status status;
2852 struct vxgedev *vdev;
2853 struct __vxge_hw_device *hldev;
2855 u64 val64, vpath_vector;
2856 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2857 dev->name, __func__, __LINE__);
2859 vdev = (struct vxgedev *)netdev_priv(dev);
2860 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2862 if (unlikely(!is_vxge_card_up(vdev)))
2865 /* If vxge_handle_crit_err task is executing,
2866 * wait till it completes. */
2867 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2870 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2872 /* Put the vpath back in normal mode */
2873 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2874 status = vxge_hw_mgmt_reg_read(vdev->devh,
2875 vxge_hw_mgmt_reg_type_mrpcim,
2878 struct vxge_hw_mrpcim_reg,
2879 rts_mgr_cbasin_cfg),
2882 if (status == VXGE_HW_OK) {
2883 val64 &= ~vpath_vector;
2884 status = vxge_hw_mgmt_reg_write(vdev->devh,
2885 vxge_hw_mgmt_reg_type_mrpcim,
2888 struct vxge_hw_mrpcim_reg,
2889 rts_mgr_cbasin_cfg),
2893 /* Remove function 0 from promiscuous mode */
2894 vxge_hw_mgmt_reg_write(vdev->devh,
2895 vxge_hw_mgmt_reg_type_mrpcim,
2897 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2898 rxmac_authorize_all_addr),
2901 vxge_hw_mgmt_reg_write(vdev->devh,
2902 vxge_hw_mgmt_reg_type_mrpcim,
2904 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2905 rxmac_authorize_all_vid),
2910 del_timer_sync(&vdev->vp_lockup_timer);
2912 del_timer_sync(&vdev->vp_reset_timer);
2915 if (vdev->config.intr_type != MSI_X)
2916 napi_disable(&vdev->napi);
2918 for (i = 0; i < vdev->no_of_vpath; i++)
2919 napi_disable(&vdev->vpaths[i].ring.napi);
2922 netif_carrier_off(vdev->ndev);
2923 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
2924 vxge_stop_all_tx_queue(vdev);
2926 /* Note that at this point xmit() is stopped by upper layer */
2928 vxge_hw_device_intr_disable(vdev->devh);
2934 vxge_napi_del_all(vdev);
2937 vxge_reset_all_vpaths(vdev);
2939 vxge_close_vpaths(vdev, 0);
2941 vxge_debug_entryexit(VXGE_TRACE,
2942 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2944 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2951 * @dev: device pointer.
2953 * This is the stop entry point of the driver. It needs to undo exactly
2954 * whatever was done by the open entry point, thus it's usually referred to
2955 * as the close function. Among other things, this function mainly stops the
2956 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2957 * Return value: '0' on success and an appropriate (-)ve integer as
2958 * defined in errno.h file on failure.
2961 vxge_close(struct net_device *dev)
2963 do_vxge_close(dev, 1);
2969 * @dev: net device pointer.
2970 * @new_mtu :the new MTU size for the device.
2972 * A driver entry point to change MTU size for the device. Before changing
2973 * the MTU the device must be stopped.
2975 static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2977 struct vxgedev *vdev = netdev_priv(dev);
2979 vxge_debug_entryexit(vdev->level_trace,
2980 "%s:%d", __func__, __LINE__);
2981 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
2982 vxge_debug_init(vdev->level_err,
2983 "%s: mtu size is invalid", dev->name);
2987 /* check if device is down already */
2988 if (unlikely(!is_vxge_card_up(vdev))) {
2989 /* just store the new value; it will be used later in open() */
2991 vxge_debug_init(vdev->level_err,
2992 "%s", "device is down on MTU change");
2996 vxge_debug_init(vdev->level_trace,
2997 "trying to apply new MTU %d", new_mtu);
2999 if (vxge_close(dev))
3003 vdev->mtu = new_mtu;
3008 vxge_debug_init(vdev->level_trace,
3009 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3011 vxge_debug_entryexit(vdev->level_trace,
3012 "%s:%d Exiting...", __func__, __LINE__);
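/*
 * The MTU update above follows a stop/modify/restart sequence when the
 * interface is up; a minimal sketch of that flow, with error handling
 * elided (hypothetical helper):
 */
static int vxge_example_apply_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_close(dev);	/* quiesce Tx/Rx and release the vpaths */
	vdev->mtu = new_mtu;	/* programmed per vpath in vxge_open() */
	dev->mtu = new_mtu;
	return vxge_open(dev);	/* reopen with the new MTU applied */
}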
3019 * @dev: pointer to the device structure
3021 * Updates the device statistics structure. This function updates the device
3022 * statistics structure in the net_device structure and returns a pointer to it.
3025 static struct net_device_stats *
3026 vxge_get_stats(struct net_device *dev)
3028 struct vxgedev *vdev;
3029 struct net_device_stats *net_stats;
3032 vdev = netdev_priv(dev);
3034 net_stats = &vdev->stats.net_stats;
3036 memset(net_stats, 0, sizeof(struct net_device_stats));
3038 for (k = 0; k < vdev->no_of_vpath; k++) {
3039 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
3040 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
3041 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
3042 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
3043 net_stats->rx_dropped +=
3044 vdev->vpaths[k].ring.stats.rx_dropped;
3046 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
3047 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
3048 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
3056 * @dev: Device pointer.
3057 * @ifr: An IOCTL specific structure, that can contain a pointer to
3058 * a proprietary structure used to pass information to the driver.
3059 * @cmd: This is used to distinguish between the different commands that
3060 * can be passed to the IOCTL functions.
3062 * Entry point for the Ioctl.
3064 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3071 * @dev: pointer to net device structure
3073 * Watchdog for transmit side.
3074 * This function is triggered if the Tx Queue is stopped
3075 * for a pre-defined amount of time when the Interface is still up.
3078 vxge_tx_watchdog(struct net_device *dev)
3080 struct vxgedev *vdev;
3082 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3084 vdev = (struct vxgedev *)netdev_priv(dev);
3086 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3089 vxge_debug_entryexit(VXGE_TRACE,
3090 "%s:%d Exiting...", __func__, __LINE__);
3094 * vxge_vlan_rx_register
3095 * @dev: net device pointer.
3098 * Vlan group registration
3101 vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3103 struct vxgedev *vdev;
3104 struct vxge_vpath *vpath;
3107 enum vxge_hw_status status;
3110 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3112 vdev = (struct vxgedev *)netdev_priv(dev);
3114 vpath = &vdev->vpaths[0];
3115 if ((NULL == grp) && (vpath->is_open)) {
3116 /* Get the first vlan */
3117 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3119 while (status == VXGE_HW_OK) {
3121 /* Delete this vlan from the vid table */
3122 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3123 vpath = &vdev->vpaths[vp];
3124 if (!vpath->is_open)
3127 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3130 /* Get the next vlan to be deleted */
3131 vpath = &vdev->vpaths[0];
3132 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3138 for (i = 0; i < vdev->no_of_vpath; i++) {
3139 if (vdev->vpaths[i].is_configured)
3140 vdev->vpaths[i].ring.vlgrp = grp;
3143 vxge_debug_entryexit(VXGE_TRACE,
3144 "%s:%d Exiting...", __func__, __LINE__);
3148 * vxge_vlan_rx_add_vid
3149 * @dev: net device pointer.
3152 * Add the vlan id to the device's vlan id table
3155 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3157 struct vxgedev *vdev;
3158 struct vxge_vpath *vpath;
3161 vdev = (struct vxgedev *)netdev_priv(dev);
3163 /* Add this vlan to the vid table of each open vpath */
3164 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3165 vpath = &vdev->vpaths[vp_id];
3166 if (!vpath->is_open)
3168 vxge_hw_vpath_vid_add(vpath->handle, vid);
3173 * vxge_vlan_rx_kill_vid
3174 * @dev: net device pointer.
3177 * Remove the vlan id from the device's vlan id table
3180 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3182 struct vxgedev *vdev;
3183 struct vxge_vpath *vpath;
3186 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3188 vdev = (struct vxgedev *)netdev_priv(dev);
3190 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3192 /* Delete this vlan from the vid table */
3193 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3194 vpath = &vdev->vpaths[vp_id];
3195 if (!vpath->is_open)
3197 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3199 vxge_debug_entryexit(VXGE_TRACE,
3200 "%s:%d Exiting...", __func__, __LINE__);
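/*
 * Both VLAN entry points above apply the same id to every open vpath
 * so that the per-vpath vid tables stay mirrored. A minimal sketch of
 * that fan-out pattern (hypothetical helper):
 */
static void vxge_example_vid_fanout(struct vxgedev *vdev,
				    unsigned short vid, int add)
{
	struct vxge_vpath *vpath;
	int vp_id;

	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		if (add)
			vxge_hw_vpath_vid_add(vpath->handle, vid);
		else
			vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
}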
3203 static const struct net_device_ops vxge_netdev_ops = {
3204 .ndo_open = vxge_open,
3205 .ndo_stop = vxge_close,
3206 .ndo_get_stats = vxge_get_stats,
3207 .ndo_start_xmit = vxge_xmit,
3208 .ndo_validate_addr = eth_validate_addr,
3209 .ndo_set_multicast_list = vxge_set_multicast,
3211 .ndo_do_ioctl = vxge_ioctl,
3213 .ndo_set_mac_address = vxge_set_mac_addr,
3214 .ndo_change_mtu = vxge_change_mtu,
3215 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3216 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3217 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3219 .ndo_tx_timeout = vxge_tx_watchdog,
3220 #ifdef CONFIG_NET_POLL_CONTROLLER
3221 .ndo_poll_controller = vxge_netpoll,
3225 int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3226 struct vxge_config *config,
3227 int high_dma, int no_of_vpath,
3228 struct vxgedev **vdev_out)
3230 struct net_device *ndev;
3231 enum vxge_hw_status status = VXGE_HW_OK;
3232 struct vxgedev *vdev;
3233 int i, ret = 0, no_of_queue = 1;
3237 if (config->tx_steering_type == TX_MULTIQ_STEERING)
3238 no_of_queue = no_of_vpath;
3240 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3244 vxge_hw_device_trace_level_get(hldev),
3245 "%s : device allocation failed", __func__);
3250 vxge_debug_entryexit(
3251 vxge_hw_device_trace_level_get(hldev),
3252 "%s: %s:%d Entering...",
3253 ndev->name, __func__, __LINE__);
3255 vdev = netdev_priv(ndev);
3256 memset(vdev, 0, sizeof(struct vxgedev));
3260 vdev->pdev = hldev->pdev;
3261 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3262 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3264 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3266 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3267 NETIF_F_HW_VLAN_FILTER;
3268 /* Driver entry points */
3269 ndev->irq = vdev->pdev->irq;
3270 ndev->base_addr = (unsigned long) hldev->bar0;
3272 ndev->netdev_ops = &vxge_netdev_ops;
3274 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3276 initialize_ethtool_ops(ndev);
3278 /* Allocate memory for vpath */
3279 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3280 no_of_vpath, GFP_KERNEL);
3281 if (!vdev->vpaths) {
3282 vxge_debug_init(VXGE_ERR,
3283 "%s: vpath memory allocation failed",
3289 ndev->features |= NETIF_F_SG;
3291 ndev->features |= NETIF_F_HW_CSUM;
3292 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3293 "%s : checksumming enabled", __func__);
3296 ndev->features |= NETIF_F_HIGHDMA;
3297 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3298 "%s : using High DMA", __func__);
3301 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3303 if (vdev->config.gro_enable)
3304 ndev->features |= NETIF_F_GRO;
3306 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
3307 ndev->real_num_tx_queues = no_of_vpath;
3310 ndev->features |= NETIF_F_LLTX;
3313 for (i = 0; i < no_of_vpath; i++)
3314 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
3316 if (register_netdev(ndev)) {
3317 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3318 "%s: %s : device registration failed!",
3319 ndev->name, __func__);
3324 /* Set the factory defined MAC address initially */
3325 ndev->addr_len = ETH_ALEN;
3327 /* Set the link state to off at this point; when the link change
3328 * interrupt arrives, the state is updated automatically. */
3331 netif_carrier_off(ndev);
3333 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3334 "%s: Ethernet device registered",
3339 /* Resetting the Device stats */
3340 status = vxge_hw_mrpcim_stats_access(
3342 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3347 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3349 vxge_hw_device_trace_level_get(hldev),
3350 "%s: device stats clear returns "
3351 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3353 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3354 "%s: %s:%d Exiting...",
3355 ndev->name, __func__, __LINE__);
3359 kfree(vdev->vpaths);
3367 * vxge_device_unregister
3369 * This function will unregister and free the network device
3372 vxge_device_unregister(struct __vxge_hw_device *hldev)
3374 struct vxgedev *vdev;
3375 struct net_device *dev;
3377 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3378 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3383 vdev = netdev_priv(dev);
3384 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3385 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3386 level_trace = vdev->level_trace;
3388 vxge_debug_entryexit(level_trace,
3389 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3391 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3393 /* in 2.6 will call stop() if device is up */
3394 unregister_netdev(dev);
3396 flush_scheduled_work();
3398 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3399 vxge_debug_entryexit(level_trace,
3400 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3404 * vxge_callback_crit_err
3406 * This function is called by the alarm handler in interrupt context.
3407 * Driver must analyze it based on the event type.
3410 vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3411 enum vxge_hw_event type, u64 vp_id)
3413 struct net_device *dev = hldev->ndev;
3414 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3417 vxge_debug_entryexit(vdev->level_trace,
3418 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3420 /* Note: This event type should be used for device wide
3421 * indications only - Serious errors, Slot freeze and critical errors
3423 vdev->cric_err_event = type;
3425 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
3426 if (vdev->vpaths[vpath_idx].device_id == vp_id)
3429 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3430 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3431 vxge_debug_init(VXGE_ERR,
3432 "%s: Slot is frozen", vdev->ndev->name);
3433 } else if (type == VXGE_HW_EVENT_SERR) {
3434 vxge_debug_init(VXGE_ERR,
3435 "%s: Encountered Serious Error",
3437 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3438 vxge_debug_init(VXGE_ERR,
3439 "%s: Encountered Critical Error",
3443 if ((type == VXGE_HW_EVENT_SERR) ||
3444 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3445 if (unlikely(vdev->exec_mode))
3446 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3447 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3448 vxge_hw_device_mask_all(hldev);
3449 if (unlikely(vdev->exec_mode))
3450 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3451 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3452 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3454 if (unlikely(vdev->exec_mode))
3455 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3457 /* check if this vpath is already set for reset */
3458 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3460 /* disable interrupts for this vpath */
3461 vxge_vpath_intr_disable(vdev, vpath_idx);
3463 /* stop the queue for this vpath */
3464 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
3470 vxge_debug_entryexit(vdev->level_trace,
3471 "%s: %s:%d Exiting...",
3472 vdev->ndev->name, __func__, __LINE__);
3475 static void verify_bandwidth(void)
3477 int i, band_width, total = 0, equal_priority = 0;
3479 /* 1. If the user enters 0 for any vpath, give equal priority to all */
3480 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3481 if (bw_percentage[i] == 0) {
3487 if (!equal_priority) {
3488 /* 2. If sum exceeds 100, give equal priority to all */
3489 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3490 if (bw_percentage[i] == 0xFF)
3493 total += bw_percentage[i];
3494 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3501 if (!equal_priority) {
3502 /* Is all the bandwidth consumed? */
3503 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3504 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3505 /* Split the rest of the bandwidth equally among the remaining vpaths */
3507 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3508 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3509 if (band_width < 2) /* min of 2% */
3512 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3518 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3522 if (equal_priority) {
3523 vxge_debug_init(VXGE_ERR,
3524 "%s: Assigning equal bandwidth to all the vpaths",
3526 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3527 VXGE_HW_MAX_VIRTUAL_PATHS;
3528 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3529 bw_percentage[i] = bw_percentage[0];
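/*
 * Worked example of the checks above, assuming 17 vpaths and
 * VXGE_HW_VPATH_BANDWIDTH_MAX == 100: with bw_percentage = {40, 30}
 * on the command line, the 15 unset entries stay at 0xFF, total = 70,
 * and the unused 30% is split across the 15 unset vpaths (2% each,
 * which is also the enforced minimum). A 0 entry anywhere, or a total
 * above 100, falls back to equal shares of 100 / 17 for every vpath.
 */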
3536 * Vpath configuration
3538 static int __devinit vxge_config_vpaths(
3539 struct vxge_hw_device_config *device_config,
3540 u64 vpath_mask, struct vxge_config *config_param)
3542 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3543 u32 txdl_size, txdl_per_memblock;
3545 temp = driver_config->vpath_per_dev;
3546 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3547 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3548 /* No more CPUs; return vpath count as zero. */
3549 if (driver_config->g_no_cpus == -1)
3552 if (!driver_config->g_no_cpus)
3553 driver_config->g_no_cpus = num_online_cpus();
3555 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3556 if (!driver_config->vpath_per_dev)
3557 driver_config->vpath_per_dev = 1;
3559 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3560 if (!vxge_bVALn(vpath_mask, i, 1))
3564 if (default_no_vpath < driver_config->vpath_per_dev)
3565 driver_config->vpath_per_dev = default_no_vpath;
3567 driver_config->g_no_cpus = driver_config->g_no_cpus -
3568 (driver_config->vpath_per_dev * 2);
3569 if (driver_config->g_no_cpus <= 0)
3570 driver_config->g_no_cpus = -1;
3573 if (driver_config->vpath_per_dev == 1) {
3574 vxge_debug_ll_config(VXGE_TRACE,
3575 "%s: Disable tx and rx steering, "
3576 "as single vpath is configured", VXGE_DRIVER_NAME);
3577 config_param->rth_steering = NO_STEERING;
3578 config_param->tx_steering_type = NO_STEERING;
3579 device_config->rth_en = 0;
3582 /* configure bandwidth */
3583 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3584 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3586 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3587 device_config->vp_config[i].vp_id = i;
3588 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3589 if (no_of_vpaths < driver_config->vpath_per_dev) {
3590 if (!vxge_bVALn(vpath_mask, i, 1)) {
3591 vxge_debug_ll_config(VXGE_TRACE,
3592 "%s: vpath: %d is not available",
3593 VXGE_DRIVER_NAME, i);
3596 vxge_debug_ll_config(VXGE_TRACE,
3597 "%s: vpath: %d available",
3598 VXGE_DRIVER_NAME, i);
3602 vxge_debug_ll_config(VXGE_TRACE,
3603 "%s: vpath: %d is not configured, "
3604 "max_config_vpath exceeded",
3605 VXGE_DRIVER_NAME, i);
3609 /* Configure Tx fifo's */
3610 device_config->vp_config[i].fifo.enable =
3611 VXGE_HW_FIFO_ENABLE;
3612 device_config->vp_config[i].fifo.max_frags =
3614 device_config->vp_config[i].fifo.memblock_size =
3615 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3617 txdl_size = device_config->vp_config[i].fifo.max_frags *
3618 sizeof(struct vxge_hw_fifo_txd);
3619 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3621 device_config->vp_config[i].fifo.fifo_blocks =
3622 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3624 device_config->vp_config[i].fifo.intr =
3625 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3627 /* Configure tti properties */
3628 device_config->vp_config[i].tti.intr_enable =
3629 VXGE_HW_TIM_INTR_ENABLE;
3631 device_config->vp_config[i].tti.btimer_val =
3632 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3634 device_config->vp_config[i].tti.timer_ac_en =
3635 VXGE_HW_TIM_TIMER_AC_ENABLE;
3637 /* For msi-x with napi (each vector
3638 has a handler of its own) -
3639 Set CI to OFF for all vpaths */
3640 device_config->vp_config[i].tti.timer_ci_en =
3641 VXGE_HW_TIM_TIMER_CI_DISABLE;
3643 device_config->vp_config[i].tti.timer_ri_en =
3644 VXGE_HW_TIM_TIMER_RI_DISABLE;
3646 device_config->vp_config[i].tti.util_sel =
3647 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3649 device_config->vp_config[i].tti.ltimer_val =
3650 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3652 device_config->vp_config[i].tti.rtimer_val =
3653 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3655 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3656 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3657 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3658 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3659 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3660 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3661 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3663 /* Configure Rx rings */
3664 device_config->vp_config[i].ring.enable =
3665 VXGE_HW_RING_ENABLE;
3667 device_config->vp_config[i].ring.ring_blocks =
3668 VXGE_HW_DEF_RING_BLOCKS;
3669 device_config->vp_config[i].ring.buffer_mode =
3670 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3671 device_config->vp_config[i].ring.rxds_limit =
3672 VXGE_HW_DEF_RING_RXDS_LIMIT;
3673 device_config->vp_config[i].ring.scatter_mode =
3674 VXGE_HW_RING_SCATTER_MODE_A;
3676 /* Configure rti properties */
3677 device_config->vp_config[i].rti.intr_enable =
3678 VXGE_HW_TIM_INTR_ENABLE;
3680 device_config->vp_config[i].rti.btimer_val =
3681 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3683 device_config->vp_config[i].rti.timer_ac_en =
3684 VXGE_HW_TIM_TIMER_AC_ENABLE;
3686 device_config->vp_config[i].rti.timer_ci_en =
3687 VXGE_HW_TIM_TIMER_CI_DISABLE;
3689 device_config->vp_config[i].rti.timer_ri_en =
3690 VXGE_HW_TIM_TIMER_RI_DISABLE;
3692 device_config->vp_config[i].rti.util_sel =
3693 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3695 device_config->vp_config[i].rti.urange_a =
3697 device_config->vp_config[i].rti.urange_b =
3699 device_config->vp_config[i].rti.urange_c =
3701 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3702 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3703 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3704 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3706 device_config->vp_config[i].rti.rtimer_val =
3707 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3709 device_config->vp_config[i].rti.ltimer_val =
3710 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3712 device_config->vp_config[i].rpa_strip_vlan_tag =
3716 driver_config->vpath_per_dev = temp;
3717 return no_of_vpaths;
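/*
 * Sketch of the timer scaling used throughout the vp_config setup
 * above: the btimer/ltimer/rtimer module values are in microseconds
 * and the (val * 1000) / 272 arithmetic implies the hardware counts
 * in roughly 272 ns ticks (an assumption read off the code, not a
 * datasheet fact). Hypothetical helper:
 */
static inline u32 vxge_example_usec_to_tim_ticks(u32 usec)
{
	return (usec * 1000) / 272;	/* microseconds -> ~272 ns ticks */
}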
3720 /* initialize the device configuration */
3721 static void __devinit vxge_device_config_init(
3722 struct vxge_hw_device_config *device_config,
3725 /* Used for CQRQ/SRQ. */
3726 device_config->dma_blockpool_initial =
3727 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3729 device_config->dma_blockpool_max =
3730 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3732 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3733 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3735 #ifndef CONFIG_PCI_MSI
3736 vxge_debug_init(VXGE_ERR,
3737 "%s: This Kernel does not support "
3738 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3742 /* Configure the interrupt mode: MSI-X or INTA (IRQ line). */
3743 switch (*intr_type) {
3745 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3749 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3752 /* Timer period between device polls */
3753 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3755 /* Configure mac based steering. */
3756 device_config->rts_mac_en = addr_learn_en;
3758 /* Configure Vpaths */
3759 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3761 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3763 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3764 device_config->dma_blockpool_initial);
3765 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3766 device_config->dma_blockpool_max);
3767 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3768 device_config->intr_mode);
3769 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3770 device_config->device_poll_millis);
3771 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3772 device_config->rts_mac_en);
3773 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3774 device_config->rth_en);
3775 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3776 device_config->rth_it_type);
3779 static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3783 vxge_debug_init(VXGE_TRACE,
3784 "%s: %d Vpath(s) opened",
3785 vdev->ndev->name, vdev->no_of_vpath);
3787 switch (vdev->config.intr_type) {
3789 vxge_debug_init(VXGE_TRACE,
3790 "%s: Interrupt type INTA", vdev->ndev->name);
3794 vxge_debug_init(VXGE_TRACE,
3795 "%s: Interrupt type MSI-X", vdev->ndev->name);
3799 if (vdev->config.rth_steering) {
3800 vxge_debug_init(VXGE_TRACE,
3801 "%s: RTH steering enabled for TCP_IPV4",
3804 vxge_debug_init(VXGE_TRACE,
3805 "%s: RTH steering disabled", vdev->ndev->name);
3808 switch (vdev->config.tx_steering_type) {
3810 vxge_debug_init(VXGE_TRACE,
3811 "%s: Tx steering disabled", vdev->ndev->name);
3813 case TX_PRIORITY_STEERING:
3814 vxge_debug_init(VXGE_TRACE,
3815 "%s: Unsupported tx steering option",
3817 vxge_debug_init(VXGE_TRACE,
3818 "%s: Tx steering disabled", vdev->ndev->name);
3819 vdev->config.tx_steering_type = 0;
3821 case TX_VLAN_STEERING:
3822 vxge_debug_init(VXGE_TRACE,
3823 "%s: Unsupported tx steering option",
3825 vxge_debug_init(VXGE_TRACE,
3826 "%s: Tx steering disabled", vdev->ndev->name);
3827 vdev->config.tx_steering_type = 0;
3829 case TX_MULTIQ_STEERING:
3830 vxge_debug_init(VXGE_TRACE,
3831 "%s: Tx multiqueue steering enabled",
3834 case TX_PORT_STEERING:
3835 vxge_debug_init(VXGE_TRACE,
3836 "%s: Tx port steering enabled",
3840 vxge_debug_init(VXGE_ERR,
3841 "%s: Unsupported tx steering type",
3843 vxge_debug_init(VXGE_TRACE,
3844 "%s: Tx steering disabled", vdev->ndev->name);
3845 vdev->config.tx_steering_type = 0;
3848 if (vdev->config.gro_enable) {
3849 vxge_debug_init(VXGE_ERR,
3850 "%s: Generic receive offload enabled",
3853 vxge_debug_init(VXGE_TRACE,
3854 "%s: Generic receive offload disabled",
3857 if (vdev->config.addr_learn_en)
3858 vxge_debug_init(VXGE_TRACE,
3859 "%s: MAC Address learning enabled", vdev->ndev->name);
3861 vxge_debug_init(VXGE_TRACE,
3862 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3864 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3865 if (!vxge_bVALn(vpath_mask, i, 1))
3867 vxge_debug_ll_config(VXGE_TRACE,
3868 "%s: MTU size - %d", vdev->ndev->name,
3869 ((struct __vxge_hw_device *)(vdev->devh))->
3870 config.vp_config[i].mtu);
3871 vxge_debug_init(VXGE_TRACE,
3872 "%s: VLAN tag stripping %s", vdev->ndev->name,
3873 ((struct __vxge_hw_device *)(vdev->devh))->
3874 config.vp_config[i].rpa_strip_vlan_tag
3875 ? "Enabled" : "Disabled");
3876 vxge_debug_init(VXGE_TRACE,
3877 "%s: Ring blocks : %d", vdev->ndev->name,
3878 ((struct __vxge_hw_device *)(vdev->devh))->
3879 config.vp_config[i].ring.ring_blocks);
3880 vxge_debug_init(VXGE_TRACE,
3881 "%s: Fifo blocks : %d", vdev->ndev->name,
3882 ((struct __vxge_hw_device *)(vdev->devh))->
3883 config.vp_config[i].fifo.fifo_blocks);
3884 vxge_debug_ll_config(VXGE_TRACE,
3885 "%s: Max frags : %d", vdev->ndev->name,
3886 ((struct __vxge_hw_device *)(vdev->devh))->
3887 config.vp_config[i].fifo.max_frags);
3894 * vxge_pm_suspend - vxge power management suspend entry point
3897 static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3902 * vxge_pm_resume - vxge power management resume entry point
3905 static int vxge_pm_resume(struct pci_dev *pdev)
3913 * vxge_io_error_detected - called when PCI error is detected
3914 * @pdev: Pointer to PCI device
3915 * @state: The current pci connection state
3917 * This function is called after a PCI bus error affecting
3918 * this device has been detected.
3920 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3921 pci_channel_state_t state)
3923 struct __vxge_hw_device *hldev =
3924 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3925 struct net_device *netdev = hldev->ndev;
3927 netif_device_detach(netdev);
3929 if (state == pci_channel_io_perm_failure)
3930 return PCI_ERS_RESULT_DISCONNECT;
3932 if (netif_running(netdev)) {
3933 /* Bring down the card, while avoiding PCI I/O */
3934 do_vxge_close(netdev, 0);
3937 pci_disable_device(pdev);
3939 return PCI_ERS_RESULT_NEED_RESET;
3943 * vxge_io_slot_reset - called after the pci bus has been reset.
3944 * @pdev: Pointer to PCI device
3946 * Restart the card from scratch, as if from a cold-boot.
3947 * At this point, the card has experienced a hard reset,
3948 * followed by fixups by BIOS, and has its config space
3949 * set up identically to what it was at cold boot.
3951 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3953 struct __vxge_hw_device *hldev =
3954 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3955 struct net_device *netdev = hldev->ndev;
3957 struct vxgedev *vdev = netdev_priv(netdev);
3959 if (pci_enable_device(pdev)) {
3960 printk(KERN_ERR "%s: "
3961 "Cannot re-enable device after reset\n",
3963 return PCI_ERS_RESULT_DISCONNECT;
3966 pci_set_master(pdev);
3969 return PCI_ERS_RESULT_RECOVERED;
3973 * vxge_io_resume - called when traffic can start flowing again.
3974 * @pdev: Pointer to PCI device
3976 * This callback is called when the error recovery driver tells
3977 * us that it is OK to resume normal operation.
3979 static void vxge_io_resume(struct pci_dev *pdev)
3981 struct __vxge_hw_device *hldev =
3982 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3983 struct net_device *netdev = hldev->ndev;
3985 if (netif_running(netdev)) {
3986 if (vxge_open(netdev)) {
3987 printk(KERN_ERR "%s: "
3988 "Can't bring device back up after reset\n",
3994 netif_device_attach(netdev);
3999 * @pdev : structure containing the PCI related information of the device.
4000 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4002 * This function is called when a new PCI device gets detected and initializes
4005 * returns 0 on success and negative on failure.
4008 static int __devinit
4009 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4011 struct __vxge_hw_device *hldev;
4012 enum vxge_hw_status status;
4016 struct vxgedev *vdev;
4017 struct vxge_config ll_config;
4018 struct vxge_hw_device_config *device_config = NULL;
4019 struct vxge_hw_device_attr attr;
4020 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4022 struct vxge_mac_addrs *entry;
4023 static int bus = -1, device = -1;
4026 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4029 if (bus != pdev->bus->number)
4031 if (device != PCI_SLOT(pdev->devfn))
4034 bus = pdev->bus->number;
4035 device = PCI_SLOT(pdev->devfn);
4038 if (driver_config->config_dev_cnt &&
4039 (driver_config->config_dev_cnt !=
4040 driver_config->total_dev_cnt))
4041 vxge_debug_init(VXGE_ERR,
4042 "%s: Configured %d of %d devices",
4044 driver_config->config_dev_cnt,
4045 driver_config->total_dev_cnt);
4046 driver_config->config_dev_cnt = 0;
4047 driver_config->total_dev_cnt = 0;
4048 driver_config->g_no_cpus = 0;
4051 driver_config->vpath_per_dev = max_config_vpath;
4053 driver_config->total_dev_cnt++;
4054 if (++driver_config->config_dev_cnt > max_config_dev) {
4059 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4061 if (!device_config) {
4063 vxge_debug_init(VXGE_ERR,
4064 "device_config : malloc failed %s %d",
4065 __FILE__, __LINE__);
4069 memset(&ll_config, 0, sizeof(struct vxge_config));
4070 ll_config.tx_steering_type = TX_MULTIQ_STEERING;
4071 ll_config.intr_type = MSI_X;
4072 ll_config.napi_weight = NEW_NAPI_WEIGHT;
4073 ll_config.rth_steering = RTH_STEERING;
4075 /* get the default configuration parameters */
4076 vxge_hw_device_config_default_get(device_config);
4078 /* initialize configuration parameters */
4079 vxge_device_config_init(device_config, &ll_config.intr_type);
4081 ret = pci_enable_device(pdev);
4083 vxge_debug_init(VXGE_ERR,
4084 "%s : can not enable PCI device", __func__);
4088 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4089 vxge_debug_ll_config(VXGE_TRACE,
4090 "%s : using 64bit DMA", __func__);
4094 if (pci_set_consistent_dma_mask(pdev,
4095 DMA_BIT_MASK(64))) {
4096 vxge_debug_init(VXGE_ERR,
4097 "%s : unable to obtain 64bit DMA for "
4098 "consistent allocations", __func__);
4102 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4103 vxge_debug_ll_config(VXGE_TRACE,
4104 "%s : using 32bit DMA", __func__);
4110 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4111 vxge_debug_init(VXGE_ERR,
4112 "%s : request regions failed", __func__);
4117 pci_set_master(pdev);
4119 attr.bar0 = pci_ioremap_bar(pdev, 0);
4121 vxge_debug_init(VXGE_ERR,
4122 "%s : cannot remap io memory bar0", __func__);
4126 vxge_debug_ll_config(VXGE_TRACE,
4127 "pci ioremap bar0: %p:0x%llx",
4129 (unsigned long long)pci_resource_start(pdev, 0));
4131 status = vxge_hw_device_hw_info_get(attr.bar0,
4132 &ll_config.device_hw_info);
4133 if (status != VXGE_HW_OK) {
4134 vxge_debug_init(VXGE_ERR,
4135 "%s: Reading of hardware info failed. "
4136 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4141 if (ll_config.device_hw_info.fw_version.major !=
4142 VXGE_DRIVER_FW_VERSION_MAJOR) {
4143 vxge_debug_init(VXGE_ERR,
4144 "%s: Incorrect firmware version. "
4145 "Please upgrade the firmware to version 1.x.x",
4151 vpath_mask = ll_config.device_hw_info.vpath_mask;
4152 if (vpath_mask == 0) {
4153 vxge_debug_ll_config(VXGE_TRACE,
4154 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4159 vxge_debug_ll_config(VXGE_TRACE,
4160 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4161 (unsigned long long)vpath_mask);
4163 /* Check how many vpaths are available */
4164 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4165 if (!((vpath_mask) & vxge_mBIT(i)))
4167 max_vpath_supported++;
4170 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4171 if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
4172 ll_config.device_hw_info.function_mode) &&
4173 (max_config_dev > 1) && (pdev->is_physfn)) {
4174 ret = pci_enable_sriov(pdev, max_config_dev - 1);
4176 vxge_debug_ll_config(VXGE_ERR,
4177 "Failed to enable SRIOV: %d\n", ret);
4181 * Configure vpaths and get driver configured number of vpaths
4182 * which is less than or equal to the maximum vpaths per function.
4184 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
4186 vxge_debug_ll_config(VXGE_ERR,
4187 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4192 /* Setting driver callbacks */
4193 attr.uld_callbacks.link_up = vxge_callback_link_up;
4194 attr.uld_callbacks.link_down = vxge_callback_link_down;
4195 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4197 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4198 if (status != VXGE_HW_OK) {
4199 vxge_debug_init(VXGE_ERR,
4200 "Failed to initialize device (%d)", status);
4205 /* If FCS stripping is not disabled in the MAC, fail the driver load */
4206 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4207 vxge_debug_init(VXGE_ERR,
4208 "%s: FCS stripping is not disabled in MAC"
4209 " failing driver load", VXGE_DRIVER_NAME);
4214 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4216 /* set private device info */
4217 pci_set_drvdata(pdev, hldev);
4219 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4220 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4221 ll_config.addr_learn_en = addr_learn_en;
4222 ll_config.rth_algorithm = RTH_ALG_JENKINS;
4223 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4224 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4225 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4226 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4227 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4228 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4229 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
4230 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4231 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4233 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
4239 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4240 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4241 vxge_hw_device_trace_level_get(hldev));
4243 /* set private HW device info */
4244 hldev->ndev = vdev->ndev;
4245 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4246 vdev->bar0 = attr.bar0;
4247 vdev->max_vpath_supported = max_vpath_supported;
4248 vdev->no_of_vpath = no_of_vpath;
4250 /* Virtual Path count */
4251 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4252 if (!vxge_bVALn(vpath_mask, i, 1))
4254 if (j >= vdev->no_of_vpath)
4257 vdev->vpaths[j].is_configured = 1;
4258 vdev->vpaths[j].device_id = i;
4259 vdev->vpaths[j].fifo.driver_id = j;
4260 vdev->vpaths[j].ring.driver_id = j;
4261 vdev->vpaths[j].vdev = vdev;
4262 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4263 memcpy((u8 *)vdev->vpaths[j].macaddr,
4264 (u8 *)ll_config.device_hw_info.mac_addrs[i],
4267 /* Initialize the mac address list header */
4268 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4270 vdev->vpaths[j].mac_addr_cnt = 0;
4271 vdev->vpaths[j].mcast_addr_cnt = 0;
4274 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4275 vdev->max_config_port = max_config_port;
4277 vdev->vlan_tag_strip = vlan_tag_strip;
4279 /* map the hashing selector table to the configured vpaths */
4280 for (i = 0; i < vdev->no_of_vpath; i++)
4281 vdev->vpath_selector[i] = vpath_selector[i];
4283 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4285 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4286 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4287 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4289 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4290 vdev->ndev->name, ll_config.device_hw_info.serial_number);
4292 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4293 vdev->ndev->name, ll_config.device_hw_info.part_number);
4295 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4296 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4298 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4299 vdev->ndev->name, macaddr);
4301 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4302 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4304 vxge_debug_init(VXGE_TRACE,
4305 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4306 ll_config.device_hw_info.fw_version.version,
4307 ll_config.device_hw_info.fw_date.date);
4310 switch (ll_config.device_hw_info.function_mode) {
4311 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4312 vxge_debug_init(VXGE_TRACE,
4313 "%s: Single Function Mode Enabled", vdev->ndev->name);
4315 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4316 vxge_debug_init(VXGE_TRACE,
4317 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4319 case VXGE_HW_FUNCTION_MODE_SRIOV:
4320 vxge_debug_init(VXGE_TRACE,
4321 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4323 case VXGE_HW_FUNCTION_MODE_MRIOV:
4324 vxge_debug_init(VXGE_TRACE,
4325 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4330 vxge_print_parm(vdev, vpath_mask);
4332 /* Store the fw version for the ethtool option */
4333 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
4334 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4335 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4337 /* Copy the station mac address to the list */
4338 for (i = 0; i < vdev->no_of_vpath; i++) {
4339 entry = (struct vxge_mac_addrs *)
4340 kzalloc(sizeof(struct vxge_mac_addrs),
4342 if (NULL == entry) {
4343 vxge_debug_init(VXGE_ERR,
4344 "%s: mac_addr_list : memory allocation failed",
4349 macaddr = (u8 *)&entry->macaddr;
4350 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4351 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4352 vdev->vpaths[i].mac_addr_cnt = 1;
4355 kfree(device_config);
4358 * INTA is shared in multi-function mode. This is unlike the INTA
4359 * implementation in MR mode, where each VH has its own INTA message.
4360 * - INTA is masked (disabled) as long as at least one function sets
4361 * its TITAN_MASK_ALL_INT.ALARM bit.
4362 * - INTA is unmasked (enabled) when all enabled functions have cleared
4363 * their own TITAN_MASK_ALL_INT.ALARM bit.
4364 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4365 * Though this driver leaves the top level interrupts unmasked while
4366 * leaving the required module interrupt bits masked on exit, there
4367 * could be a rogue driver around that does not follow this procedure,
4368 * resulting in a failure to generate interrupts. The following code is
4369 * present to prevent such a failure.
4372 if (ll_config.device_hw_info.function_mode ==
4373 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4374 if (vdev->config.intr_type == INTA)
4375 vxge_hw_device_unmask_all(hldev);
4377 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4378 vdev->ndev->name, __func__, __LINE__);
4380 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4381 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4382 vxge_hw_device_trace_level_get(hldev));
4387 for (i = 0; i < vdev->no_of_vpath; i++)
4388 vxge_free_mac_add_list(&vdev->vpaths[i]);
4390 vxge_device_unregister(hldev);
4392 pci_disable_sriov(pdev);
4393 vxge_hw_device_terminate(hldev);
4397 pci_release_regions(pdev);
4399 pci_disable_device(pdev);
4401 kfree(device_config);
4402 driver_config->config_dev_cnt--;
4403 pci_set_drvdata(pdev, NULL);
4408 * vxge_remove - Free the PCI device
4409 * @pdev: structure containing the PCI related information of the device.
4410 * Description: This function is called by the PCI subsystem to release a
4411 * PCI device and free up all resources held by the device.
4413 static void __devexit
4414 vxge_remove(struct pci_dev *pdev)
4416 struct __vxge_hw_device *hldev;
4417 struct vxgedev *vdev = NULL;
4418 struct net_device *dev;
4420 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4421 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4425 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4430 vdev = netdev_priv(dev);
4432 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4433 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4434 level_trace = vdev->level_trace;
4436 vxge_debug_entryexit(level_trace,
4437 "%s:%d", __func__, __LINE__);
4439 vxge_debug_init(level_trace,
4440 "%s : removing PCI device...", __func__);
4441 vxge_device_unregister(hldev);
4443 for (i = 0; i < vdev->no_of_vpath; i++) {
4444 vxge_free_mac_add_list(&vdev->vpaths[i]);
4445 vdev->vpaths[i].mcast_addr_cnt = 0;
4446 vdev->vpaths[i].mac_addr_cnt = 0;
4449 kfree(vdev->vpaths);
4451 iounmap(vdev->bar0);
4453 pci_disable_sriov(pdev);
4455 /* we are safe to free it now */
4458 vxge_debug_init(level_trace,
4459 "%s:%d Device unregistered", __func__, __LINE__);
4461 vxge_hw_device_terminate(hldev);
4463 pci_disable_device(pdev);
4464 pci_release_regions(pdev);
4465 pci_set_drvdata(pdev, NULL);
4466 vxge_debug_entryexit(level_trace,
4467 "%s:%d Exiting...", __func__, __LINE__);
4470 static struct pci_error_handlers vxge_err_handler = {
4471 .error_detected = vxge_io_error_detected,
4472 .slot_reset = vxge_io_slot_reset,
4473 .resume = vxge_io_resume,
4476 static struct pci_driver vxge_driver = {
4477 .name = VXGE_DRIVER_NAME,
4478 .id_table = vxge_id_table,
4479 .probe = vxge_probe,
4480 .remove = __devexit_p(vxge_remove),
4482 .suspend = vxge_pm_suspend,
4483 .resume = vxge_pm_resume,
4485 .err_handler = &vxge_err_handler,
4493 snprintf(version, 32, "%s", DRV_VERSION);
4495 printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
4497 printk(KERN_CRIT "%s: Driver version: %s\n",
4498 VXGE_DRIVER_NAME, version);
4502 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4506 ret = pci_register_driver(&vxge_driver);
4508 if (driver_config->config_dev_cnt &&
4509 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4510 vxge_debug_init(VXGE_ERR,
4511 "%s: Configured %d of %d devices",
4512 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4513 driver_config->total_dev_cnt);
4516 kfree(driver_config);
4524 pci_unregister_driver(&vxge_driver);
4525 kfree(driver_config);
4527 module_init(vxge_starter);
4528 module_exit(vxge_closer);