2 * Linux driver for VMware's vmxnet3 ethernet NIC.
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
30 #include "vmxnet3_int.h"
32 char vmxnet3_driver_name[] = "vmxnet3";
33 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
37 * Last entry must be all 0s
39 static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
44 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46 static atomic_t devices_found;
48 #define VMXNET3_MAX_DEVICES 10
49 static int enable_mq = 1;
50 static int irq_share_mode;
53 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
56 * Enable/Disable the given intr
59 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
61 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
66 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
68 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
73 * Enable/Disable all intrs used by the device
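 *
 * Note on ordering (as implemented below): the VMXNET3_IC_DISABLE_ALL flag in
 * intrCtrl is set before the individual vectors are masked, and is cleared
 * only after every vector has been unmasked, so device interrupts stay
 * globally suppressed while the per-vector IMR masks are being changed.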
76 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
80 for (i = 0; i < adapter->intr.num_intrs; i++)
81 vmxnet3_enable_intr(adapter, i);
82 adapter->shared->devRead.intrConf.intrCtrl &=
83 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
88 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
92 adapter->shared->devRead.intrConf.intrCtrl |=
93 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
94 for (i = 0; i < adapter->intr.num_intrs; i++)
95 vmxnet3_disable_intr(adapter, i);
100 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
102 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
107 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
114 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
117 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
122 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
125 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
130 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
134 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
139 * Check the link state. This may start or stop the tx queue.
142 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
148 spin_lock_irqsave(&adapter->cmd_lock, flags);
149 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
150 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
151 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
153 adapter->link_speed = ret >> 16;
154 if (ret & 1) { /* Link is up. */
155 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
156 adapter->netdev->name, adapter->link_speed);
157 if (!netif_carrier_ok(adapter->netdev))
158 netif_carrier_on(adapter->netdev);
161 for (i = 0; i < adapter->num_tx_queues; i++)
162 vmxnet3_tq_start(&adapter->tx_queue[i],
166 printk(KERN_INFO "%s: NIC Link is Down\n",
167 adapter->netdev->name);
168 if (netif_carrier_ok(adapter->netdev))
169 netif_carrier_off(adapter->netdev);
172 for (i = 0; i < adapter->num_tx_queues; i++)
173 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
179 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
183 u32 events = le32_to_cpu(adapter->shared->ecr);
187 vmxnet3_ack_events(adapter, events);
189 /* Check if link state has changed */
190 if (events & VMXNET3_ECR_LINK)
191 vmxnet3_check_link(adapter, true);
193 /* Check if there is an error on xmit/recv queues */
194 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
195 spin_lock_irqsave(&adapter->cmd_lock, flags);
196 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
197 VMXNET3_CMD_GET_QUEUE_STATUS);
198 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
200 for (i = 0; i < adapter->num_tx_queues; i++)
201 if (adapter->tqd_start[i].status.stopped)
202 dev_err(&adapter->netdev->dev,
203 "%s: tq[%d] error 0x%x\n",
204 adapter->netdev->name, i, le32_to_cpu(
205 adapter->tqd_start[i].status.error));
206 for (i = 0; i < adapter->num_rx_queues; i++)
207 if (adapter->rqd_start[i].status.stopped)
208 dev_err(&adapter->netdev->dev,
209 "%s: rq[%d] error 0x%x\n",
210 adapter->netdev->name, i,
211 adapter->rqd_start[i].status.error);
213 schedule_work(&adapter->work);
217 #ifdef __BIG_ENDIAN_BITFIELD
219 * The device expects the bitfields in shared structures to be written in
220 * little endian. When the CPU is big endian, the following routines are used
221 * to read from and write to the device ABI correctly.
222 * The general technique used here is: double-word bitfields are defined in
223 * the opposite order for big-endian architectures. Before the driver reads
224 * them, the complete double word is translated using le32_to_cpu. Similarly,
225 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
226 * the double words back into the required format.
227 * To avoid touching bits in the shared structure more than once, temporary
228 * descriptors are used. These are passed as srcDesc to the following functions.
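 *
 * For example (illustrative), reading the gen bit of a Tx descriptor on a
 * big-endian CPU picks the double word holding the bit and lets
 * get_bitfield32() below do the byte swap and mask:
 *
 *	gen = get_bitfield32((const __le32 *)txdesc +
 *			     VMXNET3_TXD_GEN_DWORD_SHIFT,
 *			     VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE);
 *
 * which is exactly what the VMXNET3_TXDESC_GET_GEN() macro expands to.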
230 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
231 struct Vmxnet3_RxDesc *dstDesc)
233 u32 *src = (u32 *)srcDesc + 2;
234 u32 *dst = (u32 *)dstDesc + 2;
235 dstDesc->addr = le64_to_cpu(srcDesc->addr);
236 *dst = le32_to_cpu(*src);
237 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
240 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
241 struct Vmxnet3_TxDesc *dstDesc)
244 u32 *src = (u32 *)(srcDesc + 1);
245 u32 *dst = (u32 *)(dstDesc + 1);
247 /* Working backwards so that the gen bit is set at the end. */
248 for (i = 2; i > 0; i--) {
251 *dst = cpu_to_le32(*src);
256 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
257 struct Vmxnet3_RxCompDesc *dstDesc)
260 u32 *src = (u32 *)srcDesc;
261 u32 *dst = (u32 *)dstDesc;
262 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
263 *dst = le32_to_cpu(*src);
270 /* Used to read bitfield values from double words. */
271 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
273 u32 temp = le32_to_cpu(*bitfield);
274 u32 mask = ((1 << size) - 1) << pos;
282 #endif /* __BIG_ENDIAN_BITFIELD */
284 #ifdef __BIG_ENDIAN_BITFIELD
286 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
287 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
288 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
289 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
290 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
291 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
292 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
293 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
294 VMXNET3_TCD_GEN_SIZE)
295 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
296 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
297 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
299 vmxnet3_RxCompToCPU((rcd), (tmp)); \
301 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
303 vmxnet3_RxDescToCPU((rxd), (tmp)); \
308 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
309 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
310 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
311 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
312 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
313 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
315 #endif /* __BIG_ENDIAN_BITFIELD */
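/*
 * Callers of vmxnet3_getRxComp()/vmxnet3_getRxDesc() pass a temporary
 * descriptor (e.g. a struct Vmxnet3_RxCompDesc on the stack): on big-endian
 * builds the ring entry is converted into that temporary, while on
 * little-endian builds the macro simply aliases the ring entry and the
 * temporary goes unused.
 */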
319 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
320 struct pci_dev *pdev)
322 if (tbi->map_type == VMXNET3_MAP_SINGLE)
323 pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
325 else if (tbi->map_type == VMXNET3_MAP_PAGE)
326 pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
329 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
331 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
336 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
337 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
342 /* no out of order completion */
343 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
344 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
346 skb = tq->buf_info[eop_idx].skb;
348 tq->buf_info[eop_idx].skb = NULL;
350 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
352 while (tq->tx_ring.next2comp != eop_idx) {
353 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
356 /* update next2comp w/o tx_lock. Since we are marking more,
357 * not fewer, tx ring entries available, the worst case is
358 * that the tx routine incorrectly re-queues a pkt due to
359 * insufficient tx ring entries.
361 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
365 dev_kfree_skb_any(skb);
371 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
372 struct vmxnet3_adapter *adapter)
375 union Vmxnet3_GenericDesc *gdesc;
377 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
378 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
379 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
380 &gdesc->tcd), tq, adapter->pdev,
383 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
384 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
388 spin_lock(&tq->tx_lock);
389 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
390 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
391 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
392 netif_carrier_ok(adapter->netdev))) {
393 vmxnet3_tq_wake(tq, adapter);
395 spin_unlock(&tq->tx_lock);
402 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
403 struct vmxnet3_adapter *adapter)
407 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
408 struct vmxnet3_tx_buf_info *tbi;
410 tbi = tq->buf_info + tq->tx_ring.next2comp;
412 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
414 dev_kfree_skb_any(tbi->skb);
417 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
420 /* sanity check, verify all buffers are indeed unmapped and freed */
421 for (i = 0; i < tq->tx_ring.size; i++) {
422 BUG_ON(tq->buf_info[i].skb != NULL ||
423 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
426 tq->tx_ring.gen = VMXNET3_INIT_GEN;
427 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
429 tq->comp_ring.gen = VMXNET3_INIT_GEN;
430 tq->comp_ring.next2proc = 0;
435 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
436 struct vmxnet3_adapter *adapter)
438 if (tq->tx_ring.base) {
439 pci_free_consistent(adapter->pdev, tq->tx_ring.size *
440 sizeof(struct Vmxnet3_TxDesc),
441 tq->tx_ring.base, tq->tx_ring.basePA);
442 tq->tx_ring.base = NULL;
444 if (tq->data_ring.base) {
445 pci_free_consistent(adapter->pdev, tq->data_ring.size *
446 sizeof(struct Vmxnet3_TxDataDesc),
447 tq->data_ring.base, tq->data_ring.basePA);
448 tq->data_ring.base = NULL;
450 if (tq->comp_ring.base) {
451 pci_free_consistent(adapter->pdev, tq->comp_ring.size *
452 sizeof(struct Vmxnet3_TxCompDesc),
453 tq->comp_ring.base, tq->comp_ring.basePA);
454 tq->comp_ring.base = NULL;
461 /* Destroy all tx queues */
463 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
467 for (i = 0; i < adapter->num_tx_queues; i++)
468 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
473 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
474 struct vmxnet3_adapter *adapter)
478 /* reset the tx ring contents to 0 and reset the tx ring states */
479 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
480 sizeof(struct Vmxnet3_TxDesc));
481 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
482 tq->tx_ring.gen = VMXNET3_INIT_GEN;
484 memset(tq->data_ring.base, 0, tq->data_ring.size *
485 sizeof(struct Vmxnet3_TxDataDesc));
487 /* reset the tx comp ring contents to 0 and reset comp ring states */
488 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
489 sizeof(struct Vmxnet3_TxCompDesc));
490 tq->comp_ring.next2proc = 0;
491 tq->comp_ring.gen = VMXNET3_INIT_GEN;
493 /* reset the bookkeeping data */
494 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
495 for (i = 0; i < tq->tx_ring.size; i++)
496 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
498 /* stats are not reset */
503 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
504 struct vmxnet3_adapter *adapter)
506 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
507 tq->comp_ring.base || tq->buf_info);
509 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
510 * sizeof(struct Vmxnet3_TxDesc),
511 &tq->tx_ring.basePA);
512 if (!tq->tx_ring.base) {
513 printk(KERN_ERR "%s: failed to allocate tx ring\n",
514 adapter->netdev->name);
518 tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
520 sizeof(struct Vmxnet3_TxDataDesc),
521 &tq->data_ring.basePA);
522 if (!tq->data_ring.base) {
523 printk(KERN_ERR "%s: failed to allocate data ring\n",
524 adapter->netdev->name);
528 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
530 sizeof(struct Vmxnet3_TxCompDesc),
531 &tq->comp_ring.basePA);
532 if (!tq->comp_ring.base) {
533 printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
534 adapter->netdev->name);
538 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
541 printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
542 adapter->netdev->name);
549 vmxnet3_tq_destroy(tq, adapter);
554 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
558 for (i = 0; i < adapter->num_tx_queues; i++)
559 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
563 * Starting from ring->next2fill, allocate rx buffers for the given ring
564 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
565 * are allocated or allocation fails.
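 *
 * Ring 0 is populated with VMXNET3_RX_BUF_SKB (head) buffers at packet
 * boundaries and VMXNET3_RX_BUF_PAGE (body) buffers in between; ring 1
 * carries page buffers only (see vmxnet3_rq_init()).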
569 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
570 int num_to_alloc, struct vmxnet3_adapter *adapter)
572 int num_allocated = 0;
573 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
574 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
577 while (num_allocated <= num_to_alloc) {
578 struct vmxnet3_rx_buf_info *rbi;
579 union Vmxnet3_GenericDesc *gd;
581 rbi = rbi_base + ring->next2fill;
582 gd = ring->base + ring->next2fill;
584 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
585 if (rbi->skb == NULL) {
586 rbi->skb = dev_alloc_skb(rbi->len +
588 if (unlikely(rbi->skb == NULL)) {
589 rq->stats.rx_buf_alloc_failure++;
592 rbi->skb->dev = adapter->netdev;
594 skb_reserve(rbi->skb, NET_IP_ALIGN);
595 rbi->dma_addr = pci_map_single(adapter->pdev,
596 rbi->skb->data, rbi->len,
599 /* rx buffer skipped by the device */
601 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
603 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
604 rbi->len != PAGE_SIZE);
606 if (rbi->page == NULL) {
607 rbi->page = alloc_page(GFP_ATOMIC);
608 if (unlikely(rbi->page == NULL)) {
609 rq->stats.rx_buf_alloc_failure++;
612 rbi->dma_addr = pci_map_page(adapter->pdev,
613 rbi->page, 0, PAGE_SIZE,
616 /* rx buffers skipped by the device */
618 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
621 BUG_ON(rbi->dma_addr == 0);
622 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
623 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
626 /* Fill the last buffer but don't mark it ready, or else the
627 * device will think that the queue is full */
628 if (num_allocated == num_to_alloc)
631 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
633 vmxnet3_cmd_ring_adv_next2fill(ring);
635 rq->uncommitted[ring_idx] += num_allocated;
637 dev_dbg(&adapter->netdev->dev,
638 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
639 "%u, uncommited %u\n", num_allocated, ring->next2fill,
640 ring->next2comp, rq->uncommitted[ring_idx]);
642 /* so that the device can distinguish a full ring from an empty ring */
643 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
645 return num_allocated;
650 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
651 struct vmxnet3_rx_buf_info *rbi)
653 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
654 skb_shinfo(skb)->nr_frags;
656 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
658 __skb_frag_set_page(frag, rbi->page);
659 frag->page_offset = 0;
660 skb_frag_size_set(frag, rcd->len);
661 skb->data_len += rcd->len;
662 skb->truesize += PAGE_SIZE;
663 skb_shinfo(skb)->nr_frags++;
668 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
669 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
670 struct vmxnet3_adapter *adapter)
673 unsigned long buf_offset;
675 union Vmxnet3_GenericDesc *gdesc;
676 struct vmxnet3_tx_buf_info *tbi = NULL;
678 BUG_ON(ctx->copy_size > skb_headlen(skb));
680 /* use the previous gen bit for the SOP desc */
681 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
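	/* The SOP descriptor is written with the inverted gen bit; it is
	 * flipped to the current generation only after every other descriptor
	 * of the packet has been filled in (see vmxnet3_tq_xmit()), so the
	 * device never sees a partially constructed packet.
	 */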
683 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
684 gdesc = ctx->sop_txd; /* both loops below can be skipped */
686 /* no need to map the buffer if headers are copied */
687 if (ctx->copy_size) {
688 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
689 tq->tx_ring.next2fill *
690 sizeof(struct Vmxnet3_TxDataDesc));
691 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
692 ctx->sop_txd->dword[3] = 0;
694 tbi = tq->buf_info + tq->tx_ring.next2fill;
695 tbi->map_type = VMXNET3_MAP_NONE;
697 dev_dbg(&adapter->netdev->dev,
698 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
699 tq->tx_ring.next2fill,
700 le64_to_cpu(ctx->sop_txd->txd.addr),
701 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
702 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
704 /* use the right gen for non-SOP desc */
705 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
708 /* linear part can use multiple tx desc if it's big */
709 len = skb_headlen(skb) - ctx->copy_size;
710 buf_offset = ctx->copy_size;
714 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
718 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
719 /* spec says that for TxDesc.len, 0 == 2^14 */
722 tbi = tq->buf_info + tq->tx_ring.next2fill;
723 tbi->map_type = VMXNET3_MAP_SINGLE;
724 tbi->dma_addr = pci_map_single(adapter->pdev,
725 skb->data + buf_offset, buf_size,
730 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
731 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
733 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
734 gdesc->dword[2] = cpu_to_le32(dw2);
737 dev_dbg(&adapter->netdev->dev,
738 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
739 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
740 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
741 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
742 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
745 buf_offset += buf_size;
748 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
749 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
751 tbi = tq->buf_info + tq->tx_ring.next2fill;
752 tbi->map_type = VMXNET3_MAP_PAGE;
753 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
754 0, skb_frag_size(frag),
757 tbi->len = skb_frag_size(frag);
759 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
760 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
762 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
763 gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
766 dev_dbg(&adapter->netdev->dev,
767 "txd[%u]: 0x%llu %u %u\n",
768 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
769 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
770 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
771 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
774 ctx->eop_txd = gdesc;
776 /* set the last buf_info for the pkt */
778 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
782 /* Init all tx queues */
784 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
788 for (i = 0; i < adapter->num_tx_queues; i++)
789 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
794 * parse and copy relevant protocol headers:
795 * For a tso pkt, relevant headers are L2/3/4 including options
796 * For a pkt requesting csum offloading, they are L2/3 and may include L4
797 * if it's a TCP/UDP pkt
800 * -1: an error occurred during parsing
801 * 0: protocol headers parsed, but too big to be copied
802 * 1: protocol headers parsed and copied
805 * 1. related *ctx fields are updated.
806 * 2. ctx->copy_size is # of bytes copied
807 * 3. the portion copied is guaranteed to be in the linear part
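 *
 * For example (illustrative): for a typical TSO pkt over IPv4 with no IP
 * options and TCP timestamps, eth_ip_hdr_size = 14 + 20 = 34 and
 * l4_hdr_size = 32, so copy_size = 66 bytes are copied into the data ring.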
811 vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
812 struct vmxnet3_tx_ctx *ctx,
813 struct vmxnet3_adapter *adapter)
815 struct Vmxnet3_TxDataDesc *tdd;
817 if (ctx->mss) { /* TSO */
818 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
819 ctx->l4_hdr_size = ((struct tcphdr *)
820 skb_transport_header(skb))->doff * 4;
821 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
823 if (skb->ip_summed == CHECKSUM_PARTIAL) {
824 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
827 struct iphdr *iph = (struct iphdr *)
828 skb_network_header(skb);
829 if (iph->protocol == IPPROTO_TCP)
830 ctx->l4_hdr_size = ((struct tcphdr *)
831 skb_transport_header(skb))->doff * 4;
832 else if (iph->protocol == IPPROTO_UDP)
834 sizeof(struct udphdr);
836 ctx->l4_hdr_size = 0;
838 /* for simplicity, don't copy L4 headers */
839 ctx->l4_hdr_size = 0;
841 ctx->copy_size = ctx->eth_ip_hdr_size +
844 ctx->eth_ip_hdr_size = 0;
845 ctx->l4_hdr_size = 0;
846 /* copy as much as allowed */
847 ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
851 /* make sure headers are accessible directly */
852 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
856 if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
857 tq->stats.oversized_hdr++;
862 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
864 memcpy(tdd->data, skb->data, ctx->copy_size);
865 dev_dbg(&adapter->netdev->dev,
866 "copy %u bytes to dataRing[%u]\n",
867 ctx->copy_size, tq->tx_ring.next2fill);
876 vmxnet3_prepare_tso(struct sk_buff *skb,
877 struct vmxnet3_tx_ctx *ctx)
879 struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
881 struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
883 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
886 struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
887 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
894 * Transmits a pkt through a given tq
896 * NETDEV_TX_OK: descriptors are set up successfully
897 * NETDEV_TX_OK: error occurred, the pkt is dropped
898 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
901 * 1. tx ring may be changed
902 * 2. tq stats may be updated accordingly
903 * 3. shared->txNumDeferred may be updated
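 *
 * For example (illustrative): a TSO skb of len 14546 with hlen 66 and mss 1448
 * carries 10 segments, so (14546 - 66 + 1448 - 1) / 1448 = 10 is added to
 * shared->txNumDeferred; the device is only kicked once txNumDeferred reaches
 * txThreshold.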
907 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
908 struct vmxnet3_adapter *adapter, struct net_device *netdev)
913 struct vmxnet3_tx_ctx ctx;
914 union Vmxnet3_GenericDesc *gdesc;
915 #ifdef __BIG_ENDIAN_BITFIELD
916 /* Use temporary descriptor to avoid touching bits multiple times */
917 union Vmxnet3_GenericDesc tempTxDesc;
920 /* conservatively estimate # of descriptors to use */
921 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
922 skb_shinfo(skb)->nr_frags + 1;
924 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
926 ctx.mss = skb_shinfo(skb)->gso_size;
928 if (skb_header_cloned(skb)) {
929 if (unlikely(pskb_expand_head(skb, 0, 0,
931 tq->stats.drop_tso++;
934 tq->stats.copy_skb_header++;
936 vmxnet3_prepare_tso(skb, &ctx);
938 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
940 /* non-tso pkts must not use more than
941 * VMXNET3_MAX_TXD_PER_PKT entries
943 if (skb_linearize(skb) != 0) {
944 tq->stats.drop_too_many_frags++;
947 tq->stats.linearized++;
949 /* recalculate the # of descriptors to use */
950 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
954 spin_lock_irqsave(&tq->tx_lock, flags);
956 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
957 tq->stats.tx_ring_full++;
958 dev_dbg(&adapter->netdev->dev,
959 "tx queue stopped on %s, next2comp %u"
960 " next2fill %u\n", adapter->netdev->name,
961 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
963 vmxnet3_tq_stop(tq, adapter);
964 spin_unlock_irqrestore(&tq->tx_lock, flags);
965 return NETDEV_TX_BUSY;
969 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
971 BUG_ON(ret <= 0 && ctx.copy_size != 0);
972 /* hdrs parsed, check against other limits */
974 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
975 VMXNET3_MAX_TX_BUF_SIZE)) {
979 if (skb->ip_summed == CHECKSUM_PARTIAL) {
980 if (unlikely(ctx.eth_ip_hdr_size +
982 VMXNET3_MAX_CSUM_OFFSET)) {
988 tq->stats.drop_hdr_inspect_err++;
989 goto unlock_drop_pkt;
992 /* fill tx descs related to addr & len */
993 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
995 /* setup the EOP desc */
996 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
998 /* setup the SOP desc */
999 #ifdef __BIG_ENDIAN_BITFIELD
1000 gdesc = &tempTxDesc;
1001 gdesc->dword[2] = ctx.sop_txd->dword[2];
1002 gdesc->dword[3] = ctx.sop_txd->dword[3];
1004 gdesc = ctx.sop_txd;
1007 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1008 gdesc->txd.om = VMXNET3_OM_TSO;
1009 gdesc->txd.msscof = ctx.mss;
1010 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1011 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1013 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1014 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1015 gdesc->txd.om = VMXNET3_OM_CSUM;
1016 gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1020 gdesc->txd.msscof = 0;
1022 le32_add_cpu(&tq->shared->txNumDeferred, 1);
1025 if (vlan_tx_tag_present(skb)) {
1027 gdesc->txd.tci = vlan_tx_tag_get(skb);
1030 /* finally flips the GEN bit of the SOP desc. */
1031 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1033 #ifdef __BIG_ENDIAN_BITFIELD
1034 /* Finished updating the bitfields of the Tx Desc, so write them back in original
1037 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1038 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1039 gdesc = ctx.sop_txd;
1041 dev_dbg(&adapter->netdev->dev,
1042 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1043 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
1044 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1045 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1047 spin_unlock_irqrestore(&tq->tx_lock, flags);
1049 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1050 le32_to_cpu(tq->shared->txThreshold)) {
1051 tq->shared->txNumDeferred = 0;
1052 VMXNET3_WRITE_BAR0_REG(adapter,
1053 VMXNET3_REG_TXPROD + tq->qid * 8,
1054 tq->tx_ring.next2fill);
1057 return NETDEV_TX_OK;
1060 tq->stats.drop_oversized_hdr++;
1062 spin_unlock_irqrestore(&tq->tx_lock, flags);
1064 tq->stats.drop_total++;
1066 return NETDEV_TX_OK;
1071 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1073 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1075 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1076 return vmxnet3_tq_xmit(skb,
1077 &adapter->tx_queue[skb->queue_mapping],
1083 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1084 struct sk_buff *skb,
1085 union Vmxnet3_GenericDesc *gdesc)
1087 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1088 /* typical case: TCP/UDP over IP and both csums are correct */
1089 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
1090 VMXNET3_RCD_CSUM_OK) {
1091 skb->ip_summed = CHECKSUM_UNNECESSARY;
1092 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1093 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1094 BUG_ON(gdesc->rcd.frg);
1096 if (gdesc->rcd.csum) {
1097 skb->csum = htons(gdesc->rcd.csum);
1098 skb->ip_summed = CHECKSUM_PARTIAL;
1100 skb_checksum_none_assert(skb);
1104 skb_checksum_none_assert(skb);
1110 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1111 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1113 rq->stats.drop_err++;
1115 rq->stats.drop_fcs++;
1117 rq->stats.drop_total++;
1120 * We do not unmap and chain the rx buffer to the skb.
1121 * We basically pretend this buffer is not used and will be recycled
1122 * by vmxnet3_rq_alloc_rx_buf()
1126 * ctx->skb may be NULL if this is the first and the only one
1130 dev_kfree_skb_irq(ctx->skb);
1137 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1138 struct vmxnet3_adapter *adapter, int quota)
1140 static const u32 rxprod_reg[2] = {
1141 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1144 bool skip_page_frags = false;
1145 struct Vmxnet3_RxCompDesc *rcd;
1146 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1147 #ifdef __BIG_ENDIAN_BITFIELD
1148 struct Vmxnet3_RxDesc rxCmdDesc;
1149 struct Vmxnet3_RxCompDesc rxComp;
1151 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1153 while (rcd->gen == rq->comp_ring.gen) {
1154 struct vmxnet3_rx_buf_info *rbi;
1155 struct sk_buff *skb, *new_skb = NULL;
1156 struct page *new_page = NULL;
1158 struct Vmxnet3_RxDesc *rxd;
1160 struct vmxnet3_cmd_ring *ring = NULL;
1161 if (num_rxd >= quota) {
1162 /* we may stop even before we see the EOP desc of
1168 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1170 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1171 ring = rq->rx_ring + ring_idx;
1172 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1174 rbi = rq->buf_info[ring_idx] + idx;
1176 BUG_ON(rxd->addr != rbi->dma_addr ||
1177 rxd->len != rbi->len);
1179 if (unlikely(rcd->eop && rcd->err)) {
1180 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1184 if (rcd->sop) { /* first buf of the pkt */
1185 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1186 rcd->rqID != rq->qid);
1188 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1189 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1191 if (unlikely(rcd->len == 0)) {
1192 /* Pretend the rx buffer is skipped. */
1193 BUG_ON(!(rcd->sop && rcd->eop));
1194 dev_dbg(&adapter->netdev->dev,
1195 "rxRing[%u][%u] 0 length\n",
1200 skip_page_frags = false;
1201 ctx->skb = rbi->skb;
1202 new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
1203 if (new_skb == NULL) {
1204 /* Skb allocation failed, do not hand over this
1205 * skb to the stack. Reuse it. Drop the existing pkt
1207 rq->stats.rx_buf_alloc_failure++;
1209 rq->stats.drop_total++;
1210 skip_page_frags = true;
1214 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
1215 PCI_DMA_FROMDEVICE);
1217 skb_put(ctx->skb, rcd->len);
1219 /* Immediate refill */
1220 new_skb->dev = adapter->netdev;
1221 skb_reserve(new_skb, NET_IP_ALIGN);
1223 rbi->dma_addr = pci_map_single(adapter->pdev,
1224 rbi->skb->data, rbi->len,
1225 PCI_DMA_FROMDEVICE);
1226 rxd->addr = cpu_to_le64(rbi->dma_addr);
1227 rxd->len = rbi->len;
1230 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1232 /* non-SOP buffer must be type 1 in most cases */
1233 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1234 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1236 /* If an sop buffer was dropped, skip all
1237 * following non-sop fragments. They will be reused.
1239 if (skip_page_frags)
1242 new_page = alloc_page(GFP_ATOMIC);
1243 if (unlikely(new_page == NULL)) {
1244 /* Replacement page frag could not be allocated.
1245 * Reuse this page. Drop the pkt and free the
1246 * skb which contained this page as a frag. Skip
1247 * processing all the following non-sop frags.
1249 rq->stats.rx_buf_alloc_failure++;
1250 dev_kfree_skb(ctx->skb);
1252 skip_page_frags = true;
1257 pci_unmap_page(adapter->pdev,
1258 rbi->dma_addr, rbi->len,
1259 PCI_DMA_FROMDEVICE);
1261 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1264 /* Immediate refill */
1265 rbi->page = new_page;
1266 rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
1268 PCI_DMA_FROMDEVICE);
1269 rxd->addr = cpu_to_le64(rbi->dma_addr);
1270 rxd->len = rbi->len;
1276 skb->len += skb->data_len;
1278 vmxnet3_rx_csum(adapter, skb,
1279 (union Vmxnet3_GenericDesc *)rcd);
1280 skb->protocol = eth_type_trans(skb, adapter->netdev);
1282 if (unlikely(rcd->ts))
1283 __vlan_hwaccel_put_tag(skb, rcd->tci);
1285 if (adapter->netdev->features & NETIF_F_LRO)
1286 netif_receive_skb(skb);
1288 napi_gro_receive(&rq->napi, skb);
1294 /* device may have skipped some rx descs */
1295 ring->next2comp = idx;
1296 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1297 ring = rq->rx_ring + ring_idx;
1298 while (num_to_alloc) {
1299 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1303 /* Recv desc is ready to be used by the device */
1304 rxd->gen = ring->gen;
1305 vmxnet3_cmd_ring_adv_next2fill(ring);
1309 /* if needed, update the register */
1310 if (unlikely(rq->shared->updateRxProd)) {
1311 VMXNET3_WRITE_BAR0_REG(adapter,
1312 rxprod_reg[ring_idx] + rq->qid * 8,
1314 rq->uncommitted[ring_idx] = 0;
1317 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1318 vmxnet3_getRxComp(rcd,
1319 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1327 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1328 struct vmxnet3_adapter *adapter)
1331 struct Vmxnet3_RxDesc *rxd;
1333 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1334 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1335 #ifdef __BIG_ENDIAN_BITFIELD
1336 struct Vmxnet3_RxDesc rxDesc;
1338 vmxnet3_getRxDesc(rxd,
1339 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1341 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1342 rq->buf_info[ring_idx][i].skb) {
1343 pci_unmap_single(adapter->pdev, rxd->addr,
1344 rxd->len, PCI_DMA_FROMDEVICE);
1345 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1346 rq->buf_info[ring_idx][i].skb = NULL;
1347 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1348 rq->buf_info[ring_idx][i].page) {
1349 pci_unmap_page(adapter->pdev, rxd->addr,
1350 rxd->len, PCI_DMA_FROMDEVICE);
1351 put_page(rq->buf_info[ring_idx][i].page);
1352 rq->buf_info[ring_idx][i].page = NULL;
1356 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1357 rq->rx_ring[ring_idx].next2fill =
1358 rq->rx_ring[ring_idx].next2comp = 0;
1359 rq->uncommitted[ring_idx] = 0;
1362 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1363 rq->comp_ring.next2proc = 0;
1368 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1372 for (i = 0; i < adapter->num_rx_queues; i++)
1373 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1377 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1378 struct vmxnet3_adapter *adapter)
1383 /* all rx buffers must have already been freed */
1384 for (i = 0; i < 2; i++) {
1385 if (rq->buf_info[i]) {
1386 for (j = 0; j < rq->rx_ring[i].size; j++)
1387 BUG_ON(rq->buf_info[i][j].page != NULL);
1392 kfree(rq->buf_info[0]);
1394 for (i = 0; i < 2; i++) {
1395 if (rq->rx_ring[i].base) {
1396 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1397 * sizeof(struct Vmxnet3_RxDesc),
1398 rq->rx_ring[i].base,
1399 rq->rx_ring[i].basePA);
1400 rq->rx_ring[i].base = NULL;
1402 rq->buf_info[i] = NULL;
1405 if (rq->comp_ring.base) {
1406 pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1407 sizeof(struct Vmxnet3_RxCompDesc),
1408 rq->comp_ring.base, rq->comp_ring.basePA);
1409 rq->comp_ring.base = NULL;
1415 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1416 struct vmxnet3_adapter *adapter)
1420 /* initialize buf_info */
1421 for (i = 0; i < rq->rx_ring[0].size; i++) {
1423 /* 1st buf for a pkt is skbuff */
1424 if (i % adapter->rx_buf_per_pkt == 0) {
1425 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1426 rq->buf_info[0][i].len = adapter->skb_buf_size;
1427 } else { /* subsequent bufs for a pkt are frags */
1428 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1429 rq->buf_info[0][i].len = PAGE_SIZE;
1432 for (i = 0; i < rq->rx_ring[1].size; i++) {
1433 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1434 rq->buf_info[1][i].len = PAGE_SIZE;
1437 /* reset internal state and allocate buffers for both rings */
1438 for (i = 0; i < 2; i++) {
1439 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1440 rq->uncommitted[i] = 0;
1442 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1443 sizeof(struct Vmxnet3_RxDesc));
1444 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1446 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1448 /* need at least 1 rx buffer for the 1st ring */
1451 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
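	/* Only size - 1 buffers are handed to each ring: if every slot were
	 * filled, next2fill would wrap around to next2comp, and a completely
	 * full ring would be indistinguishable from an empty one.
	 */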
1453 /* reset the comp ring */
1454 rq->comp_ring.next2proc = 0;
1455 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1456 sizeof(struct Vmxnet3_RxCompDesc));
1457 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1460 rq->rx_ctx.skb = NULL;
1462 /* stats are not reset */
1468 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1472 for (i = 0; i < adapter->num_rx_queues; i++) {
1473 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1474 if (unlikely(err)) {
1475 dev_err(&adapter->netdev->dev, "%s: failed to "
1476 "initialize rx queue%i\n",
1477 adapter->netdev->name, i);
1487 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1491 struct vmxnet3_rx_buf_info *bi;
1493 for (i = 0; i < 2; i++) {
1495 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1496 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1497 &rq->rx_ring[i].basePA);
1498 if (!rq->rx_ring[i].base) {
1499 printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
1500 adapter->netdev->name, i);
1505 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1506 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1507 &rq->comp_ring.basePA);
1508 if (!rq->comp_ring.base) {
1509 printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
1510 adapter->netdev->name);
1514 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1515 rq->rx_ring[1].size);
1516 bi = kzalloc(sz, GFP_KERNEL);
1518 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1519 adapter->netdev->name);
1522 rq->buf_info[0] = bi;
1523 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1528 vmxnet3_rq_destroy(rq, adapter);
1534 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1538 for (i = 0; i < adapter->num_rx_queues; i++) {
1539 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1540 if (unlikely(err)) {
1541 dev_err(&adapter->netdev->dev,
1542 "%s: failed to create rx queue%i\n",
1543 adapter->netdev->name, i);
1549 vmxnet3_rq_destroy_all(adapter);
1554 /* Multiple queue aware polling function for tx and rx */
1557 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1559 int rcd_done = 0, i;
1560 if (unlikely(adapter->shared->ecr))
1561 vmxnet3_process_events(adapter);
1562 for (i = 0; i < adapter->num_tx_queues; i++)
1563 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1565 for (i = 0; i < adapter->num_rx_queues; i++)
1566 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1573 vmxnet3_poll(struct napi_struct *napi, int budget)
1575 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1576 struct vmxnet3_rx_queue, napi);
1579 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1581 if (rxd_done < budget) {
1582 napi_complete(napi);
1583 vmxnet3_enable_all_intrs(rx_queue->adapter);
1589 * NAPI polling function for MSI-X mode with multiple Rx queues
1590 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
1594 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1596 struct vmxnet3_rx_queue *rq = container_of(napi,
1597 struct vmxnet3_rx_queue, napi);
1598 struct vmxnet3_adapter *adapter = rq->adapter;
1601 /* When sharing interrupt with corresponding tx queue, process
1602 * tx completions in that queue as well
1604 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1605 struct vmxnet3_tx_queue *tq =
1606 &adapter->tx_queue[rq - adapter->rx_queue];
1607 vmxnet3_tq_tx_complete(tq, adapter);
1610 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1612 if (rxd_done < budget) {
1613 napi_complete(napi);
1614 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1620 #ifdef CONFIG_PCI_MSI
1623 * Handle completion interrupts on tx queues
1624 * Returns whether or not the intr is handled
1628 vmxnet3_msix_tx(int irq, void *data)
1630 struct vmxnet3_tx_queue *tq = data;
1631 struct vmxnet3_adapter *adapter = tq->adapter;
1633 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1634 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1636 /* Handle the case where only one irq is allocated for all tx queues */
1637 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1639 for (i = 0; i < adapter->num_tx_queues; i++) {
1640 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1641 vmxnet3_tq_tx_complete(txq, adapter);
1644 vmxnet3_tq_tx_complete(tq, adapter);
1646 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1653 * Handle completion interrupts on rx queues. Returns whether or not the
1658 vmxnet3_msix_rx(int irq, void *data)
1660 struct vmxnet3_rx_queue *rq = data;
1661 struct vmxnet3_adapter *adapter = rq->adapter;
1663 /* disable intr if needed */
1664 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1665 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1666 napi_schedule(&rq->napi);
1672 *----------------------------------------------------------------------------
1674 * vmxnet3_msix_event --
1676 * vmxnet3 msix event intr handler
1679 * whether or not the intr is handled
1681 *----------------------------------------------------------------------------
1685 vmxnet3_msix_event(int irq, void *data)
1687 struct net_device *dev = data;
1688 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1690 /* disable intr if needed */
1691 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1692 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1694 if (adapter->shared->ecr)
1695 vmxnet3_process_events(adapter);
1697 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1702 #endif /* CONFIG_PCI_MSI */
1705 /* Interrupt handler for vmxnet3 */
1707 vmxnet3_intr(int irq, void *dev_id)
1709 struct net_device *dev = dev_id;
1710 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1712 if (adapter->intr.type == VMXNET3_IT_INTX) {
1713 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1714 if (unlikely(icr == 0))
1720 /* disable intr if needed */
1721 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1722 vmxnet3_disable_all_intrs(adapter);
1724 napi_schedule(&adapter->rx_queue[0].napi);
1729 #ifdef CONFIG_NET_POLL_CONTROLLER
1731 /* netpoll callback. */
1733 vmxnet3_netpoll(struct net_device *netdev)
1735 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1738 switch (adapter->intr.type) {
1739 case VMXNET3_IT_MSIX:
1740 for (i = 0; i < adapter->num_rx_queues; i++)
1741 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
1743 case VMXNET3_IT_MSI:
1745 vmxnet3_intr(0, adapter->netdev);
1750 #endif /* CONFIG_NET_POLL_CONTROLLER */
1753 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1755 struct vmxnet3_intr *intr = &adapter->intr;
1759 #ifdef CONFIG_PCI_MSI
1760 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1761 for (i = 0; i < adapter->num_tx_queues; i++) {
1762 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1763 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1764 adapter->netdev->name, vector);
1766 intr->msix_entries[vector].vector,
1768 adapter->tx_queue[i].name,
1769 &adapter->tx_queue[i]);
1771 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1772 adapter->netdev->name, vector);
1775 dev_err(&adapter->netdev->dev,
1776 "Failed to request irq for MSIX, %s, "
1778 adapter->tx_queue[i].name, err);
1782 /* Handle the case where only 1 MSIx was allocated for
1784 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1785 for (; i < adapter->num_tx_queues; i++)
1786 adapter->tx_queue[i].comp_ring.intr_idx
1791 adapter->tx_queue[i].comp_ring.intr_idx
1795 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1798 for (i = 0; i < adapter->num_rx_queues; i++) {
1799 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1800 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1801 adapter->netdev->name, vector);
1803 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1804 adapter->netdev->name, vector);
1805 err = request_irq(intr->msix_entries[vector].vector,
1807 adapter->rx_queue[i].name,
1808 &(adapter->rx_queue[i]));
1810 printk(KERN_ERR "Failed to request irq for MSIX"
1812 adapter->rx_queue[i].name, err);
1816 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1819 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1820 adapter->netdev->name, vector);
1821 err = request_irq(intr->msix_entries[vector].vector,
1822 vmxnet3_msix_event, 0,
1823 intr->event_msi_vector_name, adapter->netdev);
1824 intr->event_intr_idx = vector;
1826 } else if (intr->type == VMXNET3_IT_MSI) {
1827 adapter->num_rx_queues = 1;
1828 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1829 adapter->netdev->name, adapter->netdev);
1832 adapter->num_rx_queues = 1;
1833 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1834 IRQF_SHARED, adapter->netdev->name,
1836 #ifdef CONFIG_PCI_MSI
1839 intr->num_intrs = vector + 1;
1841 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1842 ":%d\n", adapter->netdev->name, intr->type, err);
1844 /* Number of rx queues will not change after this */
1845 for (i = 0; i < adapter->num_rx_queues; i++) {
1846 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1848 rq->qid2 = i + adapter->num_rx_queues;
1853 /* init our intr settings */
1854 for (i = 0; i < intr->num_intrs; i++)
1855 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1856 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1857 adapter->intr.event_intr_idx = 0;
1858 for (i = 0; i < adapter->num_tx_queues; i++)
1859 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1860 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1863 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1864 "allocated\n", adapter->netdev->name, intr->type,
1865 intr->mask_mode, intr->num_intrs);
1873 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1875 struct vmxnet3_intr *intr = &adapter->intr;
1876 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1878 switch (intr->type) {
1879 #ifdef CONFIG_PCI_MSI
1880 case VMXNET3_IT_MSIX:
1884 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1885 for (i = 0; i < adapter->num_tx_queues; i++) {
1886 free_irq(intr->msix_entries[vector++].vector,
1887 &(adapter->tx_queue[i]));
1888 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1893 for (i = 0; i < adapter->num_rx_queues; i++) {
1894 free_irq(intr->msix_entries[vector++].vector,
1895 &(adapter->rx_queue[i]));
1898 free_irq(intr->msix_entries[vector].vector,
1900 BUG_ON(vector >= intr->num_intrs);
1904 case VMXNET3_IT_MSI:
1905 free_irq(adapter->pdev->irq, adapter->netdev);
1907 case VMXNET3_IT_INTX:
1908 free_irq(adapter->pdev->irq, adapter->netdev);
1917 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1919 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1922 /* allow untagged pkts */
1923 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1925 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1926 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1931 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1933 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1935 if (!(netdev->flags & IFF_PROMISC)) {
1936 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1937 unsigned long flags;
1939 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1940 spin_lock_irqsave(&adapter->cmd_lock, flags);
1941 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1942 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1943 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1946 set_bit(vid, adapter->active_vlans);
1951 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1953 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1955 if (!(netdev->flags & IFF_PROMISC)) {
1956 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1957 unsigned long flags;
1959 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1960 spin_lock_irqsave(&adapter->cmd_lock, flags);
1961 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1962 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1963 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1966 clear_bit(vid, adapter->active_vlans);
1971 vmxnet3_copy_mc(struct net_device *netdev)
1974 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1976 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1978 /* We may be called with BH disabled */
1979 buf = kmalloc(sz, GFP_ATOMIC);
1981 struct netdev_hw_addr *ha;
1984 netdev_for_each_mc_addr(ha, netdev)
1985 memcpy(buf + i++ * ETH_ALEN, ha->addr,
1994 vmxnet3_set_mc(struct net_device *netdev)
1996 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1997 unsigned long flags;
1998 struct Vmxnet3_RxFilterConf *rxConf =
1999 &adapter->shared->devRead.rxFilterConf;
2000 u8 *new_table = NULL;
2001 u32 new_mode = VMXNET3_RXM_UCAST;
2003 if (netdev->flags & IFF_PROMISC) {
2004 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2005 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2007 new_mode |= VMXNET3_RXM_PROMISC;
2009 vmxnet3_restore_vlan(adapter);
2012 if (netdev->flags & IFF_BROADCAST)
2013 new_mode |= VMXNET3_RXM_BCAST;
2015 if (netdev->flags & IFF_ALLMULTI)
2016 new_mode |= VMXNET3_RXM_ALL_MULTI;
2018 if (!netdev_mc_empty(netdev)) {
2019 new_table = vmxnet3_copy_mc(netdev);
2021 new_mode |= VMXNET3_RXM_MCAST;
2022 rxConf->mfTableLen = cpu_to_le16(
2023 netdev_mc_count(netdev) * ETH_ALEN);
2024 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2027 printk(KERN_INFO "%s: failed to copy mcast list"
2028 ", setting ALL_MULTI\n", netdev->name);
2029 new_mode |= VMXNET3_RXM_ALL_MULTI;
2034 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2035 rxConf->mfTableLen = 0;
2036 rxConf->mfTablePA = 0;
2039 spin_lock_irqsave(&adapter->cmd_lock, flags);
2040 if (new_mode != rxConf->rxMode) {
2041 rxConf->rxMode = cpu_to_le32(new_mode);
2042 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2043 VMXNET3_CMD_UPDATE_RX_MODE);
2044 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2045 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2048 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2049 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2050 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2056 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2060 for (i = 0; i < adapter->num_rx_queues; i++)
2061 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2066 * Set up driver_shared based on settings in adapter.
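 * This covers the driver/GOS info, the feature flags (RXCSUM, LRO, RXVLAN),
 * the per-queue descriptor addresses and ring sizes, the optional RSS
 * configuration, the interrupt moderation settings and the rx filter state,
 * all of which the device reads when it is activated.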
2070 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2072 struct Vmxnet3_DriverShared *shared = adapter->shared;
2073 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2074 struct Vmxnet3_TxQueueConf *tqc;
2075 struct Vmxnet3_RxQueueConf *rqc;
2078 memset(shared, 0, sizeof(*shared));
2080 /* driver settings */
2081 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2082 devRead->misc.driverInfo.version = cpu_to_le32(
2083 VMXNET3_DRIVER_VERSION_NUM);
2084 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2085 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2086 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2087 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2088 *((u32 *)&devRead->misc.driverInfo.gos));
2089 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2090 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2092 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2093 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2095 /* set up feature flags */
2096 if (adapter->netdev->features & NETIF_F_RXCSUM)
2097 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2099 if (adapter->netdev->features & NETIF_F_LRO) {
2100 devRead->misc.uptFeatures |= UPT1_F_LRO;
2101 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2103 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2104 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2106 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2107 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2108 devRead->misc.queueDescLen = cpu_to_le32(
2109 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2110 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2112 /* tx queue settings */
2113 devRead->misc.numTxQueues = adapter->num_tx_queues;
2114 for (i = 0; i < adapter->num_tx_queues; i++) {
2115 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2116 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2117 tqc = &adapter->tqd_start[i].conf;
2118 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2119 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2120 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2121 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
2122 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2123 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2124 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2125 tqc->ddLen = cpu_to_le32(
2126 sizeof(struct vmxnet3_tx_buf_info) *
2128 tqc->intrIdx = tq->comp_ring.intr_idx;
2131 /* rx queue settings */
2132 devRead->misc.numRxQueues = adapter->num_rx_queues;
2133 for (i = 0; i < adapter->num_rx_queues; i++) {
2134 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2135 rqc = &adapter->rqd_start[i].conf;
2136 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2137 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2138 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2139 rqc->ddPA = cpu_to_le64(virt_to_phys(
2141 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2142 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2143 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2144 rqc->ddLen = cpu_to_le32(
2145 sizeof(struct vmxnet3_rx_buf_info) *
2146 (rqc->rxRingSize[0] +
2147 rqc->rxRingSize[1]));
2148 rqc->intrIdx = rq->comp_ring.intr_idx;
2152 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2155 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2156 devRead->misc.uptFeatures |= UPT1_F_RSS;
2157 devRead->misc.numRxQueues = adapter->num_rx_queues;
2158 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2159 UPT1_RSS_HASH_TYPE_IPV4 |
2160 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2161 UPT1_RSS_HASH_TYPE_IPV6;
2162 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2163 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2164 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2165 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2166 for (i = 0; i < rssConf->indTableSize; i++)
2167 rssConf->indTable[i] = i % adapter->num_rx_queues;
2169 devRead->rssConfDesc.confVer = 1;
2170 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2171 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2174 #endif /* VMXNET3_RSS */
2177 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2179 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2180 for (i = 0; i < adapter->intr.num_intrs; i++)
2181 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2183 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2184 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2186 /* rx filter settings */
2187 devRead->rxFilterConf.rxMode = 0;
2188 vmxnet3_restore_vlan(adapter);
2189 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2191 /* the rest are already zeroed */
2196 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2200 unsigned long flags;
2202 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2203 " ring sizes %u %u %u\n", adapter->netdev->name,
2204 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2205 adapter->tx_queue[0].tx_ring.size,
2206 adapter->rx_queue[0].rx_ring[0].size,
2207 adapter->rx_queue[0].rx_ring[1].size);
2209 vmxnet3_tq_init_all(adapter);
2210 err = vmxnet3_rq_init_all(adapter);
2212 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
2213 adapter->netdev->name, err);
2217 err = vmxnet3_request_irqs(adapter);
2219 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
2220 adapter->netdev->name, err);
2224 vmxnet3_setup_driver_shared(adapter);
2226 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2227 adapter->shared_pa));
2228 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2229 adapter->shared_pa));
2230 spin_lock_irqsave(&adapter->cmd_lock, flags);
2231 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2232 VMXNET3_CMD_ACTIVATE_DEV);
2233 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2234 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2237 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
2238 adapter->netdev->name, ret);
2243 for (i = 0; i < adapter->num_rx_queues; i++) {
2244 VMXNET3_WRITE_BAR0_REG(adapter,
2245 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2246 adapter->rx_queue[i].rx_ring[0].next2fill);
2247 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2248 (i * VMXNET3_REG_ALIGN)),
2249 adapter->rx_queue[i].rx_ring[1].next2fill);
2252 /* Apply the rx filter settings last. */
2253 vmxnet3_set_mc(adapter->netdev);
2256 * Check the link state when first activating the device. It will start
2257 * the tx queue if the link is up.
2259 vmxnet3_check_link(adapter, true);
2260 for (i = 0; i < adapter->num_rx_queues; i++)
2261 napi_enable(&adapter->rx_queue[i].napi);
2262 vmxnet3_enable_all_intrs(adapter);
2263 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2267 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2268 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2269 vmxnet3_free_irqs(adapter);
2272 /* free up buffers we allocated */
2273 vmxnet3_rq_cleanup_all(adapter);
2279 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2281 unsigned long flags;
2282 spin_lock_irqsave(&adapter->cmd_lock, flags);
2283 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2284 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2289 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2292 unsigned long flags;
2293 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2297 spin_lock_irqsave(&adapter->cmd_lock, flags);
2298 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2299 VMXNET3_CMD_QUIESCE_DEV);
2300 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2301 vmxnet3_disable_all_intrs(adapter);
2303 for (i = 0; i < adapter->num_rx_queues; i++)
2304 napi_disable(&adapter->rx_queue[i].napi);
2305 netif_tx_disable(adapter->netdev);
2306 adapter->link_speed = 0;
2307 netif_carrier_off(adapter->netdev);
2309 vmxnet3_tq_cleanup_all(adapter);
2310 vmxnet3_rq_cleanup_all(adapter);
2311 vmxnet3_free_irqs(adapter);
2317 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2321 tmp = *(u32 *)mac;
2322 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2324 tmp = (mac[5] << 8) | mac[4];
2325 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2330 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2332 struct sockaddr *addr = p;
2333 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2335 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2336 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2342 /* ==================== initialization and cleanup routines ============ */
2345 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2348 unsigned long mmio_start, mmio_len;
2349 struct pci_dev *pdev = adapter->pdev;
2351 err = pci_enable_device(pdev);
2353 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
2354 pci_name(pdev), err);
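/*
 * Prefer 64-bit DMA: if the device can use a 64-bit streaming mask, the
 * coherent mask must be 64-bit as well; otherwise fall back to 32-bit
 * addressing.  *dma64 records which one was chosen.
 */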
2358 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2359 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2360 printk(KERN_ERR "pci_set_consistent_dma_mask failed "
2361 "for adapter %s\n", pci_name(pdev));
2367 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2368 printk(KERN_ERR "pci_set_dma_mask failed for adapter "
2369 "%s\n", pci_name(pdev));
2376 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2377 vmxnet3_driver_name);
2379 printk(KERN_ERR "Failed to request region for adapter %s: "
2380 "error %d\n", pci_name(pdev), err);
2384 pci_set_master(pdev);
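/*
 * Map both memory BARs: BAR 0 (hw_addr0) carries the registers touched on
 * the data path (IMR, RXPROD, TXPROD), BAR 1 (hw_addr1) the command and
 * configuration registers (CMD, MACL/MACH, DSAL/DSAH).
 */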
2386 mmio_start = pci_resource_start(pdev, 0);
2387 mmio_len = pci_resource_len(pdev, 0);
2388 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2389 if (!adapter->hw_addr0) {
2390 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
2396 mmio_start = pci_resource_start(pdev, 1);
2397 mmio_len = pci_resource_len(pdev, 1);
2398 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2399 if (!adapter->hw_addr1) {
2400 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
2401 pci_name(pdev));
2408 iounmap(adapter->hw_addr0);
2410 pci_release_selected_regions(pdev, (1 << 2) - 1);
2412 pci_disable_device(pdev);
2418 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2420 BUG_ON(!adapter->pdev);
2422 iounmap(adapter->hw_addr0);
2423 iounmap(adapter->hw_addr1);
2424 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2425 pci_disable_device(adapter->pdev);
2430 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2432 size_t sz, i, ring0_size, ring1_size, comp_size;
2433 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2436 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2437 VMXNET3_MAX_ETH_HDR_SIZE) {
2438 adapter->skb_buf_size = adapter->netdev->mtu +
2439 VMXNET3_MAX_ETH_HDR_SIZE;
2440 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2441 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2443 adapter->rx_buf_per_pkt = 1;
2445 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2446 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2447 VMXNET3_MAX_ETH_HDR_SIZE;
2448 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2452 * for simplicity, force the ring0 size to be a multiple of
2453 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2455 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2456 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2457 ring0_size = (ring0_size + sz - 1) / sz * sz;
2458 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2459 sz * sz);
2460 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2461 comp_size = ring0_size + ring1_size;
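/*
 * Worked example (assuming VMXNET3_RING_SIZE_ALIGN is 32): with
 * rx_buf_per_pkt == 3, sz == 96, so a requested ring0 size of 256 is
 * rounded up to 288; the completion ring is then sized to cover both rx
 * rings (ring0 + ring1).
 */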
2463 for (i = 0; i < adapter->num_rx_queues; i++) {
2464 rq = &adapter->rx_queue[i];
2465 rq->rx_ring[0].size = ring0_size;
2466 rq->rx_ring[1].size = ring1_size;
2467 rq->comp_ring.size = comp_size;
2473 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2474 u32 rx_ring_size, u32 rx_ring2_size)
2478 for (i = 0; i < adapter->num_tx_queues; i++) {
2479 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2480 tq->tx_ring.size = tx_ring_size;
2481 tq->data_ring.size = tx_ring_size;
2482 tq->comp_ring.size = tx_ring_size;
2483 tq->shared = &adapter->tqd_start[i].ctrl;
2485 tq->adapter = adapter;
2487 err = vmxnet3_tq_create(tq, adapter);
2489 * Too late to change num_tx_queues. We cannot make do with
2490 * fewer queues than we asked for.
2496 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2497 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2498 vmxnet3_adjust_rx_ring_size(adapter);
2499 for (i = 0; i < adapter->num_rx_queues; i++) {
2500 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2501 /* qid and qid2 for rx queues will be assigned later when num
2502 * of rx queues is finalized after allocating intrs */
2503 rq->shared = &adapter->rqd_start[i].ctrl;
2504 rq->adapter = adapter;
2505 err = vmxnet3_rq_create(rq, adapter);
2508 printk(KERN_ERR "Could not allocate any rx "
2509 "queues. Aborting.\n");
2512 printk(KERN_INFO "Number of rx queues changed "
2513 "to %d\n", i);
2514 adapter->num_rx_queues = i;
2522 vmxnet3_tq_destroy_all(adapter);
2527 vmxnet3_open(struct net_device *netdev)
2529 struct vmxnet3_adapter *adapter;
2532 adapter = netdev_priv(netdev);
2534 for (i = 0; i < adapter->num_tx_queues; i++)
2535 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2537 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2538 VMXNET3_DEF_RX_RING_SIZE,
2539 VMXNET3_DEF_RX_RING_SIZE);
2543 err = vmxnet3_activate_dev(adapter);
2550 vmxnet3_rq_destroy_all(adapter);
2551 vmxnet3_tq_destroy_all(adapter);
2558 vmxnet3_close(struct net_device *netdev)
2560 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2563 * Reset_work may be in the middle of resetting the device, wait for its
2564 * completion.
2566 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2569 vmxnet3_quiesce_dev(adapter);
2571 vmxnet3_rq_destroy_all(adapter);
2572 vmxnet3_tq_destroy_all(adapter);
2574 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2582 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2587 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2588 * vmxnet3_close() will deadlock.
2590 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2592 /* we need to enable NAPI, otherwise dev_close will deadlock */
2593 for (i = 0; i < adapter->num_rx_queues; i++)
2594 napi_enable(&adapter->rx_queue[i].napi);
2595 dev_close(adapter->netdev);
2600 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2602 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2605 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2608 netdev->mtu = new_mtu;
2611 * Reset_work may be in the middle of resetting the device, wait for its
2612 * completion.
2614 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2617 if (netif_running(netdev)) {
2618 vmxnet3_quiesce_dev(adapter);
2619 vmxnet3_reset_dev(adapter);
2621 /* we need to re-create the rx queue based on the new mtu */
2622 vmxnet3_rq_destroy_all(adapter);
2623 vmxnet3_adjust_rx_ring_size(adapter);
2624 err = vmxnet3_rq_create_all(adapter);
2626 printk(KERN_ERR "%s: failed to re-create rx queues,"
2627 " error %d. Closing it.\n", netdev->name, err);
2631 err = vmxnet3_activate_dev(adapter);
2633 printk(KERN_ERR "%s: failed to re-activate, error %d. "
2634 "Closing it\n", netdev->name, err);
2640 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2642 vmxnet3_force_close(adapter);
2649 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2651 struct net_device *netdev = adapter->netdev;
2653 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2654 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
2655 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
2656 NETIF_F_LRO;
2657 if (dma64)
2658 netdev->hw_features |= NETIF_F_HIGHDMA;
2659 netdev->vlan_features = netdev->hw_features &
2660 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2661 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
2663 netdev_info(adapter->netdev,
2664 "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
2665 dma64 ? " highDMA" : "");
2670 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2674 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2675 *(u32 *)mac = tmp;
2677 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2678 mac[4] = tmp & 0xff;
2679 mac[5] = (tmp >> 8) & 0xff;
2682 #ifdef CONFIG_PCI_MSI
2685 * Enable MSIx vectors.
2687 * 0 on successful enabling of required vectors,
2688 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
2689 * required could be enabled,
2690 * number of vectors which can be enabled otherwise (this number is smaller
2691 * than VMXNET3_LINUX_MIN_MSIX_VECT)
2695 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2698 int err = 0, vector_threshold;
2699 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2701 while (vectors >= vector_threshold) {
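/*
 * pci_enable_msix() returns 0 on success, a negative errno on a hard
 * failure, or a positive count of the vectors that could be allocated
 * when the request was too large; the branches below handle each case.
 */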
2702 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2705 adapter->intr.num_intrs = vectors;
2707 } else if (err < 0) {
2708 printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2709 " %d\n", adapter->netdev->name, err);
2711 } else if (err < vector_threshold) {
2714 /* If the required number of MSI-X vectors cannot be enabled,
2715 * retry with the minimum number of vectors required.
2716 */
2717 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2718 " %d instead\n", vectors, adapter->netdev->name,
2719 vector_threshold);
2720 vectors = vector_threshold;
2724 printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
2725 " is lower than the minimum required.\n");
2730 #endif /* CONFIG_PCI_MSI */
2733 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2736 unsigned long flags;
2739 spin_lock_irqsave(&adapter->cmd_lock, flags);
2740 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2741 VMXNET3_CMD_GET_CONF_INTR);
2742 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2743 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2744 adapter->intr.type = cfg & 0x3;
2745 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2747 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2748 adapter->intr.type = VMXNET3_IT_MSIX;
2751 #ifdef CONFIG_PCI_MSI
2752 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2753 int vector, err = 0;
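/*
 * Vector budget: one vector per tx queue (or a single shared one with
 * VMXNET3_INTR_TXSHARE), one per rx queue unless tx and rx buddy-share
 * their vectors, plus one vector for link events, with a floor of
 * VMXNET3_LINUX_MIN_MSIX_VECT.
 */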
2755 adapter->intr.num_intrs = (adapter->share_intr ==
2756 VMXNET3_INTR_TXSHARE) ? 1 :
2757 adapter->num_tx_queues;
2758 adapter->intr.num_intrs += (adapter->share_intr ==
2759 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2760 adapter->num_rx_queues;
2761 adapter->intr.num_intrs += 1; /* for link event */
2763 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2764 VMXNET3_LINUX_MIN_MSIX_VECT
2765 ? adapter->intr.num_intrs :
2766 VMXNET3_LINUX_MIN_MSIX_VECT);
2768 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2769 adapter->intr.msix_entries[vector].entry = vector;
2771 err = vmxnet3_acquire_msix_vectors(adapter,
2772 adapter->intr.num_intrs);
2773 /* If we cannot allocate one MSIx vector per queue
2774 * then limit the number of rx queues to 1
2776 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2777 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2778 || adapter->num_rx_queues != 1) {
2779 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2780 printk(KERN_ERR "Number of rx queues : 1\n");
2781 adapter->num_rx_queues = 1;
2782 adapter->intr.num_intrs =
2783 VMXNET3_LINUX_MIN_MSIX_VECT;
2790 /* If we cannot allocate MSIx vectors use only one rx queue */
2791 printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
2792 "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
2794 adapter->intr.type = VMXNET3_IT_MSI;
2797 if (adapter->intr.type == VMXNET3_IT_MSI) {
2799 err = pci_enable_msi(adapter->pdev);
2801 adapter->num_rx_queues = 1;
2802 adapter->intr.num_intrs = 1;
2806 #endif /* CONFIG_PCI_MSI */
2808 adapter->num_rx_queues = 1;
2809 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2810 adapter->intr.type = VMXNET3_IT_INTX;
2812 /* INT-X related setting */
2813 adapter->intr.num_intrs = 1;
2818 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2820 if (adapter->intr.type == VMXNET3_IT_MSIX)
2821 pci_disable_msix(adapter->pdev);
2822 else if (adapter->intr.type == VMXNET3_IT_MSI)
2823 pci_disable_msi(adapter->pdev);
2825 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2830 vmxnet3_tx_timeout(struct net_device *netdev)
2832 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2833 adapter->tx_timeout_count++;
2835 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2836 schedule_work(&adapter->work);
2837 netif_wake_queue(adapter->netdev);
2842 vmxnet3_reset_work(struct work_struct *data)
2844 struct vmxnet3_adapter *adapter;
2846 adapter = container_of(data, struct vmxnet3_adapter, work);
2848 /* if another thread is resetting the device, no need to proceed */
2849 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2852 /* if the device is closed, we must leave it alone */
2854 if (netif_running(adapter->netdev)) {
2855 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2856 vmxnet3_quiesce_dev(adapter);
2857 vmxnet3_reset_dev(adapter);
2858 vmxnet3_activate_dev(adapter);
2860 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2864 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2868 static int __devinit
2869 vmxnet3_probe_device(struct pci_dev *pdev,
2870 const struct pci_device_id *id)
2872 static const struct net_device_ops vmxnet3_netdev_ops = {
2873 .ndo_open = vmxnet3_open,
2874 .ndo_stop = vmxnet3_close,
2875 .ndo_start_xmit = vmxnet3_xmit_frame,
2876 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2877 .ndo_change_mtu = vmxnet3_change_mtu,
2878 .ndo_set_features = vmxnet3_set_features,
2879 .ndo_get_stats64 = vmxnet3_get_stats64,
2880 .ndo_tx_timeout = vmxnet3_tx_timeout,
2881 .ndo_set_rx_mode = vmxnet3_set_mc,
2882 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2883 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2884 #ifdef CONFIG_NET_POLL_CONTROLLER
2885 .ndo_poll_controller = vmxnet3_netpoll,
2889 bool dma64 = false; /* silence a gcc 'may be used uninitialized' warning */
2891 struct net_device *netdev;
2892 struct vmxnet3_adapter *adapter;
2898 if (!pci_msi_enabled())
2899 enable_mq = 0;
2903 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2904 (int)num_online_cpus());
2908 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
2911 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2912 (int)num_online_cpus());
2916 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
2917 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2918 max(num_tx_queues, num_rx_queues));
2919 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2920 num_tx_queues, num_rx_queues);
2923 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2924 "%s\n", pci_name(pdev));
2928 pci_set_drvdata(pdev, netdev);
2929 adapter = netdev_priv(netdev);
2930 adapter->netdev = netdev;
2931 adapter->pdev = pdev;
2933 spin_lock_init(&adapter->cmd_lock);
2934 adapter->shared = pci_alloc_consistent(adapter->pdev,
2935 sizeof(struct Vmxnet3_DriverShared),
2936 &adapter->shared_pa);
2937 if (!adapter->shared) {
2938 printk(KERN_ERR "Failed to allocate memory for %s\n",
2941 goto err_alloc_shared;
2944 adapter->num_rx_queues = num_rx_queues;
2945 adapter->num_tx_queues = num_tx_queues;
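/*
 * The tx and rx queue descriptors are carved out of one contiguous
 * DMA-coherent allocation; rqd_start points just past the tx queue
 * descriptors inside that block.
 */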
2947 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2948 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2949 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2950 &adapter->queue_desc_pa);
2952 if (!adapter->tqd_start) {
2953 printk(KERN_ERR "Failed to allocate memory for %s\n",
2956 goto err_alloc_queue_desc;
2958 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2959 adapter->num_tx_queues);
2961 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2962 if (adapter->pm_conf == NULL) {
2963 printk(KERN_ERR "Failed to allocate memory for %s\n",
2971 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2972 if (adapter->rss_conf == NULL) {
2973 printk(KERN_ERR "Failed to allocate memory for %s\n",
2978 #endif /* VMXNET3_RSS */
2980 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
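/*
 * Version handshake: VRRS and UVRS advertise the device and UPT API
 * revisions supported by the emulation; the driver requires revision 1
 * and writes back the revision it will use.
 */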
2984 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2986 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2988 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2989 " %s\n", ver, pci_name(pdev));
2994 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2996 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2998 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2999 "adapter %s\n", ver, pci_name(pdev));
3004 SET_NETDEV_DEV(netdev, &pdev->dev);
3005 vmxnet3_declare_features(adapter, dma64);
3007 adapter->dev_number = atomic_read(&devices_found);
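/*
 * Buddy-sharing lets a tx queue and the rx queue with the same index
 * share one MSI-X vector, so it is only honoured when the tx and rx
 * queue counts match; otherwise fall back to dedicated vectors.
 */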
3009 adapter->share_intr = irq_share_mode;
3010 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
3011 adapter->num_tx_queues != adapter->num_rx_queues)
3012 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3014 vmxnet3_alloc_intr_resources(adapter);
3017 if (adapter->num_rx_queues > 1 &&
3018 adapter->intr.type == VMXNET3_IT_MSIX) {
3019 adapter->rss = true;
3020 printk(KERN_INFO "RSS is enabled.\n");
3022 adapter->rss = false;
3026 vmxnet3_read_mac_addr(adapter, mac);
3027 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3029 netdev->netdev_ops = &vmxnet3_netdev_ops;
3030 vmxnet3_set_ethtool_ops(netdev);
3031 netdev->watchdog_timeo = 5 * HZ;
3033 INIT_WORK(&adapter->work, vmxnet3_reset_work);
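/*
 * With MSI-X each rx queue gets its own NAPI context that polls only that
 * queue; with MSI or INTx a single NAPI context attached to rx queue 0
 * services the whole device.
 */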
3035 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3037 for (i = 0; i < adapter->num_rx_queues; i++) {
3038 netif_napi_add(adapter->netdev,
3039 &adapter->rx_queue[i].napi,
3040 vmxnet3_poll_rx_only, 64);
3043 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3047 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3048 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3050 err = register_netdev(netdev);
3053 printk(KERN_ERR "Failed to register adapter %s\n",
3058 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3059 vmxnet3_check_link(adapter, false);
3060 atomic_inc(&devices_found);
3064 vmxnet3_free_intr_resources(adapter);
3066 vmxnet3_free_pci_resources(adapter);
3069 kfree(adapter->rss_conf);
3072 kfree(adapter->pm_conf);
3074 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3075 adapter->queue_desc_pa);
3076 err_alloc_queue_desc:
3077 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3078 adapter->shared, adapter->shared_pa);
3080 pci_set_drvdata(pdev, NULL);
3081 free_netdev(netdev);
3086 static void __devexit
3087 vmxnet3_remove_device(struct pci_dev *pdev)
3089 struct net_device *netdev = pci_get_drvdata(pdev);
3090 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3096 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3097 (int)num_online_cpus());
3101 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3103 cancel_work_sync(&adapter->work);
3105 unregister_netdev(netdev);
3107 vmxnet3_free_intr_resources(adapter);
3108 vmxnet3_free_pci_resources(adapter);
3110 kfree(adapter->rss_conf);
3112 kfree(adapter->pm_conf);
3114 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3115 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3116 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3117 adapter->queue_desc_pa);
3118 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3119 adapter->shared, adapter->shared_pa);
3120 free_netdev(netdev);
3127 vmxnet3_suspend(struct device *device)
3129 struct pci_dev *pdev = to_pci_dev(device);
3130 struct net_device *netdev = pci_get_drvdata(pdev);
3131 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3132 struct Vmxnet3_PMConf *pmConf;
3133 struct ethhdr *ehdr;
3134 struct arphdr *ahdr;
3136 struct in_device *in_dev;
3137 struct in_ifaddr *ifa;
3138 unsigned long flags;
3141 if (!netif_running(netdev))
3144 for (i = 0; i < adapter->num_rx_queues; i++)
3145 napi_disable(&adapter->rx_queue[i].napi);
3147 vmxnet3_disable_all_intrs(adapter);
3148 vmxnet3_free_irqs(adapter);
3149 vmxnet3_free_intr_resources(adapter);
3151 netif_device_detach(netdev);
3152 netif_tx_stop_all_queues(netdev);
3154 /* Create wake-up filters. */
3155 pmConf = adapter->pm_conf;
3156 memset(pmConf, 0, sizeof(*pmConf));
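/*
 * In a wake-up pattern filter each bit of mask[] covers one byte of
 * pattern[]: bit n of mask[m] selects pattern byte m * 8 + n.  The
 * unicast filter below therefore uses a single mask byte of 0x3F to match
 * the first ETH_ALEN bytes (the destination MAC) of an incoming frame.
 */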
3158 if (adapter->wol & WAKE_UCAST) {
3159 pmConf->filters[i].patternSize = ETH_ALEN;
3160 pmConf->filters[i].maskSize = 1;
3161 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3162 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3164 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3168 if (adapter->wol & WAKE_ARP) {
3169 in_dev = in_dev_get(netdev);
3173 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3177 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3178 sizeof(struct arphdr) + /* ARP header */
3179 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3180 2 * sizeof(u32); /*2 IPv4 addresses */
3181 pmConf->filters[i].maskSize =
3182 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3184 /* ETH_P_ARP in Ethernet header. */
3185 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3186 ehdr->h_proto = htons(ETH_P_ARP);
3188 /* ARPOP_REQUEST in ARP header. */
3189 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3190 ahdr->ar_op = htons(ARPOP_REQUEST);
3191 arpreq = (u8 *)(ahdr + 1);
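/*
 * The ARP payload following the header is sender MAC (ETH_ALEN), sender
 * IP (4 bytes), target MAC (ETH_ALEN), target IP (4 bytes); skipping
 * 2 * ETH_ALEN + 4 bytes below lands on the target IP ('tip').
 */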
3193 /* The Unicast IPv4 address in 'tip' field. */
3194 arpreq += 2 * ETH_ALEN + sizeof(u32);
3195 *(u32 *)arpreq = ifa->ifa_address;
3197 /* The mask for the relevant bits. */
3198 pmConf->filters[i].mask[0] = 0x00;
3199 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3200 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3201 pmConf->filters[i].mask[3] = 0x00;
3202 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3203 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3206 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3211 if (adapter->wol & WAKE_MAGIC)
3212 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3214 pmConf->numFilters = i;
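/*
 * Publish the wake-up configuration through the driver-shared area
 * (version, length and physical address of pmConf) and ask the device to
 * re-read it with VMXNET3_CMD_UPDATE_PMCFG before the PCI device is put
 * into its low-power state.
 */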
3216 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3217 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3219 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3222 spin_lock_irqsave(&adapter->cmd_lock, flags);
3223 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3224 VMXNET3_CMD_UPDATE_PMCFG);
3225 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3227 pci_save_state(pdev);
3228 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3230 pci_disable_device(pdev);
3231 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3238 vmxnet3_resume(struct device *device)
3241 unsigned long flags;
3242 struct pci_dev *pdev = to_pci_dev(device);
3243 struct net_device *netdev = pci_get_drvdata(pdev);
3244 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3245 struct Vmxnet3_PMConf *pmConf;
3247 if (!netif_running(netdev))
3250 /* Destroy wake-up filters. */
3251 pmConf = adapter->pm_conf;
3252 memset(pmConf, 0, sizeof(*pmConf));
3254 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3255 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3257 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3260 netif_device_attach(netdev);
3261 pci_set_power_state(pdev, PCI_D0);
3262 pci_restore_state(pdev);
3263 err = pci_enable_device_mem(pdev);
3267 pci_enable_wake(pdev, PCI_D0, 0);
3269 spin_lock_irqsave(&adapter->cmd_lock, flags);
3270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3271 VMXNET3_CMD_UPDATE_PMCFG);
3272 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3273 vmxnet3_alloc_intr_resources(adapter);
3274 vmxnet3_request_irqs(adapter);
3275 for (i = 0; i < adapter->num_rx_queues; i++)
3276 napi_enable(&adapter->rx_queue[i].napi);
3277 vmxnet3_enable_all_intrs(adapter);
3282 static const struct dev_pm_ops vmxnet3_pm_ops = {
3283 .suspend = vmxnet3_suspend,
3284 .resume = vmxnet3_resume,
3288 static struct pci_driver vmxnet3_driver = {
3289 .name = vmxnet3_driver_name,
3290 .id_table = vmxnet3_pciid_table,
3291 .probe = vmxnet3_probe_device,
3292 .remove = __devexit_p(vmxnet3_remove_device),
3294 .driver.pm = &vmxnet3_pm_ops,
3300 vmxnet3_init_module(void)
3302 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
3303 VMXNET3_DRIVER_VERSION_REPORT);
3304 return pci_register_driver(&vmxnet3_driver);
3307 module_init(vmxnet3_init_module);
3311 vmxnet3_exit_module(void)
3313 pci_unregister_driver(&vmxnet3_driver);
3316 module_exit(vmxnet3_exit_module);
3318 MODULE_AUTHOR("VMware, Inc.");
3319 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3320 MODULE_LICENSE("GPL v2");
3321 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);