/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.tcb_ring =
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}
	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
		(struct tx_desc *) pci_alloc_consistent(adapter->pdev,
							desc_size,
							&tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}
	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						  sizeof(u32),
						  &tx_ring->tx_status_pa);
	if (!adapter->tx_ring.tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);

		adapter->tx_ring.tx_status = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}
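
/*
 * A note on the lists used below, drawn from how this file uses them:
 * free TCBs sit on a singly-linked "ready" queue (tcb_qhead/tcb_qtail,
 * guarded by tcb_ready_qlock). When a packet is handed to the hardware,
 * its TCB moves to the in-flight "send" queue (send_head/send_tail,
 * guarded by tcb_send_qlock), and the completion path recycles it back
 * onto the ready queue via free_send_packet().
 */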
/**
 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 * @adapter: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) adapter->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	*adapter->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	adapter->tx_ring.send_idx = 0;
}
/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Restart Tx DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in the TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;

	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
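
/*
 * For reference below (a sketch, assuming the usual definitions in the
 * driver's headers): descriptor positions pack a wrap indicator on top
 * of a 10-bit ring index, so the hardware can tell a full ring from an
 * empty one:
 *
 *	INDEX10(v)       - the low 10 bits of v: the ring index proper
 *	ET_DMA10_WRAP    - the bit above them, toggled on every wrap
 *	add_10bit(&v, n) - advances the low 10 bits while preserving the
 *	                   wrap bit
 */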
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;
	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function could handle any number
	 * of fragments. If needed, we can fall back to it, although it is
	 * less efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       skb->len -
						       skb->data_len,
						       PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* See the dma_addr_t NOTE above; the same
				 * implicit cast applies here.
				 */
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);

				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* See the dma_addr_t NOTE above. */
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data +
						       (skb->len -
							skb->data_len) / 2,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan = frags[i - 1].size;

			/* See the dma_addr_t NOTE above; pci_map_page()
			 * behaves the same way.
			 */
			desc[frag++].addr_lo =
				pci_map_page(adapter->pdev,
					     frags[i - 1].page,
					     frags[i - 1].page_offset,
					     frags[i - 1].size,
					     PCI_DMA_TODEVICE);
		}
	}
	if (adapter->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			adapter->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */
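
	/* Flag bits as used above: bit 0 marks the last element of a
	 * packet, bit 1 the first, and bit 2 requests an interrupt on
	 * completion; hence 0x5 is "last element, interrupt me" and 0x1
	 * is just "last element".
	 */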
	tcb->index_start = adapter->tx_ring.send_idx;

	spin_lock_irqsave(&adapter->send_hw_lock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(adapter->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}
	memcpy(adapter->tx_ring.tx_desc_ring +
	       INDEX10(adapter->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&adapter->tx_ring.send_idx, thiscopy);

	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
	    INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(adapter->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&adapter->tx_ring.send_idx, remainder);
	}
	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
		if (adapter->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP |
					(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = adapter->tx_ring.send_idx - 1;
	spin_lock(&adapter->tcb_send_qlock);

	if (adapter->tx_ring.send_tail)
		adapter->tx_ring.send_tail->next = tcb;
	else
		adapter->tx_ring.send_head = tcb;

	adapter->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	adapter->tx_ring.used++;

	spin_unlock(&adapter->tcb_send_qlock);
	/* Write the new write pointer back to the device. */
	writel(adapter->tx_ring.send_idx,
	       &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (adapter->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	return 0;
}
/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;
	/* Get a TCB for this packet */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = adapter->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	adapter->tx_ring.tcb_qhead = tcb->next;

	if (adapter->tx_ring.tcb_qhead == NULL)
		adapter->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	tcb->skb = skb;

	/* Peek at the destination address for the broadcast/multicast
	 * statistics; on a little-endian read, the low bit of the first
	 * destination byte is the IEEE group (multicast) bit.
	 */
	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);
	if (status != 0) {
		/* The packet was not sent; put the TCB back on the
		 * ready queue.
		 */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (adapter->tx_ring.tcb_qtail)
			adapter->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;

		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = NULL;

	adapter = netdev_priv(netdev);
	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its array are of no use here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);

			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;

				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}
/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;
	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);
	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to, stopping after the descriptor recorded
		 * as the packet's last (tcb->index)
		 */
		do {
			desc = (struct tx_desc *)
				(adapter->tx_ring.tx_desc_ring +
					INDEX10(tcb->index_start));

			pci_unmap_single(adapter->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
					NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}
	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB back to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(adapter->tx_ring.used < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;
	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);
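
	/* Worked example of the encoding (a sketch; it assumes the wrap
	 * bit sits directly above the 10-bit index, as elsewhere in this
	 * file): serviced == 0x403 would mean the hardware has completed
	 * through descriptor 3 of the current lap, with the wrap bit
	 * toggled relative to the previous lap. Comparing wrap bits and
	 * indices below tells us which in-flight TCBs are finished.
	 */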
	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
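
/*
 * Usage sketch, not part of this file: elsewhere in the driver,
 * et131x_send_packets() is called from the netdev transmit hook,
 * roughly as follows (the surrounding function lives in
 * et131x_netdev.c):
 *
 *	status = et131x_send_packets(skb, netdev);
 *	if (status == -ENOMEM)
 *		netif_stop_queue(netdev);	(out of TCBs; back off)
 *
 * et131x_handle_send_interrupt() above then re-wakes the queue once
 * adapter->tx_ring.used falls to the NUM_TCB / 3 low-water mark.
 */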