staging: et131x: Rename var name 'etdev' to 'adapter' throughout module
[pandora-kernel.git] drivers/staging/et131x/et1310_tx.c
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
#include "et131x.h"

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        struct tx_ring *tx_ring = &adapter->tx_ring;

        /* Allocate memory for the TCBs (Transmit Control Blocks) */
        adapter->tx_ring.tcb_ring =
                kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
        if (!adapter->tx_ring.tcb_ring) {
                dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
                return -ENOMEM;
        }

        /* Allocate enough memory for the Tx descriptor ring, and allocate
         * some extra so that the ring can be aligned on a 4k boundary.
         */
        desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
        tx_ring->tx_desc_ring =
            (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
                                                    &tx_ring->tx_desc_ring_pa);
        if (!adapter->tx_ring.tx_desc_ring) {
                dev_err(&adapter->pdev->dev,
                                        "Cannot alloc memory for Tx Ring\n");
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        /* Allocate memory for the Tx status block */
        tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
                                                    sizeof(u32),
                                                    &tx_ring->tx_status_pa);
        if (!adapter->tx_ring.tx_status) {
                dev_err(&adapter->pdev->dev,
                                  "Cannot alloc memory for Tx status block\n");
                return -ENOMEM;
        }
        return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
        int desc_size = 0;

        if (adapter->tx_ring.tx_desc_ring) {
                /* Free memory relating to Tx rings here */
                desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
                                                                + 4096 - 1;
                pci_free_consistent(adapter->pdev,
                                    desc_size,
                                    adapter->tx_ring.tx_desc_ring,
                                    adapter->tx_ring.tx_desc_ring_pa);
                adapter->tx_ring.tx_desc_ring = NULL;
        }

        /* Free memory for the Tx status block */
        if (adapter->tx_ring.tx_status) {
                pci_free_consistent(adapter->pdev,
                                    sizeof(u32),
                                    adapter->tx_ring.tx_status,
                                    adapter->tx_ring.tx_status_pa);

                adapter->tx_ring.tx_status = NULL;
        }
        /* Free the memory for the tcb structures */
        kfree(adapter->tx_ring.tcb_ring);
}

/**
 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 * @adapter: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
        struct txdma_regs __iomem *txdma = &adapter->regs->txdma;

        /* Load the hardware with the start of the transmit descriptor ring. */
        writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
               &txdma->pr_base_hi);
        writel((u32) adapter->tx_ring.tx_desc_ring_pa,
               &txdma->pr_base_lo);

        /* Initialise the transmit DMA engine */
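        /* pr_num_des appears to take the index of the last valid descriptor,
         * hence NUM_DESC_PER_RING_TX - 1 below (an assumption based on the
         * value written, not on documentation).
         */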
        writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

        /* Load the completion writeback physical address */
        writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
                                                &txdma->dma_wb_base_hi);
        writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

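        /* Clear the completion writeback word and the service request
         * register, and reset the driver's send index, so hardware and
         * software both start from descriptor 0 with an empty ring.
         */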
        *adapter->tx_ring.tx_status = 0;

        writel(0, &txdma->service_request);
        adapter->tx_ring.send_idx = 0;
}

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
        /* Setup the transmit dma configuration register */
        writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
                                        &adapter->regs->txdma.csr);
}

/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
        /* Setup the transmit dma configuration register for normal
         * operation
         */
        writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
                                        &adapter->regs->txdma.csr);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
        struct tcb *tcb;
        u32 ct;
        struct tx_ring *tx_ring;

        /* Setup some convenience pointers */
        tx_ring = &adapter->tx_ring;
        tcb = adapter->tx_ring.tcb_ring;

        tx_ring->tcb_qhead = tcb;

        memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

        /* Go through and set up each TCB */
        for (ct = 0; ct++ < NUM_TCB; tcb++)
                /* Set the link pointer in HW TCB to the next TCB in the
                 * chain
                 */
                tcb->next = tcb + 1;

        /* Set the tail pointer */
        tcb--;
        tx_ring->tcb_qtail = tcb;
        tcb->next = NULL;
        /* Curr send queue should now be empty */
        tx_ring->send_head = NULL;
        tx_ring->send_tail = NULL;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
        u32 i;
        struct tx_desc desc[24];        /* 24 x 16 byte */
        u32 frag = 0;
        u32 thiscopy, remainder;
        struct sk_buff *skb = tcb->skb;
        u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
        struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
        unsigned long flags;

        /* Part of the optimizations of this send routine restrict us to
         * sending 24 fragments at a pass.  In practice we should never see
         * more than 5 fragments.
         *
         * NOTE: The older version of this function (below) can handle any
         * number of fragments. If needed, we can call this function,
         * although it is less efficient.
         */
        if (nr_frags > 23)
                return -EIO;

        memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

        for (i = 0; i < nr_frags; i++) {
                /* If there is something in this element, let's get a
                 * descriptor from the ring and get the necessary data
                 */
                if (i == 0) {
                        /* If the fragments are smaller than a standard MTU,
                         * then map them to a single descriptor in the Tx
                         * Desc ring. However, if they're larger, as is
                         * possible with support for jumbo packets, then
                         * split them each across 2 descriptors.
                         *
                         * This will work until we determine why the hardware
                         * doesn't seem to like large fragments.
                         */
                        if ((skb->len - skb->data_len) <= 1514) {
                                desc[frag].addr_hi = 0;
                                /* Low 16bits are length, high is vlan and
                                   unused currently so zero */
                                desc[frag].len_vlan =
                                        skb->len - skb->data_len;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * u32. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                desc[frag++].addr_lo =
                                    pci_map_single(adapter->pdev,
                                                   skb->data,
                                                   skb->len -
                                                   skb->data_len,
                                                   PCI_DMA_TODEVICE);
                        } else {
                                desc[frag].addr_hi = 0;
                                desc[frag].len_vlan =
                                    (skb->len - skb->data_len) / 2;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * u32. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                desc[frag++].addr_lo =
                                    pci_map_single(adapter->pdev,
                                                   skb->data,
                                                   ((skb->len -
                                                     skb->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                                desc[frag].addr_hi = 0;

                                desc[frag].len_vlan =
                                    (skb->len - skb->data_len) / 2;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * u32. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                desc[frag++].addr_lo =
                                    pci_map_single(adapter->pdev,
                                                   skb->data +
                                                   ((skb->len -
                                                     skb->data_len) / 2),
                                                   ((skb->len -
                                                     skb->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                        }
                } else {
                        desc[frag].addr_hi = 0;
                        desc[frag].len_vlan =
                                        frags[i - 1].size;

                        /* NOTE: Here, the dma_addr_t returned from
                         * pci_map_page() is implicitly cast as a u32.
                         * Although dma_addr_t can be 64-bit, the address
                         * returned by pci_map_page() is always 32-bit
                         * addressable (as defined by the pci/dma subsystem)
                         */
                        desc[frag++].addr_lo =
                            pci_map_page(adapter->pdev,
                                         frags[i - 1].page,
                                         frags[i - 1].page_offset,
                                         frags[i - 1].size,
                                         PCI_DMA_TODEVICE);
                }
        }

        if (frag == 0)
                return -EIO;

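        /* Descriptor 'flags' bits, per the inline comments below: 0x1 marks
         * the last fragment of a packet, 0x2 the first fragment, and 0x4
         * asks the hardware to raise an interrupt when the descriptor
         * completes (so 0x5 = last fragment + interrupt).
         */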
        if (adapter->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
                        /* Last element & Interrupt flag */
                        desc[frag - 1].flags = 0x5;
                        adapter->tx_ring.since_irq = 0;
                } else { /* Last element */
                        desc[frag - 1].flags = 0x1;
                }
        } else
                desc[frag - 1].flags = 0x5;

        desc[0].flags |= 2;     /* First element flag */

        tcb->index_start = adapter->tx_ring.send_idx;
        tcb->stale = 0;

        spin_lock_irqsave(&adapter->send_hw_lock, flags);

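        /* send_idx is a 10-bit ring index with a wrap bit (ET_DMA10_WRAP)
         * kept above it; INDEX10() extracts the bare index. The descriptors
         * built above are copied into the ring in at most two pieces so the
         * copy never runs past the end of the ring before wrapping.
         */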
        thiscopy = NUM_DESC_PER_RING_TX -
                                INDEX10(adapter->tx_ring.send_idx);

        if (thiscopy >= frag) {
                remainder = 0;
                thiscopy = frag;
        } else {
                remainder = frag - thiscopy;
        }

        memcpy(adapter->tx_ring.tx_desc_ring +
               INDEX10(adapter->tx_ring.send_idx), desc,
               sizeof(struct tx_desc) * thiscopy);

        add_10bit(&adapter->tx_ring.send_idx, thiscopy);

        if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
                    INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
                adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
                adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
        }

        if (remainder) {
                memcpy(adapter->tx_ring.tx_desc_ring,
                       desc + thiscopy,
                       sizeof(struct tx_desc) * remainder);

                add_10bit(&adapter->tx_ring.send_idx, remainder);
        }

        if (INDEX10(adapter->tx_ring.send_idx) == 0) {
                if (adapter->tx_ring.send_idx)
                        tcb->index = NUM_DESC_PER_RING_TX - 1;
                else
                        tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
        } else
                tcb->index = adapter->tx_ring.send_idx - 1;

        spin_lock(&adapter->tcb_send_qlock);

        if (adapter->tx_ring.send_tail)
                adapter->tx_ring.send_tail->next = tcb;
        else
                adapter->tx_ring.send_head = tcb;

        adapter->tx_ring.send_tail = tcb;

        WARN_ON(tcb->next != NULL);

        adapter->tx_ring.used++;

        spin_unlock(&adapter->tcb_send_qlock);

        /* Write the new write pointer back to the device. */
        writel(adapter->tx_ring.send_idx,
               &adapter->regs->txdma.service_request);

        /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (adapter->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &adapter->regs->global.watchdog_timer);
        }
        spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

        return 0;
}

/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
        int status;
        struct tcb *tcb = NULL;
        u16 *shbufva;
        unsigned long flags;

        /* All packets must have at least a MAC address and a protocol type */
        if (skb->len < ETH_HLEN)
                return -EIO;

        /* Get a TCB for this packet */
        spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

        tcb = adapter->tx_ring.tcb_qhead;

        if (tcb == NULL) {
                spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
                return -ENOMEM;
        }

        adapter->tx_ring.tcb_qhead = tcb->next;

        if (adapter->tx_ring.tcb_qhead == NULL)
                adapter->tx_ring.tcb_qtail = NULL;

        spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

        tcb->skb = skb;

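        /* Peek at the destination MAC address in the linear header so the
         * frame can be counted later as broadcast or multicast: all-ones
         * means broadcast, and a set low bit in the first octet (the
         * group-address bit) marks multicast.
         */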
        if (skb->data != NULL && skb->len - skb->data_len >= 6) {
                shbufva = (u16 *) skb->data;

                if ((shbufva[0] == 0xffff) &&
                    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
                        tcb->flags |= fMP_DEST_BROAD;
                } else if ((shbufva[0] & 0x3) == 0x0001) {
                        tcb->flags |= fMP_DEST_MULTI;
                }
        }

        tcb->next = NULL;

        /* Call the NIC specific send handler. */
        status = nic_send_packet(adapter, tcb);

        if (status != 0) {
                spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

                if (adapter->tx_ring.tcb_qtail)
                        adapter->tx_ring.tcb_qtail->next = tcb;
                else
                        /* Apparently ready Q is empty. */
                        adapter->tx_ring.tcb_qhead = tcb;

                adapter->tx_ring.tcb_qtail = tcb;
                spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
                return status;
        }
        WARN_ON(adapter->tx_ring.used > NUM_TCB);
        return 0;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
        int status = 0;
        struct et131x_adapter *adapter = NULL;

        adapter = netdev_priv(netdev);

        /* Send these packets
         *
         * NOTE: The Linux Tx entry point is only given one packet at a time
         * to Tx, so the PacketCount and its array are not needed here
         */

        /* TCB is not available */
        if (adapter->tx_ring.used >= NUM_TCB) {
                /* NOTE: If there's an error on send, no need to queue the
                 * packet under Linux; if we just send an error up to the
                 * netif layer, it will resend the skb to us.
                 */
                status = -ENOMEM;
        } else {
                /* We need to see if the link is up; if it's not, make the
                 * netif layer think we're good and drop the packet
                 */
                if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
                                        !netif_carrier_ok(netdev)) {
                        dev_kfree_skb_any(skb);
                        skb = NULL;

                        adapter->net_stats.tx_dropped++;
                } else {
                        status = send_packet(skb, adapter);
                        if (status != 0 && status != -ENOMEM) {
                                /* On any other error, make netif think we're
                                 * OK and drop the packet
                                 */
                                dev_kfree_skb_any(skb);
                                skb = NULL;
                                adapter->net_stats.tx_dropped++;
                        }
                }
        }
        return status;
}

/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
                                                struct tcb *tcb)
{
        unsigned long flags;
        struct tx_desc *desc = NULL;
        struct net_device_stats *stats = &adapter->net_stats;

        if (tcb->flags & fMP_DEST_BROAD)
                atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
        else if (tcb->flags & fMP_DEST_MULTI)
                atomic_inc(&adapter->stats.multicast_pkts_xmtd);
        else
                atomic_inc(&adapter->stats.unicast_pkts_xmtd);

        if (tcb->skb) {
                stats->tx_bytes += tcb->skb->len;

                /* Iterate through the TX descriptors on the ring
                 * corresponding to this packet and unmap the fragments
                 * they point to
                 */
                do {
                        desc = (struct tx_desc *)(adapter->tx_ring.tx_desc_ring +
                                                INDEX10(tcb->index_start));

                        pci_unmap_single(adapter->pdev,
                                         desc->addr_lo,
                                         desc->len_vlan, PCI_DMA_TODEVICE);

                        add_10bit(&tcb->index_start, 1);
                        if (INDEX10(tcb->index_start) >=
                                                        NUM_DESC_PER_RING_TX) {
                                tcb->index_start &= ~ET_DMA10_MASK;
                                tcb->index_start ^= ET_DMA10_WRAP;
                        }
                } while (desc != (adapter->tx_ring.tx_desc_ring +
                                INDEX10(tcb->index)));

                dev_kfree_skb_any(tcb->skb);
        }

        memset(tcb, 0, sizeof(struct tcb));

        /* Add the TCB to the Ready Q */
        spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

        adapter->net_stats.tx_packets++;

        if (adapter->tx_ring.tcb_qtail)
                adapter->tx_ring.tcb_qtail->next = tcb;
        else
                /* Apparently ready Q is empty. */
                adapter->tx_ring.tcb_qhead = tcb;

        adapter->tx_ring.tcb_qtail = tcb;

        spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
        WARN_ON(adapter->tx_ring.used < 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
        struct tcb *tcb;
        unsigned long flags;
        u32 freed = 0;

        /* Any packets being sent? Check the first TCB on the send list */
        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

        tcb = adapter->tx_ring.send_head;

        while (tcb != NULL && freed < NUM_TCB) {
                struct tcb *next = tcb->next;

                adapter->tx_ring.send_head = next;

                if (next == NULL)
                        adapter->tx_ring.send_tail = NULL;

                adapter->tx_ring.used--;

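                /* Drop the send-queue lock around free_send_packet(), which
                 * does the unmapping and skb free and takes the ready-queue
                 * lock itself.
                 */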
                spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

                freed++;
                free_send_packet(adapter, tcb);

                spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

                tcb = adapter->tx_ring.send_head;
        }

        WARN_ON(freed == NUM_TCB);

        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

        adapter->tx_ring.used = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
        unsigned long flags;
        u32 serviced;
        struct tcb *tcb;
        u32 index;

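        /* 'serviced' is the hardware's completion position, in the same
         * 10-bit index plus wrap bit format as send_idx, so it can be
         * compared against each TCB's saved descriptor index below.
         */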
        serviced = readl(&adapter->regs->txdma.new_service_complete);
        index = INDEX10(serviced);

        /* Has the ring wrapped?  Process any descriptors that do not have
         * the same "wrap" indicator as the current completion indicator
         */
        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

        tcb = adapter->tx_ring.send_head;

        while (tcb &&
               ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
               index < INDEX10(tcb->index)) {
                adapter->tx_ring.used--;
                adapter->tx_ring.send_head = tcb->next;
                if (tcb->next == NULL)
                        adapter->tx_ring.send_tail = NULL;

                spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
                free_send_packet(adapter, tcb);
                spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

                /* Go to the next packet */
                tcb = adapter->tx_ring.send_head;
        }
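        /* Now complete any remaining TCBs whose wrap state matches the
         * completion pointer and whose descriptor index has already been
         * passed by the hardware.
         */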
        while (tcb &&
               !((serviced ^ tcb->index) & ET_DMA10_WRAP)
               && index > (tcb->index & ET_DMA10_MASK)) {
                adapter->tx_ring.used--;
                adapter->tx_ring.send_head = tcb->next;
                if (tcb->next == NULL)
                        adapter->tx_ring.send_tail = NULL;

                spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
                free_send_packet(adapter, tcb);
                spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

                /* Go to the next packet */
                tcb = adapter->tx_ring.send_head;
        }

        /* Wake up the queue when we hit a low-water mark */
        if (adapter->tx_ring.used <= NUM_TCB / 3)
                netif_wake_queue(adapter->netdev);

        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}