3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
5 * Copyright © 2005 Agere Systems Inc.
9 *------------------------------------------------------------------------------
11 * et1310_rx.c - Routines used to perform data reception
13 *------------------------------------------------------------------------------
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
75 #include <linux/delay.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
93 #include "et1310_rx.h"
96 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
99 * et131x_rx_dma_memory_alloc
100 * @adapter: pointer to our private adapter structure
102 * Returns 0 on success and errno on failure (as defined in errno.h)
104 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
105 * and the Packet Status Ring.
/*
 * NOTE(review): this copy of the file has interior lines elided (the
 * embedded numeric prefixes jump), so opening braces, local variable
 * declarations, some error-path returns and closing braces are not
 * visible here.  Comments below describe only what the visible code
 * demonstrably does; anything else is flagged as an assumption.
 */
107 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
111 u32 pktStatRingSize, FBRChunkSize;
114 /* Setup some convenience pointers */
115 rx_ring = (RX_RING_t *) &adapter->RxRing;
117 /* Alloc memory for the lookup table */
/* NOTE(review): neither kmalloc() result is NULL-checked in the visible
 * lines -- confirm against the full file whether failure is handled. */
119 rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
122 rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
124 /* The first thing we will do is configure the sizes of the buffer
125 * rings. These will change based on jumbo packet support. Larger
126 * jumbo packets increases the size of each entry in FBR0, and the
127 * number of entries in FBR0, while at the same time decreasing the
128 * number of entries in FBR1.
130 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
131 * entries are huge in order to accommodate a "jumbo" frame, then it
132 * will have less entries. Conversely, FBR1 will now be relied upon
133 * to carry more "normal" frames, thus its entry size also increases
134 * and the number of entries goes up too (since it now carries
135 * "small" + "regular" packets.
137 * In this scheme, we try to maintain 512 entries between the two
138 * rings. Also, FBR1 remains a constant size - when its size doubles
139 * the number of entries halves. FBR0 increases in size, however.
/* Pick ring geometry from the configured jumbo-packet threshold:
 * <2048 = standard frames, <4096 = mid jumbo, else large jumbo. */
142 if (adapter->RegistryJumboPacket < 2048) {
144 rx_ring->Fbr0BufferSize = 256;
145 rx_ring->Fbr0NumEntries = 512;
147 rx_ring->Fbr1BufferSize = 2048;
148 rx_ring->Fbr1NumEntries = 512;
149 } else if (adapter->RegistryJumboPacket < 4096) {
151 rx_ring->Fbr0BufferSize = 512;
152 rx_ring->Fbr0NumEntries = 1024;
154 rx_ring->Fbr1BufferSize = 4096;
155 rx_ring->Fbr1NumEntries = 512;
158 rx_ring->Fbr0BufferSize = 1024;
159 rx_ring->Fbr0NumEntries = 768;
161 rx_ring->Fbr1BufferSize = 16384;
162 rx_ring->Fbr1NumEntries = 128;
/* PSR must hold one status entry per free buffer; when FBR0 is in use
 * that is the sum of both rings (the #ifdef guarding the two variants
 * is among the elided lines -- presumably USE_FBR0; verify). */
166 adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167 adapter->RxRing.Fbr1NumEntries;
169 adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
172 /* Allocate an area of memory for Free Buffer Ring 1 */
/* +0xfff slack allows the 4K alignment adjustment performed below */
173 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
174 rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
176 &rx_ring->pFbr1RingPa);
177 if (!rx_ring->pFbr1RingVa) {
178 dev_err(&adapter->pdev->dev,
179 "Cannot alloc memory for Free Buffer Ring 1\n");
183 /* Save physical address
185 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
186 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
187 * are ever returned, make sure the high part is retrieved here
188 * before storing the adjusted address.
190 rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
192 /* Align Free Buffer Ring 1 on a 4K boundary */
193 et131x_align_allocated_memory(adapter,
194 &rx_ring->Fbr1Realpa,
195 &rx_ring->Fbr1offset, 0x0FFF);
/* Advance the virtual pointer by the same offset so VA/PA stay paired */
197 rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198 rx_ring->Fbr1offset);
201 /* Allocate an area of memory for Free Buffer Ring 0 */
202 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
203 rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
205 &rx_ring->pFbr0RingPa);
206 if (!rx_ring->pFbr0RingVa) {
207 dev_err(&adapter->pdev->dev,
208 "Cannot alloc memory for Free Buffer Ring 0\n");
212 /* Save physical address
214 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
215 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
216 * are ever returned, make sure the high part is retrieved here before
217 * storing the adjusted address.
219 rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
221 /* Align Free Buffer Ring 0 on a 4K boundary */
222 et131x_align_allocated_memory(adapter,
223 &rx_ring->Fbr0Realpa,
224 &rx_ring->Fbr0offset, 0x0FFF);
226 rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227 rx_ring->Fbr0offset);
/* Populate FBR1 packet buffers, FBR_CHUNKS buffers per DMA allocation */
230 for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
236 /* This code allocates an area of memory big enough for N
237 * free buffers + (buffer_size - 1) so that the buffers can
238 * be aligned on 4k boundaries. If each buffer were aligned
239 * to a buffer_size boundary, the effect would be to double
240 * the size of FBR0. By allocating N buffers at once, we
241 * reduce this overhead.
/* Buffers larger than a page align to their own size, else to 4K */
243 if (rx_ring->Fbr1BufferSize > 4096)
246 Fbr1Align = rx_ring->Fbr1BufferSize;
249 (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250 rx_ring->Fbr1MemVa[i] =
251 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252 &rx_ring->Fbr1MemPa[i]);
254 if (!rx_ring->Fbr1MemVa[i]) {
255 dev_err(&adapter->pdev->dev,
256 "Could not alloc memory\n");
260 /* See NOTE in "Save Physical Address" comment above */
261 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
263 et131x_align_allocated_memory(adapter,
265 &Fbr1Offset, (Fbr1Align - 1));
267 for (j = 0; j < FBR_CHUNKS; j++) {
268 u32 index = (i * FBR_CHUNKS) + j;
270 /* Save the Virtual address of this index for quick
273 rx_ring->Fbr[1]->Va[index] =
274 (uint8_t *) rx_ring->Fbr1MemVa[i] +
275 (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
277 /* now store the physical address in the descriptor
278 * so the device can access it
280 rx_ring->Fbr[1]->PAHigh[index] =
281 (u32) (Fbr1TempPa >> 32);
282 rx_ring->Fbr[1]->PALow[index] = (u32) Fbr1TempPa;
284 Fbr1TempPa += rx_ring->Fbr1BufferSize;
286 rx_ring->Fbr[1]->Buffer1[index] =
287 rx_ring->Fbr[1]->Va[index];
/* Buffer2 points 4 bytes before Buffer1 -- purpose not visible here */
288 rx_ring->Fbr[1]->Buffer2[index] =
289 rx_ring->Fbr[1]->Va[index] - 4;
294 /* Same for FBR0 (if in use) */
295 for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
/* FBR0 buffers are always <= 4K, so size-1 slack covers alignment */
300 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301 rx_ring->Fbr0MemVa[i] =
302 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303 &rx_ring->Fbr0MemPa[i]);
305 if (!rx_ring->Fbr0MemVa[i]) {
306 dev_err(&adapter->pdev->dev,
307 "Could not alloc memory\n");
311 /* See NOTE in "Save Physical Address" comment above */
312 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
314 et131x_align_allocated_memory(adapter,
317 rx_ring->Fbr0BufferSize - 1);
319 for (j = 0; j < FBR_CHUNKS; j++) {
320 u32 index = (i * FBR_CHUNKS) + j;
322 rx_ring->Fbr[0]->Va[index] =
323 (uint8_t *) rx_ring->Fbr0MemVa[i] +
324 (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
326 rx_ring->Fbr[0]->PAHigh[index] =
327 (u32) (Fbr0TempPa >> 32);
328 rx_ring->Fbr[0]->PALow[index] = (u32) Fbr0TempPa;
330 Fbr0TempPa += rx_ring->Fbr0BufferSize;
332 rx_ring->Fbr[0]->Buffer1[index] =
333 rx_ring->Fbr[0]->Va[index];
334 rx_ring->Fbr[0]->Buffer2[index] =
335 rx_ring->Fbr[0]->Va[index] - 4;
340 /* Allocate an area of memory for FIFO of Packet Status ring entries */
342 sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
344 rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
346 &rx_ring->pPSRingPa);
348 if (!rx_ring->pPSRingVa) {
349 dev_err(&adapter->pdev->dev,
350 "Cannot alloc memory for Packet Status Ring\n");
/* NOTE(review): raw printk() with no KERN_* level -- debug leftover */
353 printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
356 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
357 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
358 * are ever returned, make sure the high part is retrieved here before
359 * storing the adjusted address.
362 /* Allocate an area of memory for writeback of status information */
363 rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
364 sizeof(RX_STATUS_BLOCK_t),
365 &rx_ring->pRxStatusPa);
366 if (!rx_ring->pRxStatusVa) {
367 dev_err(&adapter->pdev->dev,
368 "Cannot alloc memory for Status Block\n");
371 rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
372 printk("PRS %lx\n", (unsigned long)rx_ring->pRxStatusPa);
375 * pci_pool_create initializes a lookaside list. After successful
376 * creation, nonpaged fixed-size blocks can be allocated from and
377 * freed to the lookaside list.
378 * RFDs will be allocated from this pool.
380 rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
/* Flag that the RFD slab cache exists so the free path can destroy it */
387 adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
389 /* The RFDs are going to be put on lists later on, so initialize the
392 INIT_LIST_HEAD(&rx_ring->RecvList);
397 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
398 * @adapter: pointer to our private adapter structure
/*
 * Releases everything allocated by et131x_rx_dma_memory_alloc(): the
 * RFDs, both free-buffer rings and their packet buffers, the packet
 * status ring, the status writeback block, the RFD slab cache and the
 * FBR lookup tables.  Each pointer is NULLed after freeing so a second
 * call is harmless.
 * NOTE(review): interior lines are elided in this copy of the file
 * (braces, some size computations); comments describe visible code only.
 */
400 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
408 /* Setup some convenience pointers */
409 rx_ring = (RX_RING_t *) &adapter->RxRing;
411 /* Free RFDs and associated packet descriptors */
/* All RFDs should have been returned to RecvList before teardown */
412 WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
414 while (!list_empty(&rx_ring->RecvList)) {
415 rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
418 list_del(&rfd->list_node);
420 kmem_cache_free(adapter->RxRing.RecvLookaside, rfd);
423 /* Free Free Buffer Ring 1 */
424 if (rx_ring->pFbr1RingVa) {
425 /* First the packet memory */
426 for (index = 0; index <
427 (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
428 if (rx_ring->Fbr1MemVa[index]) {
/* Recompute the same chunk size/alignment used at allocation time */
431 if (rx_ring->Fbr1BufferSize > 4096)
434 Fbr1Align = rx_ring->Fbr1BufferSize;
437 (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
440 pci_free_consistent(adapter->pdev,
442 rx_ring->Fbr1MemVa[index],
443 rx_ring->Fbr1MemPa[index]);
445 rx_ring->Fbr1MemVa[index] = NULL;
449 /* Now the FIFO itself */
/* Undo the 4K-alignment offset applied at alloc before freeing */
450 rx_ring->pFbr1RingVa = (void *)((uint8_t *)
451 rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
453 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
456 pci_free_consistent(adapter->pdev, bufsize,
457 rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
459 rx_ring->pFbr1RingVa = NULL;
463 /* Now the same for Free Buffer Ring 0 */
464 if (rx_ring->pFbr0RingVa) {
465 /* First the packet memory */
466 for (index = 0; index <
467 (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
468 if (rx_ring->Fbr0MemVa[index]) {
470 (rx_ring->Fbr0BufferSize *
471 (FBR_CHUNKS + 1)) - 1;
473 pci_free_consistent(adapter->pdev,
475 rx_ring->Fbr0MemVa[index],
476 rx_ring->Fbr0MemPa[index]);
478 rx_ring->Fbr0MemVa[index] = NULL;
482 /* Now the FIFO itself */
483 rx_ring->pFbr0RingVa = (void *)((uint8_t *)
484 rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
486 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
489 pci_free_consistent(adapter->pdev,
491 rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);
493 rx_ring->pFbr0RingVa = NULL;
497 /* Free Packet Status Ring */
498 if (rx_ring->pPSRingVa) {
500 sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
502 pci_free_consistent(adapter->pdev, pktStatRingSize,
503 rx_ring->pPSRingVa, rx_ring->pPSRingPa);
505 rx_ring->pPSRingVa = NULL;
508 /* Free area of memory for the writeback of status information */
509 if (rx_ring->pRxStatusVa) {
510 pci_free_consistent(adapter->pdev,
511 sizeof(RX_STATUS_BLOCK_t),
512 rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);
514 rx_ring->pRxStatusVa = NULL;
517 /* Free receive buffer pool */
519 /* Free receive packet pool */
521 /* Destroy the lookaside (RFD) pool */
522 if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
523 kmem_cache_destroy(rx_ring->RecvLookaside);
524 adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
527 /* Free the FBR Lookup Table */
529 kfree(rx_ring->Fbr[0]);
532 kfree(rx_ring->Fbr[1]);
/* Reset the ready counter so state is consistent after teardown */
535 rx_ring->nReadyRecv = 0;
539 * et131x_init_recv - Initialize receive data structures.
540 * @adapter: pointer to our private adapter structure
542 * Returns 0 on success and errno on failure (as defined in errno.h)
/*
 * Allocates NumRfd receive frame descriptors from the RecvLookaside
 * slab cache and queues them on RecvList.  Returns 0 on success,
 * -ENOMEM style errno on failure (status initialised to -ENOMEM and
 * presumably set to 0 in an elided line -- verify against full file).
 * NOTE(review): loop braces, the NULL check on rfd, and the success
 * return are among the lines elided from this copy.
 */
544 int et131x_init_recv(struct et131x_adapter *adapter)
546 int status = -ENOMEM;
550 RX_RING_t *rx_ring = NULL;
552 /* Setup some convenience pointers */
553 rx_ring = (RX_RING_t *) &adapter->RxRing;
556 for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
/* GFP_ATOMIC | GFP_DMA: allocation may run in atomic context */
557 rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
558 GFP_ATOMIC | GFP_DMA);
561 dev_err(&adapter->pdev->dev,
562 "Couldn't alloc RFD out of kmem_cache\n");
569 /* Add this RFD to the RecvList */
570 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
572 /* Increment both the available RFD's, and the total RFD's. */
573 rx_ring->nReadyRecv++;
/* If fewer than requested were allocated, shrink NumRfd to what we got
 * (as long as we stayed above the minimum) */
577 if (numrfd > NIC_MIN_NUM_RFD)
580 rx_ring->NumRfd = numrfd;
/* Error path: release the last partially-initialised RFD */
583 kmem_cache_free(rx_ring->RecvLookaside, rfd);
584 dev_err(&adapter->pdev->dev,
585 "Allocation problems in et131x_init_recv\n");
591 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
592 * @etdev: pointer to our adapter structure
/*
 * Programs the ET1310 RX DMA engine: halts it, loads the status
 * writeback address, the packet status ring, and both free buffer
 * rings (base/size/full-offset/min-des), then sets the interrupt
 * coalescing parameters.  Caller-visible side effect: RXDMA is left
 * halted (et131x_rx_dma_enable() must be called to restart it).
 * NOTE(review): lines are elided in this copy (e.g. fbr_entry++
 * advancement inside the init loops, #ifdef guards around the FBR0
 * section); comments describe visible code only.
 */
594 void ConfigRxDmaRegs(struct et131x_adapter *etdev)
596 struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
597 struct _rx_ring_t *rx_local = &etdev->RxRing;
598 struct fbr_desc *fbr_entry;
603 /* Halt RXDMA to perform the reconfigure. */
604 et131x_rx_dma_disable(etdev);
606 /* Load the completion writeback physical address
608 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
609 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
610 * are ever returned, make sure the high part is retrieved here
611 * before storing the adjusted address.
613 writel((u32) ((u64)rx_local->pRxStatusPa >> 32),
614 &rx_dma->dma_wb_base_hi);
615 writel((u32) rx_local->pRxStatusPa, &rx_dma->dma_wb_base_lo);
617 memset(rx_local->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));
619 /* Set the address and parameters of the packet status ring into the
622 writel((u32) ((u64)rx_local->pPSRingPa >> 32),
623 &rx_dma->psr_base_hi);
624 writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
625 writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
626 writel(0, &rx_dma->psr_full_offset);
/* Low-water mark: a percentage of the PSR entry count (12-bit field) */
628 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
629 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
630 &rx_dma->psr_min_des);
632 spin_lock_irqsave(&etdev->RcvLock, flags);
634 /* These local variables track the PSR in the adapter structure */
635 rx_local->local_psr_full = 0;
637 /* Now's the best time to initialize FBR1 contents */
638 fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
639 for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
640 fbr_entry->addr_hi = rx_local->Fbr[1]->PAHigh[entry];
641 fbr_entry->addr_lo = rx_local->Fbr[1]->PALow[entry];
/* word2 carries the buffer index so the PSR can name the buffer */
642 fbr_entry->word2 = entry;
646 /* Set the address and parameters of Free buffer ring 1 (and 0 if
647 * required) into the 1310's registers
649 writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
650 writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
651 writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
652 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
654 /* This variable tracks the free buffer ring 1 full position, so it
655 * has to match the above.
657 rx_local->local_Fbr1_full = ET_DMA10_WRAP;
658 writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
659 &rx_dma->fbr1_min_des);
662 /* Now's the best time to initialize FBR0 contents */
663 fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
664 for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
665 fbr_entry->addr_hi = rx_local->Fbr[0]->PAHigh[entry];
666 fbr_entry->addr_lo = rx_local->Fbr[0]->PALow[entry];
667 fbr_entry->word2 = entry;
671 writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
672 writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
673 writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
674 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
676 /* This variable tracks the free buffer ring 0 full position, so it
677 * has to match the above.
679 rx_local->local_Fbr0_full = ET_DMA10_WRAP;
680 writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
681 &rx_dma->fbr0_min_des);
684 /* Program the number of packets we will receive before generating an
686 * For version B silicon, this value gets updated once autoneg is
689 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
691 /* The "time_done" is not working correctly to coalesce interrupts
692 * after a given time period, but rather is giving us an interrupt
693 * regardless of whether we have received packets.
694 * This value gets updated once autoneg is complete.
696 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
698 spin_unlock_irqrestore(&etdev->RcvLock, flags);
702 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
703 * @etdev: pointer to our adapter structure
/*
 * Disables RX interrupt coalescing at 10/100 Mbit/s line rates:
 * max_pkt_time = 0 and num_pkt_done = 1 make the device interrupt on
 * every received packet.  At gigabit rates the registers are left as
 * programmed by ConfigRxDmaRegs().
 * NOTE(review): the closing braces are elided in this copy.
 */
705 void SetRxDmaTimer(struct et131x_adapter *etdev)
707 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
708 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
710 if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
711 (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
712 writel(0, &etdev->regs->rxdma.max_pkt_time);
713 writel(1, &etdev->regs->rxdma.num_pkt_done);
718 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
719 * @etdev: pointer to our adapter structure
721 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
724 /* Setup the receive dma configuration register */
725 writel(0x00002001, &etdev->regs->rxdma.csr);
726 csr = readl(&etdev->regs->rxdma.csr);
727 if ((csr & 0x00020000) != 1) { /* Check halt status (bit 17) */
729 csr = readl(&etdev->regs->rxdma.csr);
730 if ((csr & 0x00020000) != 1)
731 dev_err(&etdev->pdev->dev,
732 "RX Dma failed to enter halt state. CSR 0x%08x\n",
738 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
739 * @etdev: pointer to our adapter structure
/*
 * Re-enables RX DMA: builds a CSR value encoding the FBR1 (and FBR0)
 * buffer sizes plus the ring-enable bits, writes it, and then polls
 * the halt status bit (bit 17) -- which must CLEAR once the engine is
 * running -- retrying once before reporting an error.
 * NOTE(review): the size-code writes inside each else-if branch, the
 * udelay() between polls, and closing braces are elided in this copy.
 */
741 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
743 /* Setup the receive dma configuration register for normal operation */
744 u32 csr = 0x2000; /* FBR1 enable */
/* Encode FBR1 buffer size (default covers the 2048-byte case) */
746 if (etdev->RxRing.Fbr1BufferSize == 4096)
748 else if (etdev->RxRing.Fbr1BufferSize == 8192)
750 else if (etdev->RxRing.Fbr1BufferSize == 16384)
753 csr |= 0x0400; /* FBR0 enable */
754 if (etdev->RxRing.Fbr0BufferSize == 256)
756 else if (etdev->RxRing.Fbr0BufferSize == 512)
758 else if (etdev->RxRing.Fbr0BufferSize == 1024)
761 writel(csr, &etdev->regs->rxdma.csr);
763 csr = readl(&etdev->regs->rxdma.csr);
/* Halt bit still set means the engine failed to start */
764 if ((csr & 0x00020000) != 0) {
766 csr = readl(&etdev->regs->rxdma.csr);
767 if ((csr & 0x00020000) != 0) {
768 dev_err(&etdev->pdev->dev,
769 "RX Dma failed to exit halt state. CSR 0x%08x\n",
776 * nic_rx_pkts - Checks the hardware for available packets
777 * @etdev: pointer to our adapter
779 * Returns rfd, a pointer to our MPRFD.
781 * Checks the hardware for available packets, using completion ring
782 * If packets are available, it gets an RFD from the RecvList, attaches
783 * the packet to it, puts the RFD in the RecvPendList, and also returns
784 * the pointer to the RFD.
/*
 * Pulls one completed packet off the hardware Packet Status Ring.
 * Flow (visible portions): read the DMA-written status block; if the
 * PSR has not advanced past our local copy, there is nothing to do;
 * otherwise capture length / ring index / buffer index from the PSR
 * entry, advance and write back the local PSR position, validate the
 * indices, take an RFD from RecvList, copy the frame into a fresh SKB
 * and hand it to the network stack, then recycle the buffer via
 * nic_return_rfd().  Returns the RFD (NULL paths are elided here).
 * NOTE(review): many interior lines are elided in this copy --
 * declarations, several return statements, the skb receive call and
 * closing braces -- so comments below describe only the visible code.
 */
786 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
788 struct _rx_ring_t *rx_local = &etdev->RxRing;
789 PRX_STATUS_BLOCK_t status;
790 PPKT_STAT_DESC_t psr;
795 struct list_head *element;
799 PKT_STAT_DESC_WORD0_t Word0;
801 /* RX Status block is written by the DMA engine prior to every
802 * interrupt. It contains the next to be used entry in the Packet
803 * Status Ring, and also the two Free Buffer rings.
805 status = (PRX_STATUS_BLOCK_t) rx_local->pRxStatusVa;
807 /* FIXME: tidy later when conversions complete */
/* Compare hardware PSR offset+wrap against our local 13-bit copy */
808 if (status->Word1.bits.PSRoffset ==
809 (rx_local->local_psr_full & 0xFFF) &&
810 status->Word1.bits.PSRwrap ==
811 ((rx_local->local_psr_full >> 12) & 1)) {
812 /* Looks like this ring is not updated yet */
816 /* The packet status ring indicates that data is available. */
817 psr = (PPKT_STAT_DESC_t) (rx_local->pPSRingVa) +
818 (rx_local->local_psr_full & 0xFFF);
820 /* Grab any information that is required once the PSR is
821 * advanced, since we can no longer rely on the memory being
824 len = psr->word1.bits.length;
825 rindex = (uint8_t) psr->word1.bits.ri;
826 bindex = (uint16_t) psr->word1.bits.bi;
829 /* Indicate that we have used this PSR entry. */
/* 12-bit increment; wrap toggles bit 12 so HW can detect full ring */
831 add_12bit(&rx_local->local_psr_full, 1);
832 if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
833 /* Clear psr full and toggle the wrap bit */
834 rx_local->local_psr_full &= ~0xFFF;
835 rx_local->local_psr_full ^= 0x1000;
838 writel(rx_local->local_psr_full,
839 &etdev->regs->rxdma.psr_full_offset);
/* Sanity-check the ring/buffer indices reported by hardware */
850 bindex > rx_local->Fbr0NumEntries - 1) ||
852 bindex > rx_local->Fbr1NumEntries - 1))
855 bindex > rx_local->Fbr1NumEntries - 1)
858 /* Illegal buffer or ring index cannot be used by S/W*/
859 dev_err(&etdev->pdev->dev,
860 "NICRxPkts PSR Entry %d indicates "
861 "length of %d and/or bad bi(%d)\n",
862 rx_local->local_psr_full & 0xFFF,
867 /* Get and fill the RFD. */
868 spin_lock_irqsave(&etdev->RcvLock, flags);
871 element = rx_local->RecvList.next;
872 rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
/* NOTE(review): this unlock appears to be on an empty-list early-out
 * path (lines elided) -- verify against the full file */
875 spin_unlock_irqrestore(&etdev->RcvLock, flags);
879 list_del(&rfd->list_node);
880 rx_local->nReadyRecv--;
882 spin_unlock_irqrestore(&etdev->RcvLock, flags);
884 rfd->bufferindex = bindex;
885 rfd->ringindex = rindex;
887 /* In V1 silicon, there is a bug which screws up filtering of
888 * runt packets. Therefore runt packet filtering is disabled
889 * in the MAC and the packets are dropped here. They are
892 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
893 etdev->Stats.other_errors++;
/* Loopback self-test: compare source MAC and payload signature */
898 if (etdev->ReplicaPhyLoopbk == 1) {
899 buf = rx_local->Fbr[rindex]->Va[bindex];
901 if (memcmp(&buf[6], &etdev->CurrentAddress[0],
903 if (memcmp(&buf[42], "Replica packet",
905 etdev->ReplicaPhyLoopbkPF = 1;
910 /* Determine if this is a multicast packet coming in */
911 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
912 !(Word0.value & ALCATEL_BROADCAST_PKT)) {
913 /* Promiscuous mode and Multicast mode are
914 * not mutually exclusive as was first
915 * thought. I guess Promiscuous is just
916 * considered a super-set of the other
917 * filters. Generally filter is 0x2b when in
920 if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
921 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
922 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
923 buf = rx_local->Fbr[rindex]->
926 /* Loop through our list to see if the
927 * destination address of this packet
928 * matches one in our list.
931 i < etdev->MCAddressCount;
944 etdev->MCList[i][5]) {
949 /* If our index is equal to the number
950 * of Multicast address we have, then
951 * this means we did not find this
952 * packet's matching address in our
953 * list. Set the PacketSize to zero,
954 * so we free our RFD when we return
955 * from this function.
957 if (i == etdev->MCAddressCount)
962 etdev->Stats.multircv++;
963 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
964 etdev->Stats.brdcstrcv++;
966 /* Not sure what this counter measures in
967 * promiscuous mode. Perhaps we should check
968 * the MAC address to see if it is directed
969 * to us in promiscuous mode.
971 etdev->Stats.unircv++;
975 struct sk_buff *skb = NULL;
977 /* rfd->PacketSize = len - 4; */
978 rfd->PacketSize = len;
980 skb = dev_alloc_skb(rfd->PacketSize + 2);
982 dev_err(&etdev->pdev->dev,
983 "Couldn't alloc an SKB for Rx\n");
987 etdev->net_stats.rx_bytes += rfd->PacketSize;
/* Copy out of the DMA buffer so it can be recycled immediately */
989 memcpy(skb_put(skb, rfd->PacketSize),
990 rx_local->Fbr[rindex]->Va[bindex],
993 skb->dev = etdev->netdev;
994 skb->protocol = eth_type_trans(skb, etdev->netdev);
/* No hardware checksum offload: let the stack verify checksums */
995 skb->ip_summed = CHECKSUM_NONE;
/* Give the buffer back to the free buffer ring */
1002 nic_return_rfd(etdev, rfd);
1007 * et131x_reset_recv - Reset the receive list
1008 * @etdev: pointer to our adapter
1010 * Assumption, Rcv spinlock has been acquired.
/*
 * Resets the receive list.  Only the sanity WARN_ON (the RecvList must
 * not be empty) is visible in this copy; the rest of the body is
 * elided.  Per the header comment above, the caller must hold the
 * receive spinlock.
 */
1012 void et131x_reset_recv(struct et131x_adapter *etdev)
1014 WARN_ON(list_empty(&etdev->RxRing.RecvList));
1019 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1020 * @etdev: pointer to our adapter
1022 * Assumption, Rcv spinlock has been acquired.
/*
 * RX interrupt bottom half: drains up to NUM_PACKETS_HANDLED packets
 * via nic_rx_pkts().  If the budget is exhausted (or work remains),
 * arms the watchdog timer and marks UnfinishedReceives so processing
 * resumes later; otherwise clears the flag and lets the watchdog
 * disable itself.
 * NOTE(review): loop-exit handling, the RFD return on the filtered
 * path, and closing braces are elided in this copy; comments describe
 * visible code only.
 */
1024 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1030 /* Process up to available RFD's */
1031 while (count < NUM_PACKETS_HANDLED) {
1032 if (list_empty(&etdev->RxRing.RecvList)) {
/* Empty list with a non-zero ready count means our bookkeeping broke */
1033 WARN_ON(etdev->RxRing.nReadyRecv != 0);
1038 rfd = nic_rx_pkts(etdev);
1043 /* Do not receive any packets until a filter has been set.
1044 * Do not receive any packets until we have link.
1045 * If length is zero, return the RFD in order to advance the
1048 if (!etdev->PacketFilter ||
1049 !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1050 rfd->PacketSize == 0) {
1054 /* Increment the number of packets we received */
1055 etdev->Stats.ipackets++;
1057 /* Set the status on the packet, either resources or success */
1058 if (etdev->RxRing.nReadyRecv < RFD_LOW_WATER_MARK) {
1059 dev_warn(&etdev->pdev->dev,
1060 "RFD's are running out\n");
1065 if (count == NUM_PACKETS_HANDLED || !done) {
1066 etdev->RxRing.UnfinishedReceives = true;
1067 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1068 &etdev->regs->global.watchdog_timer);
1070 /* Watchdog timer will disable itself if appropriate. */
1071 etdev->RxRing.UnfinishedReceives = false;
/*
 * Advances a free-buffer-ring position counter held in ET_DMA10 format
 * (10-bit index in ET_DMA10_MASK plus a wrap bit, ET_DMA10_WRAP):
 * increment, and on passing @limit clear the index and toggle the wrap
 * bit.  NOTE(review): the declaration/increment of the working value
 * 'v', the wrap-bit toggle inside the if, and the store/return are
 * elided from this copy -- verify against the full file.
 */
1074 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1078 /* This works for all cases where limit < 1024. The 1023 case
1079 works because 1023++ is 1024 which means the if condition is not
1080 taken but the carry of the bit into the wrap bit toggles the wrap
1082 if ((v & ET_DMA10_MASK) > limit) {
1083 v &= ~ET_DMA10_MASK;
1086 /* For the 1023 case */
1087 v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1093 * NICReturnRFD - Recycle a RFD and put it back onto the receive list
1094 * @etdev: pointer to our adapter
1095 * @rfd: pointer to the RFD
/*
 * Recycles an RFD's hardware buffer: validates the (ring, buffer)
 * indices, rewrites the buffer's PA into the oldest entry of the
 * matching free buffer ring, bumps the ring's full offset register
 * (under FbrLock), then returns the RFD itself to the tail of
 * RecvList (under RcvLock).
 * NOTE(review): the ri==1/ri==0 branch structure, word2 assignments
 * and the function's closing brace are elided in this copy; comments
 * describe visible code only.
 */
1097 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
1099 struct _rx_ring_t *rx_local = &etdev->RxRing;
1100 struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
1101 uint16_t bi = rfd->bufferindex;
1102 uint8_t ri = rfd->ringindex;
1103 unsigned long flags;
1105 /* We don't use any of the OOB data besides status. Otherwise, we
1106 * need to clean up OOB data
/* Only indices that fit the corresponding ring are accepted */
1110 (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
1112 (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
1113 spin_lock_irqsave(&etdev->FbrLock, flags);
1116 struct fbr_desc *next =
1117 (struct fbr_desc *) (rx_local->pFbr1RingVa) +
1118 INDEX10(rx_local->local_Fbr1_full);
1120 /* Handle the Free Buffer Ring advancement here. Write
1121 * the PA / Buffer Index for the returned buffer into
1122 * the oldest (next to be freed)FBR entry
1124 next->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
1125 next->addr_lo = rx_local->Fbr[1]->PALow[bi];
/* Advance our full pointer and tell the hardware in one step */
1128 writel(bump_fbr(&rx_local->local_Fbr1_full,
1129 rx_local->Fbr1NumEntries - 1),
1130 &rx_dma->fbr1_full_offset);
1134 struct fbr_desc *next = (struct fbr_desc *)
1135 rx_local->pFbr0RingVa +
1136 INDEX10(rx_local->local_Fbr0_full);
1138 /* Handle the Free Buffer Ring advancement here. Write
1139 * the PA / Buffer Index for the returned buffer into
1140 * the oldest (next to be freed) FBR entry
1142 next->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
1143 next->addr_lo = rx_local->Fbr[0]->PALow[bi];
1146 writel(bump_fbr(&rx_local->local_Fbr0_full,
1147 rx_local->Fbr0NumEntries - 1),
1148 &rx_dma->fbr0_full_offset);
1151 spin_unlock_irqrestore(&etdev->FbrLock, flags);
1153 dev_err(&etdev->pdev->dev,
1154 "NICReturnRFD illegal Buffer Index returned\n");
1157 /* The processing on this RFD is done, so put it back on the tail of
1160 spin_lock_irqsave(&etdev->RcvLock, flags);
1161 list_add_tail(&rfd->list_node, &rx_local->RecvList);
1162 rx_local->nReadyRecv++;
1163 spin_unlock_irqrestore(&etdev->RcvLock, flags);
/* More ready RFDs than were ever allocated indicates a double return */
1165 WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);