Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh...
[pandora-kernel.git] / drivers / staging / et131x / et1310_rx.c
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et131x_adapter.h"
88 #include "et1310_rx.h"
89 #include "et131x.h"
90
91 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
92
93 /**
94  * et131x_rx_dma_memory_alloc
95  * @adapter: pointer to our private adapter structure
96  *
97  * Returns 0 on success and errno on failure (as defined in errno.h)
98  *
99  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
100  * and the Packet Status Ring.
101  */
102 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
103 {
104         u32 i, j;
105         u32 bufsize;
106         u32 pktStatRingSize, FBRChunkSize;
107         struct rx_ring *rx_ring;
108
109         /* Setup some convenience pointers */
110         rx_ring = &adapter->rx_ring;
111
112         /* Alloc memory for the lookup table */
113 #ifdef USE_FBR0
114         rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
115 #endif
116         rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
117
118         /* The first thing we will do is configure the sizes of the buffer
119          * rings. These will change based on jumbo packet support.  Larger
120          * jumbo packets increases the size of each entry in FBR0, and the
121          * number of entries in FBR0, while at the same time decreasing the
122          * number of entries in FBR1.
123          *
124          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
125          * entries are huge in order to accomodate a "jumbo" frame, then it
126          * will have less entries.  Conversely, FBR1 will now be relied upon
127          * to carry more "normal" frames, thus it's entry size also increases
128          * and the number of entries goes up too (since it now carries
129          * "small" + "regular" packets.
130          *
131          * In this scheme, we try to maintain 512 entries between the two
132          * rings. Also, FBR1 remains a constant size - when it's size doubles
133          * the number of entries halves.  FBR0 increases in size, however.
134          */
135
136         if (adapter->RegistryJumboPacket < 2048) {
137 #ifdef USE_FBR0
138                 rx_ring->Fbr0BufferSize = 256;
139                 rx_ring->Fbr0NumEntries = 512;
140 #endif
141                 rx_ring->Fbr1BufferSize = 2048;
142                 rx_ring->Fbr1NumEntries = 512;
143         } else if (adapter->RegistryJumboPacket < 4096) {
144 #ifdef USE_FBR0
145                 rx_ring->Fbr0BufferSize = 512;
146                 rx_ring->Fbr0NumEntries = 1024;
147 #endif
148                 rx_ring->Fbr1BufferSize = 4096;
149                 rx_ring->Fbr1NumEntries = 512;
150         } else {
151 #ifdef USE_FBR0
152                 rx_ring->Fbr0BufferSize = 1024;
153                 rx_ring->Fbr0NumEntries = 768;
154 #endif
155                 rx_ring->Fbr1BufferSize = 16384;
156                 rx_ring->Fbr1NumEntries = 128;
157         }
158
159 #ifdef USE_FBR0
160         adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
161             adapter->rx_ring.Fbr1NumEntries;
162 #else
163         adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
164 #endif
165
166         /* Allocate an area of memory for Free Buffer Ring 1 */
167         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
168         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
169                                                     bufsize,
170                                                     &rx_ring->pFbr1RingPa);
171         if (!rx_ring->pFbr1RingVa) {
172                 dev_err(&adapter->pdev->dev,
173                           "Cannot alloc memory for Free Buffer Ring 1\n");
174                 return -ENOMEM;
175         }
176
177         /* Save physical address
178          *
179          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
180          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
181          * are ever returned, make sure the high part is retrieved here
182          * before storing the adjusted address.
183          */
184         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
185
186         /* Align Free Buffer Ring 1 on a 4K boundary */
187         et131x_align_allocated_memory(adapter,
188                                       &rx_ring->Fbr1Realpa,
189                                       &rx_ring->Fbr1offset, 0x0FFF);
190
191         rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
192                                         rx_ring->Fbr1offset);
193
194 #ifdef USE_FBR0
195         /* Allocate an area of memory for Free Buffer Ring 0 */
196         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
197         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
198                                                     bufsize,
199                                                     &rx_ring->pFbr0RingPa);
200         if (!rx_ring->pFbr0RingVa) {
201                 dev_err(&adapter->pdev->dev,
202                           "Cannot alloc memory for Free Buffer Ring 0\n");
203                 return -ENOMEM;
204         }
205
206         /* Save physical address
207          *
208          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
209          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
210          * are ever returned, make sure the high part is retrieved here before
211          * storing the adjusted address.
212          */
213         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
214
215         /* Align Free Buffer Ring 0 on a 4K boundary */
216         et131x_align_allocated_memory(adapter,
217                                       &rx_ring->Fbr0Realpa,
218                                       &rx_ring->Fbr0offset, 0x0FFF);
219
220         rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
221                                         rx_ring->Fbr0offset);
222 #endif
223
224         for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
225              i++) {
226                 u64 Fbr1Offset;
227                 u64 Fbr1TempPa;
228                 u32 Fbr1Align;
229
230                 /* This code allocates an area of memory big enough for N
231                  * free buffers + (buffer_size - 1) so that the buffers can
232                  * be aligned on 4k boundaries.  If each buffer were aligned
233                  * to a buffer_size boundary, the effect would be to double
234                  * the size of FBR0.  By allocating N buffers at once, we
235                  * reduce this overhead.
236                  */
237                 if (rx_ring->Fbr1BufferSize > 4096)
238                         Fbr1Align = 4096;
239                 else
240                         Fbr1Align = rx_ring->Fbr1BufferSize;
241
242                 FBRChunkSize =
243                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
244                 rx_ring->Fbr1MemVa[i] =
245                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
246                                          &rx_ring->Fbr1MemPa[i]);
247
248                 if (!rx_ring->Fbr1MemVa[i]) {
249                 dev_err(&adapter->pdev->dev,
250                                 "Could not alloc memory\n");
251                         return -ENOMEM;
252                 }
253
254                 /* See NOTE in "Save Physical Address" comment above */
255                 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
256
257                 et131x_align_allocated_memory(adapter,
258                                               &Fbr1TempPa,
259                                               &Fbr1Offset, (Fbr1Align - 1));
260
261                 for (j = 0; j < FBR_CHUNKS; j++) {
262                         u32 index = (i * FBR_CHUNKS) + j;
263
264                         /* Save the Virtual address of this index for quick
265                          * access later
266                          */
267                         rx_ring->fbr[1]->virt[index] =
268                             (u8 *) rx_ring->Fbr1MemVa[i] +
269                             (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
270
271                         /* now store the physical address in the descriptor
272                          * so the device can access it
273                          */
274                         rx_ring->fbr[1]->bus_high[index] =
275                             (u32) (Fbr1TempPa >> 32);
276                         rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
277
278                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
279
280                         rx_ring->fbr[1]->buffer1[index] =
281                             rx_ring->fbr[1]->virt[index];
282                         rx_ring->fbr[1]->buffer2[index] =
283                             rx_ring->fbr[1]->virt[index] - 4;
284                 }
285         }
286
287 #ifdef USE_FBR0
288         /* Same for FBR0 (if in use) */
289         for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
290              i++) {
291                 u64 Fbr0Offset;
292                 u64 Fbr0TempPa;
293
294                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
295                 rx_ring->Fbr0MemVa[i] =
296                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
297                                          &rx_ring->Fbr0MemPa[i]);
298
299                 if (!rx_ring->Fbr0MemVa[i]) {
300                         dev_err(&adapter->pdev->dev,
301                                 "Could not alloc memory\n");
302                         return -ENOMEM;
303                 }
304
305                 /* See NOTE in "Save Physical Address" comment above */
306                 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
307
308                 et131x_align_allocated_memory(adapter,
309                                               &Fbr0TempPa,
310                                               &Fbr0Offset,
311                                               rx_ring->Fbr0BufferSize - 1);
312
313                 for (j = 0; j < FBR_CHUNKS; j++) {
314                         u32 index = (i * FBR_CHUNKS) + j;
315
316                         rx_ring->fbr[0]->virt[index] =
317                             (u8 *) rx_ring->Fbr0MemVa[i] +
318                             (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
319
320                         rx_ring->fbr[0]->bus_high[index] =
321                             (u32) (Fbr0TempPa >> 32);
322                         rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
323
324                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
325
326                         rx_ring->fbr[0]->buffer1[index] =
327                             rx_ring->fbr[0]->virt[index];
328                         rx_ring->fbr[0]->buffer2[index] =
329                             rx_ring->fbr[0]->virt[index] - 4;
330                 }
331         }
332 #endif
333
334         /* Allocate an area of memory for FIFO of Packet Status ring entries */
335         pktStatRingSize =
336             sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
337
338         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
339                                                   pktStatRingSize,
340                                                   &rx_ring->pPSRingPa);
341
342         if (!rx_ring->pPSRingVa) {
343                 dev_err(&adapter->pdev->dev,
344                           "Cannot alloc memory for Packet Status Ring\n");
345                 return -ENOMEM;
346         }
347         printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
348
349         /*
350          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
351          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
352          * are ever returned, make sure the high part is retrieved here before
353          * storing the adjusted address.
354          */
355
356         /* Allocate an area of memory for writeback of status information */
357         rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
358                                             sizeof(struct rx_status_block),
359                                             &rx_ring->rx_status_bus);
360         if (!rx_ring->rx_status_block) {
361                 dev_err(&adapter->pdev->dev,
362                           "Cannot alloc memory for Status Block\n");
363                 return -ENOMEM;
364         }
365         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
366         printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
367
368         /* Recv
369          * pci_pool_create initializes a lookaside list. After successful
370          * creation, nonpaged fixed-size blocks can be allocated from and
371          * freed to the lookaside list.
372          * RFDs will be allocated from this pool.
373          */
374         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
375                                                    sizeof(MP_RFD),
376                                                    0,
377                                                    SLAB_CACHE_DMA |
378                                                    SLAB_HWCACHE_ALIGN,
379                                                    NULL);
380
381         adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
382
383         /* The RFDs are going to be put on lists later on, so initialize the
384          * lists now.
385          */
386         INIT_LIST_HEAD(&rx_ring->RecvList);
387         return 0;
388 }
389
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Tears down everything built by et131x_rx_dma_memory_alloc(): the RFD
 * lookaside objects, both free buffer rings (packet chunks first, then the
 * descriptor FIFOs), the packet status ring, the status writeback block,
 * the lookaside cache itself, and the FBR lookup tables.  The size and
 * offset arithmetic here must mirror the alloc path exactly, since the
 * original (pre-alignment) address/length pairs are what must be handed
 * back to pci_free_consistent().
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	PMP_RFD rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&rfd->list_node);
		rfd->Packet = NULL;
		kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Same alignment cap used at alloc time:
				 * buffers larger than a page are only
				 * 4K-aligned.
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				/* Must match the FBRChunkSize computed in
				 * the alloc path for this chunk.
				 */
				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself; undo the alignment offset applied at
		 * alloc time so the original VA is passed back.
		 */
		rx_ring->pFbr1RingVa = (void *)((u8 *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				/* Matches FBRChunkSize in the alloc path */
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself; undo the alignment offset */
		rx_ring->pFbr0RingVa = (void *)((u8 *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif

	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
530
531 /**
532  * et131x_init_recv - Initialize receive data structures.
533  * @adapter: pointer to our private adapter structure
534  *
535  * Returns 0 on success and errno on failure (as defined in errno.h)
536  */
537 int et131x_init_recv(struct et131x_adapter *adapter)
538 {
539         int status = -ENOMEM;
540         PMP_RFD rfd = NULL;
541         u32 rfdct;
542         u32 numrfd = 0;
543         struct rx_ring *rx_ring;
544
545         /* Setup some convenience pointers */
546         rx_ring = &adapter->rx_ring;
547
548         /* Setup each RFD */
549         for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
550                 rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
551                                                      GFP_ATOMIC | GFP_DMA);
552
553                 if (!rfd) {
554                         dev_err(&adapter->pdev->dev,
555                                   "Couldn't alloc RFD out of kmem_cache\n");
556                         status = -ENOMEM;
557                         continue;
558                 }
559
560                 rfd->Packet = NULL;
561
562                 /* Add this RFD to the RecvList */
563                 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
564
565                 /* Increment both the available RFD's, and the total RFD's. */
566                 rx_ring->nReadyRecv++;
567                 numrfd++;
568         }
569
570         if (numrfd > NIC_MIN_NUM_RFD)
571                 status = 0;
572
573         rx_ring->NumRfd = numrfd;
574
575         if (status != 0) {
576                 kmem_cache_free(rx_ring->RecvLookaside, rfd);
577                 dev_err(&adapter->pdev->dev,
578                           "Allocation problems in et131x_init_recv\n");
579         }
580         return status;
581 }
582
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 *
 * Halts the RX DMA engine, then programs it with the physical addresses
 * and sizes of the status writeback block, the packet status ring, and
 * free buffer ring 1 (plus ring 0 when USE_FBR0), initializes the FBR
 * descriptor contents, and sets the interrupt coalescing parameters.
 * The engine is NOT re-enabled here; see et131x_rx_dma_enable().
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* PSR low-water mark: a percentage of the ring's entry count */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents: one descriptor
	 * per buffer, with word2 carrying the buffer index.
	 */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
693
694 /**
695  * SetRxDmaTimer - Set the heartbeat timer according to line rate.
696  * @etdev: pointer to our adapter structure
697  */
698 void SetRxDmaTimer(struct et131x_adapter *etdev)
699 {
700         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
701          * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
702          */
703         if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
704             (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
705                 writel(0, &etdev->regs->rxdma.max_pkt_time);
706                 writel(1, &etdev->regs->rxdma.num_pkt_done);
707         }
708 }
709
710 /**
711  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
712  * @etdev: pointer to our adapter structure
713  */
714 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
715 {
716         u32 csr;
717         /* Setup the receive dma configuration register */
718         writel(0x00002001, &etdev->regs->rxdma.csr);
719         csr = readl(&etdev->regs->rxdma.csr);
720         if ((csr & 0x00020000) != 1) {  /* Check halt status (bit 17) */
721                 udelay(5);
722                 csr = readl(&etdev->regs->rxdma.csr);
723                 if ((csr & 0x00020000) != 1)
724                         dev_err(&etdev->pdev->dev,
725                         "RX Dma failed to enter halt state. CSR 0x%08x\n",
726                                 csr);
727         }
728 }
729
730 /**
731  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
732  * @etdev: pointer to our adapter structure
733  */
734 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
735 {
736         /* Setup the receive dma configuration register for normal operation */
737         u32 csr =  0x2000;      /* FBR1 enable */
738
739         if (etdev->rx_ring.Fbr1BufferSize == 4096)
740                 csr |= 0x0800;
741         else if (etdev->rx_ring.Fbr1BufferSize == 8192)
742                 csr |= 0x1000;
743         else if (etdev->rx_ring.Fbr1BufferSize == 16384)
744                 csr |= 0x1800;
745 #ifdef USE_FBR0
746         csr |= 0x0400;          /* FBR0 enable */
747         if (etdev->rx_ring.Fbr0BufferSize == 256)
748                 csr |= 0x0100;
749         else if (etdev->rx_ring.Fbr0BufferSize == 512)
750                 csr |= 0x0200;
751         else if (etdev->rx_ring.Fbr0BufferSize == 1024)
752                 csr |= 0x0300;
753 #endif
754         writel(csr, &etdev->regs->rxdma.csr);
755
756         csr = readl(&etdev->regs->rxdma.csr);
757         if ((csr & 0x00020000) != 0) {
758                 udelay(5);
759                 csr = readl(&etdev->regs->rxdma.csr);
760                 if ((csr & 0x00020000) != 0) {
761                         dev_err(&etdev->pdev->dev,
762                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
763                                 csr);
764                 }
765         }
766 }
767
768 /**
769  * nic_rx_pkts - Checks the hardware for available packets
770  * @etdev: pointer to our adapter
771  *
772  * Returns rfd, a pointer to our MPRFD.
773  *
774  * Checks the hardware for available packets, using completion ring
775  * If packets are available, it gets an RFD from the RecvList, attaches
776  * the packet to it, puts the RFD in the RecvPendList, and also returns
777  * the pointer to the RFD.
778  */
779 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
780 {
781         struct rx_ring *rx_local = &etdev->rx_ring;
782         struct rx_status_block *status;
783         struct pkt_stat_desc *psr;
784         PMP_RFD rfd;
785         u32 i;
786         u8 *buf;
787         unsigned long flags;
788         struct list_head *element;
789         u8 rindex;
790         u16 bindex;
791         u32 len;
792         u32 word0;
793         u32 word1;
794
795         /* RX Status block is written by the DMA engine prior to every
796          * interrupt. It contains the next to be used entry in the Packet
797          * Status Ring, and also the two Free Buffer rings.
798          */
799         status = rx_local->rx_status_block;
800         word1 = status->Word1 >> 16;    /* Get the useful bits */
801
802         /* Check the PSR and wrap bits do not match */
803         if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
804                 /* Looks like this ring is not updated yet */
805                 return NULL;
806
807         /* The packet status ring indicates that data is available. */
808         psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
809                         (rx_local->local_psr_full & 0xFFF);
810
811         /* Grab any information that is required once the PSR is
812          * advanced, since we can no longer rely on the memory being
813          * accurate
814          */
815         len = psr->word1 & 0xFFFF;
816         rindex = (psr->word1 >> 26) & 0x03;
817         bindex = (psr->word1 >> 16) & 0x3FF;
818         word0 = psr->word0;
819
820         /* Indicate that we have used this PSR entry. */
821         /* FIXME wrap 12 */
822         add_12bit(&rx_local->local_psr_full, 1);
823         if ((rx_local->local_psr_full & 0xFFF)  > rx_local->PsrNumEntries - 1) {
824                 /* Clear psr full and toggle the wrap bit */
825                 rx_local->local_psr_full &=  ~0xFFF;
826                 rx_local->local_psr_full ^= 0x1000;
827         }
828
829         writel(rx_local->local_psr_full,
830                &etdev->regs->rxdma.psr_full_offset);
831
832 #ifndef USE_FBR0
833         if (rindex != 1)
834                 return NULL;
835 #endif
836
837 #ifdef USE_FBR0
838         if (rindex > 1 ||
839                 (rindex == 0 &&
840                 bindex > rx_local->Fbr0NumEntries - 1) ||
841                 (rindex == 1 &&
842                 bindex > rx_local->Fbr1NumEntries - 1))
843 #else
844         if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
845 #endif
846         {
847                 /* Illegal buffer or ring index cannot be used by S/W*/
848                 dev_err(&etdev->pdev->dev,
849                           "NICRxPkts PSR Entry %d indicates "
850                           "length of %d and/or bad bi(%d)\n",
851                           rx_local->local_psr_full & 0xFFF,
852                           len, bindex);
853                 return NULL;
854         }
855
856         /* Get and fill the RFD. */
857         spin_lock_irqsave(&etdev->RcvLock, flags);
858
859         rfd = NULL;
860         element = rx_local->RecvList.next;
861         rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
862
863         if (rfd == NULL) {
864                 spin_unlock_irqrestore(&etdev->RcvLock, flags);
865                 return NULL;
866         }
867
868         list_del(&rfd->list_node);
869         rx_local->nReadyRecv--;
870
871         spin_unlock_irqrestore(&etdev->RcvLock, flags);
872
873         rfd->bufferindex = bindex;
874         rfd->ringindex = rindex;
875
876         /* In V1 silicon, there is a bug which screws up filtering of
877          * runt packets.  Therefore runt packet filtering is disabled
878          * in the MAC and the packets are dropped here.  They are
879          * also counted here.
880          */
881         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
882                 etdev->Stats.other_errors++;
883                 len = 0;
884         }
885
886         if (len) {
887                 if (etdev->ReplicaPhyLoopbk == 1) {
888                         buf = rx_local->fbr[rindex]->virt[bindex];
889
890                         if (memcmp(&buf[6], &etdev->CurrentAddress[0],
891                                    ETH_ALEN) == 0) {
892                                 if (memcmp(&buf[42], "Replica packet",
893                                            ETH_HLEN)) {
894                                         etdev->ReplicaPhyLoopbkPF = 1;
895                                 }
896                         }
897                 }
898
899                 /* Determine if this is a multicast packet coming in */
900                 if ((word0 & ALCATEL_MULTICAST_PKT) &&
901                     !(word0 & ALCATEL_BROADCAST_PKT)) {
902                         /* Promiscuous mode and Multicast mode are
903                          * not mutually exclusive as was first
904                          * thought.  I guess Promiscuous is just
905                          * considered a super-set of the other
906                          * filters. Generally filter is 0x2b when in
907                          * promiscuous mode.
908                          */
909                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
910                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
911                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
912                                 buf = rx_local->fbr[rindex]->
913                                                 virt[bindex];
914
915                                 /* Loop through our list to see if the
916                                  * destination address of this packet
917                                  * matches one in our list.
918                                  */
919                                 for (i = 0;
920                                      i < etdev->MCAddressCount;
921                                      i++) {
922                                         if (buf[0] ==
923                                             etdev->MCList[i][0]
924                                             && buf[1] ==
925                                             etdev->MCList[i][1]
926                                             && buf[2] ==
927                                             etdev->MCList[i][2]
928                                             && buf[3] ==
929                                             etdev->MCList[i][3]
930                                             && buf[4] ==
931                                             etdev->MCList[i][4]
932                                             && buf[5] ==
933                                             etdev->MCList[i][5]) {
934                                                 break;
935                                         }
936                                 }
937
938                                 /* If our index is equal to the number
939                                  * of Multicast address we have, then
940                                  * this means we did not find this
941                                  * packet's matching address in our
942                                  * list.  Set the PacketSize to zero,
943                                  * so we free our RFD when we return
944                                  * from this function.
945                                  */
946                                 if (i == etdev->MCAddressCount)
947                                         len = 0;
948                         }
949
950                         if (len > 0)
951                                 etdev->Stats.multircv++;
952                 } else if (word0 & ALCATEL_BROADCAST_PKT)
953                         etdev->Stats.brdcstrcv++;
954                 else
955                         /* Not sure what this counter measures in
956                          * promiscuous mode. Perhaps we should check
957                          * the MAC address to see if it is directed
958                          * to us in promiscuous mode.
959                          */
960                         etdev->Stats.unircv++;
961         }
962
963         if (len > 0) {
964                 struct sk_buff *skb = NULL;
965
966                 /* rfd->PacketSize = len - 4; */
967                 rfd->PacketSize = len;
968
969                 skb = dev_alloc_skb(rfd->PacketSize + 2);
970                 if (!skb) {
971                         dev_err(&etdev->pdev->dev,
972                                   "Couldn't alloc an SKB for Rx\n");
973                         return NULL;
974                 }
975
976                 etdev->net_stats.rx_bytes += rfd->PacketSize;
977
978                 memcpy(skb_put(skb, rfd->PacketSize),
979                        rx_local->fbr[rindex]->virt[bindex],
980                        rfd->PacketSize);
981
982                 skb->dev = etdev->netdev;
983                 skb->protocol = eth_type_trans(skb, etdev->netdev);
984                 skb->ip_summed = CHECKSUM_NONE;
985
986                 netif_rx(skb);
987         } else {
988                 rfd->PacketSize = 0;
989         }
990
991         nic_return_rfd(etdev, rfd);
992         return rfd;
993 }
994
995 /**
996  * et131x_reset_recv - Reset the receive list
997  * @etdev: pointer to our adapter
998  *
999  * Assumption, Rcv spinlock has been acquired.
1000  */
1001 void et131x_reset_recv(struct et131x_adapter *etdev)
1002 {
1003         WARN_ON(list_empty(&etdev->rx_ring.RecvList));
1004
1005 }
1006
1007 /**
1008  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1009  * @etdev: pointer to our adapter
1010  *
1011  * Assumption, Rcv spinlock has been acquired.
1012  */
1013 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1014 {
1015         PMP_RFD rfd = NULL;
1016         u32 count = 0;
1017         bool done = true;
1018
1019         /* Process up to available RFD's */
1020         while (count < NUM_PACKETS_HANDLED) {
1021                 if (list_empty(&etdev->rx_ring.RecvList)) {
1022                         WARN_ON(etdev->rx_ring.nReadyRecv != 0);
1023                         done = false;
1024                         break;
1025                 }
1026
1027                 rfd = nic_rx_pkts(etdev);
1028
1029                 if (rfd == NULL)
1030                         break;
1031
1032                 /* Do not receive any packets until a filter has been set.
1033                  * Do not receive any packets until we have link.
1034                  * If length is zero, return the RFD in order to advance the
1035                  * Free buffer ring.
1036                  */
1037                 if (!etdev->PacketFilter ||
1038                     !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1039                     rfd->PacketSize == 0) {
1040                         continue;
1041                 }
1042
1043                 /* Increment the number of packets we received */
1044                 etdev->Stats.ipackets++;
1045
1046                 /* Set the status on the packet, either resources or success */
1047                 if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
1048                         dev_warn(&etdev->pdev->dev,
1049                                     "RFD's are running out\n");
1050                 }
1051                 count++;
1052         }
1053
1054         if (count == NUM_PACKETS_HANDLED || !done) {
1055                 etdev->rx_ring.UnfinishedReceives = true;
1056                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1057                        &etdev->regs->global.watchdog_timer);
1058         } else
1059                 /* Watchdog timer will disable itself if appropriate. */
1060                 etdev->rx_ring.UnfinishedReceives = false;
1061 }
1062
1063 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1064 {
1065         u32 v = *fbr;
1066         v++;
1067         /* This works for all cases where limit < 1024. The 1023 case
1068            works because 1023++ is 1024 which means the if condition is not
1069            taken but the carry of the bit into the wrap bit toggles the wrap
1070            value correctly */
1071         if ((v & ET_DMA10_MASK) > limit) {
1072                 v &= ~ET_DMA10_MASK;
1073                 v ^= ET_DMA10_WRAP;
1074         }
1075         /* For the 1023 case */
1076         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1077         *fbr = v;
1078         return v;
1079 }
1080
1081 /**
1082  * NICReturnRFD - Recycle a RFD and put it back onto the receive list
1083  * @etdev: pointer to our adapter
1084  * @rfd: pointer to the RFD
1085  */
1086 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
1087 {
1088         struct rx_ring *rx_local = &etdev->rx_ring;
1089         struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
1090         u16 bi = rfd->bufferindex;
1091         u8 ri = rfd->ringindex;
1092         unsigned long flags;
1093
1094         /* We don't use any of the OOB data besides status. Otherwise, we
1095          * need to clean up OOB data
1096          */
1097         if (
1098 #ifdef USE_FBR0
1099             (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
1100 #endif
1101             (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
1102                 spin_lock_irqsave(&etdev->FbrLock, flags);
1103
1104                 if (ri == 1) {
1105                         struct fbr_desc *next =
1106                             (struct fbr_desc *) (rx_local->pFbr1RingVa) +
1107                                          INDEX10(rx_local->local_Fbr1_full);
1108
1109                         /* Handle the Free Buffer Ring advancement here. Write
1110                          * the PA / Buffer Index for the returned buffer into
1111                          * the oldest (next to be freed)FBR entry
1112                          */
1113                         next->addr_hi = rx_local->fbr[1]->bus_high[bi];
1114                         next->addr_lo = rx_local->fbr[1]->bus_low[bi];
1115                         next->word2 = bi;
1116
1117                         writel(bump_fbr(&rx_local->local_Fbr1_full,
1118                                 rx_local->Fbr1NumEntries - 1),
1119                                 &rx_dma->fbr1_full_offset);
1120                 }
1121 #ifdef USE_FBR0
1122                 else {
1123                         struct fbr_desc *next = (struct fbr_desc *)
1124                                 rx_local->pFbr0RingVa +
1125                                         INDEX10(rx_local->local_Fbr0_full);
1126
1127                         /* Handle the Free Buffer Ring advancement here. Write
1128                          * the PA / Buffer Index for the returned buffer into
1129                          * the oldest (next to be freed) FBR entry
1130                          */
1131                         next->addr_hi = rx_local->fbr[0]->bus_high[bi];
1132                         next->addr_lo = rx_local->fbr[0]->bus_low[bi];
1133                         next->word2 = bi;
1134
1135                         writel(bump_fbr(&rx_local->local_Fbr0_full,
1136                                         rx_local->Fbr0NumEntries - 1),
1137                                &rx_dma->fbr0_full_offset);
1138                 }
1139 #endif
1140                 spin_unlock_irqrestore(&etdev->FbrLock, flags);
1141         } else {
1142                 dev_err(&etdev->pdev->dev,
1143                           "NICReturnRFD illegal Buffer Index returned\n");
1144         }
1145
1146         /* The processing on this RFD is done, so put it back on the tail of
1147          * our list
1148          */
1149         spin_lock_irqsave(&etdev->RcvLock, flags);
1150         list_add_tail(&rfd->list_node, &rx_local->RecvList);
1151         rx_local->nReadyRecv++;
1152         spin_unlock_irqrestore(&etdev->RcvLock, flags);
1153
1154         WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
1155 }