Merge branch 'for-linus' of git://git.kernel.dk/linux-block
[pandora-kernel.git] / drivers / staging / et131x / et1310_rx.c
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et131x_adapter.h"
88 #include "et1310_rx.h"
89 #include "et131x.h"
90
91 static inline u32 bump_fbr(u32 *fbr, u32 limit)
92 {
93         u32 v = *fbr;
94         v++;
95         /* This works for all cases where limit < 1024. The 1023 case
96            works because 1023++ is 1024 which means the if condition is not
97            taken but the carry of the bit into the wrap bit toggles the wrap
98            value correctly */
99         if ((v & ET_DMA10_MASK) > limit) {
100                 v &= ~ET_DMA10_MASK;
101                 v ^= ET_DMA10_WRAP;
102         }
103         /* For the 1023 case */
104         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
105         *fbr = v;
106         return v;
107 }
108
109 /**
110  * et131x_rx_dma_memory_alloc
111  * @adapter: pointer to our private adapter structure
112  *
113  * Returns 0 on success and errno on failure (as defined in errno.h)
114  *
115  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
116  * and the Packet Status Ring.
117  */
118 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
119 {
120         u32 i, j;
121         u32 bufsize;
122         u32 pktStatRingSize, FBRChunkSize;
123         struct rx_ring *rx_ring;
124
125         /* Setup some convenience pointers */
126         rx_ring = &adapter->rx_ring;
127
128         /* Alloc memory for the lookup table */
129 #ifdef USE_FBR0
130         rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
131 #endif
132         rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
133
134         /* The first thing we will do is configure the sizes of the buffer
135          * rings. These will change based on jumbo packet support.  Larger
136          * jumbo packets increases the size of each entry in FBR0, and the
137          * number of entries in FBR0, while at the same time decreasing the
138          * number of entries in FBR1.
139          *
140          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
141          * entries are huge in order to accommodate a "jumbo" frame, then it
142          * will have less entries.  Conversely, FBR1 will now be relied upon
143          * to carry more "normal" frames, thus it's entry size also increases
144          * and the number of entries goes up too (since it now carries
145          * "small" + "regular" packets.
146          *
147          * In this scheme, we try to maintain 512 entries between the two
148          * rings. Also, FBR1 remains a constant size - when it's size doubles
149          * the number of entries halves.  FBR0 increases in size, however.
150          */
151
152         if (adapter->RegistryJumboPacket < 2048) {
153 #ifdef USE_FBR0
154                 rx_ring->Fbr0BufferSize = 256;
155                 rx_ring->Fbr0NumEntries = 512;
156 #endif
157                 rx_ring->Fbr1BufferSize = 2048;
158                 rx_ring->Fbr1NumEntries = 512;
159         } else if (adapter->RegistryJumboPacket < 4096) {
160 #ifdef USE_FBR0
161                 rx_ring->Fbr0BufferSize = 512;
162                 rx_ring->Fbr0NumEntries = 1024;
163 #endif
164                 rx_ring->Fbr1BufferSize = 4096;
165                 rx_ring->Fbr1NumEntries = 512;
166         } else {
167 #ifdef USE_FBR0
168                 rx_ring->Fbr0BufferSize = 1024;
169                 rx_ring->Fbr0NumEntries = 768;
170 #endif
171                 rx_ring->Fbr1BufferSize = 16384;
172                 rx_ring->Fbr1NumEntries = 128;
173         }
174
175 #ifdef USE_FBR0
176         adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
177             adapter->rx_ring.Fbr1NumEntries;
178 #else
179         adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
180 #endif
181
182         /* Allocate an area of memory for Free Buffer Ring 1 */
183         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
184         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
185                                                     bufsize,
186                                                     &rx_ring->pFbr1RingPa);
187         if (!rx_ring->pFbr1RingVa) {
188                 dev_err(&adapter->pdev->dev,
189                           "Cannot alloc memory for Free Buffer Ring 1\n");
190                 return -ENOMEM;
191         }
192
193         /* Save physical address
194          *
195          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
196          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
197          * are ever returned, make sure the high part is retrieved here
198          * before storing the adjusted address.
199          */
200         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
201
202         /* Align Free Buffer Ring 1 on a 4K boundary */
203         et131x_align_allocated_memory(adapter,
204                                       &rx_ring->Fbr1Realpa,
205                                       &rx_ring->Fbr1offset, 0x0FFF);
206
207         rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
208                                         rx_ring->Fbr1offset);
209
210 #ifdef USE_FBR0
211         /* Allocate an area of memory for Free Buffer Ring 0 */
212         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
213         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
214                                                     bufsize,
215                                                     &rx_ring->pFbr0RingPa);
216         if (!rx_ring->pFbr0RingVa) {
217                 dev_err(&adapter->pdev->dev,
218                           "Cannot alloc memory for Free Buffer Ring 0\n");
219                 return -ENOMEM;
220         }
221
222         /* Save physical address
223          *
224          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
225          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
226          * are ever returned, make sure the high part is retrieved here before
227          * storing the adjusted address.
228          */
229         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
230
231         /* Align Free Buffer Ring 0 on a 4K boundary */
232         et131x_align_allocated_memory(adapter,
233                                       &rx_ring->Fbr0Realpa,
234                                       &rx_ring->Fbr0offset, 0x0FFF);
235
236         rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
237                                         rx_ring->Fbr0offset);
238 #endif
239
240         for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
241              i++) {
242                 u64 Fbr1Offset;
243                 u64 Fbr1TempPa;
244                 u32 Fbr1Align;
245
246                 /* This code allocates an area of memory big enough for N
247                  * free buffers + (buffer_size - 1) so that the buffers can
248                  * be aligned on 4k boundaries.  If each buffer were aligned
249                  * to a buffer_size boundary, the effect would be to double
250                  * the size of FBR0.  By allocating N buffers at once, we
251                  * reduce this overhead.
252                  */
253                 if (rx_ring->Fbr1BufferSize > 4096)
254                         Fbr1Align = 4096;
255                 else
256                         Fbr1Align = rx_ring->Fbr1BufferSize;
257
258                 FBRChunkSize =
259                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
260                 rx_ring->Fbr1MemVa[i] =
261                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
262                                          &rx_ring->Fbr1MemPa[i]);
263
264                 if (!rx_ring->Fbr1MemVa[i]) {
265                         dev_err(&adapter->pdev->dev,
266                                 "Could not alloc memory\n");
267                         return -ENOMEM;
268                 }
269
270                 /* See NOTE in "Save Physical Address" comment above */
271                 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
272
273                 et131x_align_allocated_memory(adapter,
274                                               &Fbr1TempPa,
275                                               &Fbr1Offset, (Fbr1Align - 1));
276
277                 for (j = 0; j < FBR_CHUNKS; j++) {
278                         u32 index = (i * FBR_CHUNKS) + j;
279
280                         /* Save the Virtual address of this index for quick
281                          * access later
282                          */
283                         rx_ring->fbr[1]->virt[index] =
284                             (u8 *) rx_ring->Fbr1MemVa[i] +
285                             (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
286
287                         /* now store the physical address in the descriptor
288                          * so the device can access it
289                          */
290                         rx_ring->fbr[1]->bus_high[index] =
291                             (u32) (Fbr1TempPa >> 32);
292                         rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
293
294                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
295
296                         rx_ring->fbr[1]->buffer1[index] =
297                             rx_ring->fbr[1]->virt[index];
298                         rx_ring->fbr[1]->buffer2[index] =
299                             rx_ring->fbr[1]->virt[index] - 4;
300                 }
301         }
302
303 #ifdef USE_FBR0
304         /* Same for FBR0 (if in use) */
305         for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
306              i++) {
307                 u64 Fbr0Offset;
308                 u64 Fbr0TempPa;
309
310                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
311                 rx_ring->Fbr0MemVa[i] =
312                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
313                                          &rx_ring->Fbr0MemPa[i]);
314
315                 if (!rx_ring->Fbr0MemVa[i]) {
316                         dev_err(&adapter->pdev->dev,
317                                 "Could not alloc memory\n");
318                         return -ENOMEM;
319                 }
320
321                 /* See NOTE in "Save Physical Address" comment above */
322                 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
323
324                 et131x_align_allocated_memory(adapter,
325                                               &Fbr0TempPa,
326                                               &Fbr0Offset,
327                                               rx_ring->Fbr0BufferSize - 1);
328
329                 for (j = 0; j < FBR_CHUNKS; j++) {
330                         u32 index = (i * FBR_CHUNKS) + j;
331
332                         rx_ring->fbr[0]->virt[index] =
333                             (u8 *) rx_ring->Fbr0MemVa[i] +
334                             (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
335
336                         rx_ring->fbr[0]->bus_high[index] =
337                             (u32) (Fbr0TempPa >> 32);
338                         rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
339
340                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
341
342                         rx_ring->fbr[0]->buffer1[index] =
343                             rx_ring->fbr[0]->virt[index];
344                         rx_ring->fbr[0]->buffer2[index] =
345                             rx_ring->fbr[0]->virt[index] - 4;
346                 }
347         }
348 #endif
349
350         /* Allocate an area of memory for FIFO of Packet Status ring entries */
351         pktStatRingSize =
352             sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
353
354         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
355                                                   pktStatRingSize,
356                                                   &rx_ring->pPSRingPa);
357
358         if (!rx_ring->pPSRingVa) {
359                 dev_err(&adapter->pdev->dev,
360                           "Cannot alloc memory for Packet Status Ring\n");
361                 return -ENOMEM;
362         }
363         printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
364
365         /*
366          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
367          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
368          * are ever returned, make sure the high part is retrieved here before
369          * storing the adjusted address.
370          */
371
372         /* Allocate an area of memory for writeback of status information */
373         rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
374                                             sizeof(struct rx_status_block),
375                                             &rx_ring->rx_status_bus);
376         if (!rx_ring->rx_status_block) {
377                 dev_err(&adapter->pdev->dev,
378                           "Cannot alloc memory for Status Block\n");
379                 return -ENOMEM;
380         }
381         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
382         printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
383
384         /* Recv
385          * pci_pool_create initializes a lookaside list. After successful
386          * creation, nonpaged fixed-size blocks can be allocated from and
387          * freed to the lookaside list.
388          * RFDs will be allocated from this pool.
389          */
390         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
391                                                    sizeof(struct rfd),
392                                                    0,
393                                                    SLAB_CACHE_DMA |
394                                                    SLAB_HWCACHE_ALIGN,
395                                                    NULL);
396
397         adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
398
399         /* The RFDs are going to be put on lists later on, so initialize the
400          * lists now.
401          */
402         INIT_LIST_HEAD(&rx_ring->RecvList);
403         return 0;
404 }
405
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Releases, in order: the RFDs queued on RecvList, the FBR1 packet
 * buffers and descriptor ring, the FBR0 equivalents (when USE_FBR0 is
 * compiled in), the Packet Status Ring, the status writeback block, the
 * RFD lookaside cache, and the FBR lookup tables.  Every region is
 * NULL/flag-checked before freeing, so this is safe to call after a
 * partially failed et131x_rx_dma_memory_alloc().
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	struct rfd *rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors.  If any RFDs are
	 * still outstanding (in flight), nReadyRecv won't match NumRfd.
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (struct rfd *) list_entry(rx_ring->RecvList.next,
				struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Recompute the same alignment slack that
				 * was added at allocation time so the size
				 * passed to pci_free_consistent() matches
				 * the original pci_alloc_consistent() size.
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself.  Undo the 4K-alignment offset that
		 * was applied to the VA at alloc time, so we hand back the
		 * pointer pci_alloc_consistent() originally returned.
		 */
		rx_ring->pFbr1RingVa = (void *)((u8 *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				/* Size must match the FBR0 chunk allocation:
				 * (FBR_CHUNKS + 1) * buffer_size - 1
				 */
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself; again undo the alignment offset
		 * before freeing.
		 */
		rx_ring->pFbr0RingVa = (void *)((u8 *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		  sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool; the flag guards against
	 * destroying a cache that was never created.
	 */
	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table (kfree(NULL) is a no-op) */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif

	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
546
547 /**
548  * et131x_init_recv - Initialize receive data structures.
549  * @adapter: pointer to our private adapter structure
550  *
551  * Returns 0 on success and errno on failure (as defined in errno.h)
552  */
553 int et131x_init_recv(struct et131x_adapter *adapter)
554 {
555         int status = -ENOMEM;
556         struct rfd *rfd = NULL;
557         u32 rfdct;
558         u32 numrfd = 0;
559         struct rx_ring *rx_ring;
560
561         /* Setup some convenience pointers */
562         rx_ring = &adapter->rx_ring;
563
564         /* Setup each RFD */
565         for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
566                 rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
567                                                      GFP_ATOMIC | GFP_DMA);
568
569                 if (!rfd) {
570                         dev_err(&adapter->pdev->dev,
571                                   "Couldn't alloc RFD out of kmem_cache\n");
572                         status = -ENOMEM;
573                         continue;
574                 }
575
576                 rfd->skb = NULL;
577
578                 /* Add this RFD to the RecvList */
579                 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
580
581                 /* Increment both the available RFD's, and the total RFD's. */
582                 rx_ring->nReadyRecv++;
583                 numrfd++;
584         }
585
586         if (numrfd > NIC_MIN_NUM_RFD)
587                 status = 0;
588
589         rx_ring->NumRfd = numrfd;
590
591         if (status != 0) {
592                 kmem_cache_free(rx_ring->RecvLookaside, rfd);
593                 dev_err(&adapter->pdev->dev,
594                           "Allocation problems in et131x_init_recv\n");
595         }
596         return status;
597 }
598
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 *
 * Halts the RX DMA engine, then programs the status writeback address,
 * the Packet Status Ring, and the Free Buffer Ring(s) (FBR1, plus FBR0
 * when USE_FBR0 is compiled in) into the device registers, and finally
 * sets the interrupt-coalescing parameters.  The software shadow
 * indices (local_psr_full, local_Fbr*_full) are reset to match the
 * hardware state written here.
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	/* Clear any stale status before the device starts writing back */
	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Low-water mark for the PSR: a percentage of the ring size,
	 * read back from the register just written (12-bit field).
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 *complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->rcv_lock, flags);
}
709
710 /**
711  * SetRxDmaTimer - Set the heartbeat timer according to line rate.
712  * @etdev: pointer to our adapter structure
713  */
714 void SetRxDmaTimer(struct et131x_adapter *etdev)
715 {
716         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
717          * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
718          */
719         if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
720             (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
721                 writel(0, &etdev->regs->rxdma.max_pkt_time);
722                 writel(1, &etdev->regs->rxdma.num_pkt_done);
723         }
724 }
725
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 *
 * Re-arms the free-buffer-ring slot the RFD's buffer came from (writes the
 * buffer's DMA address and index into the oldest FBR descriptor, then
 * publishes the advanced full pointer to hardware), and finally appends the
 * RFD to the tail of the software receive list.
 */
void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
{
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	u16 bi = rfd->bufferindex;	/* buffer slot within the FBR */
	u8 ri = rfd->ringindex;		/* which free buffer ring (0 or 1) */
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->pFbr1RingVa) +
					 INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[bi];
			next->addr_lo = rx_local->fbr[1]->bus_low[bi];
			next->word2 = bi;

			/* Advance the local full pointer (bump_fbr wraps and
			 * toggles the wrap bit) and tell the hardware the
			 * buffer is available again.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
				rx_local->Fbr1NumEntries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->pFbr0RingVa +
					INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[bi];
			next->addr_lo = rx_local->fbr[0]->bus_low[bi];
			next->word2 = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		/* Out-of-range index: the FBR slot cannot be re-armed, but
		 * the RFD itself is still returned to the list below.
		 */
		dev_err(&etdev->pdev->dev,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->rcv_lock, flags);

	/* More RFDs ready than were ever allocated indicates a double-return */
	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
801
802 /**
803  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
804  * @etdev: pointer to our adapter structure
805  */
806 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
807 {
808         u32 csr;
809         /* Setup the receive dma configuration register */
810         writel(0x00002001, &etdev->regs->rxdma.csr);
811         csr = readl(&etdev->regs->rxdma.csr);
812         if ((csr & 0x00020000) == 0) {  /* Check halt status (bit 17) */
813                 udelay(5);
814                 csr = readl(&etdev->regs->rxdma.csr);
815                 if ((csr & 0x00020000) == 0)
816                         dev_err(&etdev->pdev->dev,
817                         "RX Dma failed to enter halt state. CSR 0x%08x\n",
818                                 csr);
819         }
820 }
821
822 /**
823  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
824  * @etdev: pointer to our adapter structure
825  */
826 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
827 {
828         /* Setup the receive dma configuration register for normal operation */
829         u32 csr =  0x2000;      /* FBR1 enable */
830
831         if (etdev->rx_ring.Fbr1BufferSize == 4096)
832                 csr |= 0x0800;
833         else if (etdev->rx_ring.Fbr1BufferSize == 8192)
834                 csr |= 0x1000;
835         else if (etdev->rx_ring.Fbr1BufferSize == 16384)
836                 csr |= 0x1800;
837 #ifdef USE_FBR0
838         csr |= 0x0400;          /* FBR0 enable */
839         if (etdev->rx_ring.Fbr0BufferSize == 256)
840                 csr |= 0x0100;
841         else if (etdev->rx_ring.Fbr0BufferSize == 512)
842                 csr |= 0x0200;
843         else if (etdev->rx_ring.Fbr0BufferSize == 1024)
844                 csr |= 0x0300;
845 #endif
846         writel(csr, &etdev->regs->rxdma.csr);
847
848         csr = readl(&etdev->regs->rxdma.csr);
849         if ((csr & 0x00020000) != 0) {
850                 udelay(5);
851                 csr = readl(&etdev->regs->rxdma.csr);
852                 if ((csr & 0x00020000) != 0) {
853                         dev_err(&etdev->pdev->dev,
854                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
855                                 csr);
856                 }
857         }
858 }
859
860 /**
861  * nic_rx_pkts - Checks the hardware for available packets
862  * @etdev: pointer to our adapter
863  *
864  * Returns rfd, a pointer to our MPRFD.
865  *
866  * Checks the hardware for available packets, using completion ring
867  * If packets are available, it gets an RFD from the RecvList, attaches
868  * the packet to it, puts the RFD in the RecvPendList, and also returns
869  * the pointer to the RFD.
870  */
871 struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
872 {
873         struct rx_ring *rx_local = &etdev->rx_ring;
874         struct rx_status_block *status;
875         struct pkt_stat_desc *psr;
876         struct rfd *rfd;
877         u32 i;
878         u8 *buf;
879         unsigned long flags;
880         struct list_head *element;
881         u8 rindex;
882         u16 bindex;
883         u32 len;
884         u32 word0;
885         u32 word1;
886
887         /* RX Status block is written by the DMA engine prior to every
888          * interrupt. It contains the next to be used entry in the Packet
889          * Status Ring, and also the two Free Buffer rings.
890          */
891         status = rx_local->rx_status_block;
892         word1 = status->Word1 >> 16;    /* Get the useful bits */
893
894         /* Check the PSR and wrap bits do not match */
895         if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
896                 /* Looks like this ring is not updated yet */
897                 return NULL;
898
899         /* The packet status ring indicates that data is available. */
900         psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
901                         (rx_local->local_psr_full & 0xFFF);
902
903         /* Grab any information that is required once the PSR is
904          * advanced, since we can no longer rely on the memory being
905          * accurate
906          */
907         len = psr->word1 & 0xFFFF;
908         rindex = (psr->word1 >> 26) & 0x03;
909         bindex = (psr->word1 >> 16) & 0x3FF;
910         word0 = psr->word0;
911
912         /* Indicate that we have used this PSR entry. */
913         /* FIXME wrap 12 */
914         add_12bit(&rx_local->local_psr_full, 1);
915         if ((rx_local->local_psr_full & 0xFFF)  > rx_local->PsrNumEntries - 1) {
916                 /* Clear psr full and toggle the wrap bit */
917                 rx_local->local_psr_full &=  ~0xFFF;
918                 rx_local->local_psr_full ^= 0x1000;
919         }
920
921         writel(rx_local->local_psr_full,
922                &etdev->regs->rxdma.psr_full_offset);
923
924 #ifndef USE_FBR0
925         if (rindex != 1)
926                 return NULL;
927 #endif
928
929 #ifdef USE_FBR0
930         if (rindex > 1 ||
931                 (rindex == 0 &&
932                 bindex > rx_local->Fbr0NumEntries - 1) ||
933                 (rindex == 1 &&
934                 bindex > rx_local->Fbr1NumEntries - 1))
935 #else
936         if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
937 #endif
938         {
939                 /* Illegal buffer or ring index cannot be used by S/W*/
940                 dev_err(&etdev->pdev->dev,
941                           "NICRxPkts PSR Entry %d indicates "
942                           "length of %d and/or bad bi(%d)\n",
943                           rx_local->local_psr_full & 0xFFF,
944                           len, bindex);
945                 return NULL;
946         }
947
948         /* Get and fill the RFD. */
949         spin_lock_irqsave(&etdev->rcv_lock, flags);
950
951         rfd = NULL;
952         element = rx_local->RecvList.next;
953         rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
954
955         if (rfd == NULL) {
956                 spin_unlock_irqrestore(&etdev->rcv_lock, flags);
957                 return NULL;
958         }
959
960         list_del(&rfd->list_node);
961         rx_local->nReadyRecv--;
962
963         spin_unlock_irqrestore(&etdev->rcv_lock, flags);
964
965         rfd->bufferindex = bindex;
966         rfd->ringindex = rindex;
967
968         /* In V1 silicon, there is a bug which screws up filtering of
969          * runt packets.  Therefore runt packet filtering is disabled
970          * in the MAC and the packets are dropped here.  They are
971          * also counted here.
972          */
973         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
974                 etdev->stats.other_errors++;
975                 len = 0;
976         }
977
978         if (len) {
979                 if (etdev->ReplicaPhyLoopbk == 1) {
980                         buf = rx_local->fbr[rindex]->virt[bindex];
981
982                         if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
983                                 if (memcmp(&buf[42], "Replica packet",
984                                            ETH_HLEN)) {
985                                         etdev->ReplicaPhyLoopbkPF = 1;
986                                 }
987                         }
988                 }
989
990                 /* Determine if this is a multicast packet coming in */
991                 if ((word0 & ALCATEL_MULTICAST_PKT) &&
992                     !(word0 & ALCATEL_BROADCAST_PKT)) {
993                         /* Promiscuous mode and Multicast mode are
994                          * not mutually exclusive as was first
995                          * thought.  I guess Promiscuous is just
996                          * considered a super-set of the other
997                          * filters. Generally filter is 0x2b when in
998                          * promiscuous mode.
999                          */
1000                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
1001                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
1002                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1003                                 buf = rx_local->fbr[rindex]->
1004                                                 virt[bindex];
1005
1006                                 /* Loop through our list to see if the
1007                                  * destination address of this packet
1008                                  * matches one in our list.
1009                                  */
1010                                 for (i = 0;
1011                                      i < etdev->MCAddressCount;
1012                                      i++) {
1013                                         if (buf[0] ==
1014                                             etdev->MCList[i][0]
1015                                             && buf[1] ==
1016                                             etdev->MCList[i][1]
1017                                             && buf[2] ==
1018                                             etdev->MCList[i][2]
1019                                             && buf[3] ==
1020                                             etdev->MCList[i][3]
1021                                             && buf[4] ==
1022                                             etdev->MCList[i][4]
1023                                             && buf[5] ==
1024                                             etdev->MCList[i][5]) {
1025                                                 break;
1026                                         }
1027                                 }
1028
1029                                 /* If our index is equal to the number
1030                                  * of Multicast address we have, then
1031                                  * this means we did not find this
1032                                  * packet's matching address in our
1033                                  * list.  Set the len to zero,
1034                                  * so we free our RFD when we return
1035                                  * from this function.
1036                                  */
1037                                 if (i == etdev->MCAddressCount)
1038                                         len = 0;
1039                         }
1040
1041                         if (len > 0)
1042                                 etdev->stats.multircv++;
1043                 } else if (word0 & ALCATEL_BROADCAST_PKT)
1044                         etdev->stats.brdcstrcv++;
1045                 else
1046                         /* Not sure what this counter measures in
1047                          * promiscuous mode. Perhaps we should check
1048                          * the MAC address to see if it is directed
1049                          * to us in promiscuous mode.
1050                          */
1051                         etdev->stats.unircv++;
1052         }
1053
1054         if (len > 0) {
1055                 struct sk_buff *skb = NULL;
1056
1057                 /*rfd->len = len - 4; */
1058                 rfd->len = len;
1059
1060                 skb = dev_alloc_skb(rfd->len + 2);
1061                 if (!skb) {
1062                         dev_err(&etdev->pdev->dev,
1063                                   "Couldn't alloc an SKB for Rx\n");
1064                         return NULL;
1065                 }
1066
1067                 etdev->net_stats.rx_bytes += rfd->len;
1068
1069                 memcpy(skb_put(skb, rfd->len),
1070                        rx_local->fbr[rindex]->virt[bindex],
1071                        rfd->len);
1072
1073                 skb->dev = etdev->netdev;
1074                 skb->protocol = eth_type_trans(skb, etdev->netdev);
1075                 skb->ip_summed = CHECKSUM_NONE;
1076
1077                 netif_rx(skb);
1078         } else {
1079                 rfd->len = 0;
1080         }
1081
1082         nic_return_rfd(etdev, rfd);
1083         return rfd;
1084 }
1085
/**
 * et131x_reset_recv - Reset the receive list
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 *
 * NOTE(review): despite the name, this no longer resets anything; it only
 * warns when the receive list is empty (every RFD outstanding).
 */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
	WARN_ON(list_empty(&etdev->rx_ring.RecvList));

}
1097
1098 /**
1099  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1100  * @etdev: pointer to our adapter
1101  *
1102  * Assumption, Rcv spinlock has been acquired.
1103  */
1104 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1105 {
1106         struct rfd *rfd = NULL;
1107         u32 count = 0;
1108         bool done = true;
1109
1110         /* Process up to available RFD's */
1111         while (count < NUM_PACKETS_HANDLED) {
1112                 if (list_empty(&etdev->rx_ring.RecvList)) {
1113                         WARN_ON(etdev->rx_ring.nReadyRecv != 0);
1114                         done = false;
1115                         break;
1116                 }
1117
1118                 rfd = nic_rx_pkts(etdev);
1119
1120                 if (rfd == NULL)
1121                         break;
1122
1123                 /* Do not receive any packets until a filter has been set.
1124                  * Do not receive any packets until we have link.
1125                  * If length is zero, return the RFD in order to advance the
1126                  * Free buffer ring.
1127                  */
1128                 if (!etdev->PacketFilter ||
1129                     !netif_carrier_ok(etdev->netdev) ||
1130                     rfd->len == 0)
1131                         continue;
1132
1133                 /* Increment the number of packets we received */
1134                 etdev->net_stats.rx_packets++;
1135
1136                 /* Set the status on the packet, either resources or success */
1137                 if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
1138                         dev_warn(&etdev->pdev->dev,
1139                                     "RFD's are running out\n");
1140                 }
1141                 count++;
1142         }
1143
1144         if (count == NUM_PACKETS_HANDLED || !done) {
1145                 etdev->rx_ring.UnfinishedReceives = true;
1146                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1147                        &etdev->regs->global.watchdog_timer);
1148         } else
1149                 /* Watchdog timer will disable itself if appropriate. */
1150                 etdev->rx_ring.UnfinishedReceives = false;
1151 }
1152