600a8f10da86458046371888617e61376d3c067e
[pandora-kernel.git] / drivers / staging / et131x / et1310_rx.c
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
10  *
11  *------------------------------------------------------------------------------
12  *
13  * et1310_rx.c - Routines used to perform data reception
14  *
15  *------------------------------------------------------------------------------
16  *
17  * SOFTWARE LICENSE
18  *
19  * This software is provided subject to the following terms and conditions,
20  * which you should read carefully before using the software.  Using this
21  * software indicates your acceptance of these terms and conditions.  If you do
22  * not agree with these terms and conditions, do not use the software.
23  *
24  * Copyright © 2005 Agere Systems Inc.
25  * All rights reserved.
26  *
27  * Redistribution and use in source or binary forms, with or without
28  * modifications, are permitted provided that the following conditions are met:
29  *
30  * . Redistributions of source code must retain the above copyright notice, this
31  *    list of conditions and the following Disclaimer as comments in the code as
32  *    well as in the documentation and/or other materials provided with the
33  *    distribution.
34  *
35  * . Redistributions in binary form must reproduce the above copyright notice,
36  *    this list of conditions and the following Disclaimer in the documentation
37  *    and/or other materials provided with the distribution.
38  *
39  * . Neither the name of Agere Systems Inc. nor the names of the contributors
40  *    may be used to endorse or promote products derived from this software
41  *    without specific prior written permission.
42  *
43  * Disclaimer
44  *
45  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
46  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
47  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
48  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
49  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
50  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
51  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
52  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
53  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
55  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
56  * DAMAGE.
57  *
58  */
59
60 #include "et131x_version.h"
61 #include "et131x_defs.h"
62
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/module.h>
66 #include <linux/types.h>
67 #include <linux/kernel.h>
68
69 #include <linux/sched.h>
70 #include <linux/ptrace.h>
71 #include <linux/slab.h>
72 #include <linux/ctype.h>
73 #include <linux/string.h>
74 #include <linux/timer.h>
75 #include <linux/interrupt.h>
76 #include <linux/in.h>
77 #include <linux/delay.h>
78 #include <linux/io.h>
79 #include <linux/bitops.h>
80 #include <asm/system.h>
81
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/if_arp.h>
86 #include <linux/ioport.h>
87 #include <linux/phy.h>
88
89 #include "et1310_phy.h"
90 #include "et131x_adapter.h"
91 #include "et1310_rx.h"
92 #include "et131x.h"
93
94 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
95 {
96         u32 tmp_free_buff_ring = *free_buff_ring;
97         tmp_free_buff_ring++;
98         /* This works for all cases where limit < 1024. The 1023 case
99            works because 1023++ is 1024 which means the if condition is not
100            taken but the carry of the bit into the wrap bit toggles the wrap
101            value correctly */
102         if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
103                 tmp_free_buff_ring &= ~ET_DMA10_MASK;
104                 tmp_free_buff_ring ^= ET_DMA10_WRAP;
105         }
106         /* For the 1023 case */
107         tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
108         *free_buff_ring = tmp_free_buff_ring;
109         return tmp_free_buff_ring;
110 }
111
112 /**
113  * et131x_rx_dma_memory_alloc
114  * @adapter: pointer to our private adapter structure
115  *
116  * Returns 0 on success and errno on failure (as defined in errno.h)
117  *
118  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
119  * and the Packet Status Ring.
120  */
121 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
122 {
123         u32 i, j;
124         u32 bufsize;
125         u32 pktstat_ringsize, fbr_chunksize;
126         struct rx_ring *rx_ring;
127
128         /* Setup some convenience pointers */
129         rx_ring = &adapter->rx_ring;
130
131         /* Alloc memory for the lookup table */
132 #ifdef USE_FBR0
133         rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
134 #endif
135         rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
136
137         /* The first thing we will do is configure the sizes of the buffer
138          * rings. These will change based on jumbo packet support.  Larger
139          * jumbo packets increases the size of each entry in FBR0, and the
140          * number of entries in FBR0, while at the same time decreasing the
141          * number of entries in FBR1.
142          *
143          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
144          * entries are huge in order to accommodate a "jumbo" frame, then it
145          * will have less entries.  Conversely, FBR1 will now be relied upon
146          * to carry more "normal" frames, thus it's entry size also increases
147          * and the number of entries goes up too (since it now carries
148          * "small" + "regular" packets.
149          *
150          * In this scheme, we try to maintain 512 entries between the two
151          * rings. Also, FBR1 remains a constant size - when it's size doubles
152          * the number of entries halves.  FBR0 increases in size, however.
153          */
154
155         if (adapter->registry_jumbo_packet < 2048) {
156 #ifdef USE_FBR0
157                 rx_ring->fbr0_buffsize = 256;
158                 rx_ring->fbr0_num_entries = 512;
159 #endif
160                 rx_ring->fbr1_buffsize = 2048;
161                 rx_ring->fbr1_num_entries = 512;
162         } else if (adapter->registry_jumbo_packet < 4096) {
163 #ifdef USE_FBR0
164                 rx_ring->fbr0_buffsize = 512;
165                 rx_ring->fbr0_num_entries = 1024;
166 #endif
167                 rx_ring->fbr1_buffsize = 4096;
168                 rx_ring->fbr1_num_entries = 512;
169         } else {
170 #ifdef USE_FBR0
171                 rx_ring->fbr0_buffsize = 1024;
172                 rx_ring->fbr0_num_entries = 768;
173 #endif
174                 rx_ring->fbr1_buffsize = 16384;
175                 rx_ring->fbr1_num_entries = 128;
176         }
177
178 #ifdef USE_FBR0
179         adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr0_num_entries +
180             adapter->rx_ring.fbr1_num_entries;
181 #else
182         adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr1_num_entries;
183 #endif
184
185         /* Allocate an area of memory for Free Buffer Ring 1 */
186         bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries) + 0xfff;
187         rx_ring->fbr1_ring_virtaddr = pci_alloc_consistent(adapter->pdev,
188                                                 bufsize,
189                                                 &rx_ring->fbr1_ring_physaddr);
190         if (!rx_ring->fbr1_ring_virtaddr) {
191                 dev_err(&adapter->pdev->dev,
192                           "Cannot alloc memory for Free Buffer Ring 1\n");
193                 return -ENOMEM;
194         }
195
196         /* Save physical address
197          *
198          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
199          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
200          * are ever returned, make sure the high part is retrieved here
201          * before storing the adjusted address.
202          */
203         rx_ring->fbr1_real_physaddr = rx_ring->fbr1_ring_physaddr;
204
205         /* Align Free Buffer Ring 1 on a 4K boundary */
206         et131x_align_allocated_memory(adapter,
207                                       &rx_ring->fbr1_real_physaddr,
208                                       &rx_ring->fbr1_offset, 0x0FFF);
209
210         rx_ring->fbr1_ring_virtaddr =
211                         (void *)((u8 *) rx_ring->fbr1_ring_virtaddr +
212                         rx_ring->fbr1_offset);
213
214 #ifdef USE_FBR0
215         /* Allocate an area of memory for Free Buffer Ring 0 */
216         bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries) + 0xfff;
217         rx_ring->fbr0_ring_virtaddr = pci_alloc_consistent(adapter->pdev,
218                                                 bufsize,
219                                                 &rx_ring->fbr0_ring_physaddr);
220         if (!rx_ring->fbr0_ring_virtaddr) {
221                 dev_err(&adapter->pdev->dev,
222                           "Cannot alloc memory for Free Buffer Ring 0\n");
223                 return -ENOMEM;
224         }
225
226         /* Save physical address
227          *
228          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
229          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
230          * are ever returned, make sure the high part is retrieved here before
231          * storing the adjusted address.
232          */
233         rx_ring->fbr0_real_physaddr = rx_ring->fbr0_ring_physaddr;
234
235         /* Align Free Buffer Ring 0 on a 4K boundary */
236         et131x_align_allocated_memory(adapter,
237                                       &rx_ring->fbr0_real_physaddr,
238                                       &rx_ring->fbr0_offset, 0x0FFF);
239
240         rx_ring->fbr0_ring_virtaddr =
241                                 (void *)((u8 *) rx_ring->fbr0_ring_virtaddr +
242                                 rx_ring->fbr0_offset);
243 #endif
244         for (i = 0; i < (rx_ring->fbr1_num_entries / FBR_CHUNKS); i++) {
245                 u64 fbr1_offset;
246                 u64 fbr1_tmp_physaddr;
247                 u32 fbr1_align;
248
249                 /* This code allocates an area of memory big enough for N
250                  * free buffers + (buffer_size - 1) so that the buffers can
251                  * be aligned on 4k boundaries.  If each buffer were aligned
252                  * to a buffer_size boundary, the effect would be to double
253                  * the size of FBR0.  By allocating N buffers at once, we
254                  * reduce this overhead.
255                  */
256                 if (rx_ring->fbr1_buffsize > 4096)
257                         fbr1_align = 4096;
258                 else
259                         fbr1_align = rx_ring->fbr1_buffsize;
260
261                 fbr_chunksize =
262                     (FBR_CHUNKS * rx_ring->fbr1_buffsize) + fbr1_align - 1;
263                 rx_ring->fbr1_mem_virtaddrs[i] =
264                     pci_alloc_consistent(adapter->pdev, fbr_chunksize,
265                                          &rx_ring->fbr1_mem_physaddrs[i]);
266
267                 if (!rx_ring->fbr1_mem_virtaddrs[i]) {
268                         dev_err(&adapter->pdev->dev,
269                                 "Could not alloc memory\n");
270                         return -ENOMEM;
271                 }
272
273                 /* See NOTE in "Save Physical Address" comment above */
274                 fbr1_tmp_physaddr = rx_ring->fbr1_mem_physaddrs[i];
275
276                 et131x_align_allocated_memory(adapter,
277                                               &fbr1_tmp_physaddr,
278                                               &fbr1_offset, (fbr1_align - 1));
279
280                 for (j = 0; j < FBR_CHUNKS; j++) {
281                         u32 index = (i * FBR_CHUNKS) + j;
282
283                         /* Save the Virtual address of this index for quick
284                          * access later
285                          */
286                         rx_ring->fbr[1]->virt[index] =
287                             (u8 *) rx_ring->fbr1_mem_virtaddrs[i] +
288                             (j * rx_ring->fbr1_buffsize) + fbr1_offset;
289
290                         /* now store the physical address in the descriptor
291                          * so the device can access it
292                          */
293                         rx_ring->fbr[1]->bus_high[index] =
294                             (u32) (fbr1_tmp_physaddr >> 32);
295                         rx_ring->fbr[1]->bus_low[index] =
296                             (u32) fbr1_tmp_physaddr;
297
298                         fbr1_tmp_physaddr += rx_ring->fbr1_buffsize;
299
300                         rx_ring->fbr[1]->buffer1[index] =
301                             rx_ring->fbr[1]->virt[index];
302                         rx_ring->fbr[1]->buffer2[index] =
303                             rx_ring->fbr[1]->virt[index] - 4;
304                 }
305         }
306
307 #ifdef USE_FBR0
308         /* Same for FBR0 (if in use) */
309         for (i = 0; i < (rx_ring->fbr0_num_entries / FBR_CHUNKS); i++) {
310                 u64 fbr0_offset;
311                 u64 fbr0_tmp_physaddr;
312
313                 fbr_chunksize =
314                     ((FBR_CHUNKS + 1) * rx_ring->fbr0_buffsize) - 1;
315                 rx_ring->fbr0_mem_virtaddrs[i] =
316                     pci_alloc_consistent(adapter->pdev, fbr_chunksize,
317                                          &rx_ring->fbr0_mem_physaddrs[i]);
318
319                 if (!rx_ring->fbr0_mem_virtaddrs[i]) {
320                         dev_err(&adapter->pdev->dev,
321                                 "Could not alloc memory\n");
322                         return -ENOMEM;
323                 }
324
325                 /* See NOTE in "Save Physical Address" comment above */
326                 fbr0_tmp_physaddr = rx_ring->fbr0_mem_physaddrs[i];
327
328                 et131x_align_allocated_memory(adapter,
329                                               &fbr0_tmp_physaddr,
330                                               &fbr0_offset,
331                                               rx_ring->fbr0_buffsize - 1);
332
333                 for (j = 0; j < FBR_CHUNKS; j++) {
334                         u32 index = (i * FBR_CHUNKS) + j;
335
336                         rx_ring->fbr[0]->virt[index] =
337                             (u8 *) rx_ring->fbr0_mem_virtaddrs[i] +
338                             (j * rx_ring->fbr0_buffsize) + fbr0_offset;
339
340                         rx_ring->fbr[0]->bus_high[index] =
341                             (u32) (fbr0_tmp_physaddr >> 32);
342                         rx_ring->fbr[0]->bus_low[index] =
343                             (u32) fbr0_tmp_physaddr;
344
345                         fbr0_tmp_physaddr += rx_ring->fbr0_buffsize;
346
347                         rx_ring->fbr[0]->buffer1[index] =
348                             rx_ring->fbr[0]->virt[index];
349                         rx_ring->fbr[0]->buffer2[index] =
350                             rx_ring->fbr[0]->virt[index] - 4;
351                 }
352         }
353 #endif
354
355         /* Allocate an area of memory for FIFO of Packet Status ring entries */
356         pktstat_ringsize =
357             sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
358
359         rx_ring->ps_ring_virtaddr = pci_alloc_consistent(adapter->pdev,
360                                                   pktstat_ringsize,
361                                                   &rx_ring->ps_ring_physaddr);
362
363         if (!rx_ring->ps_ring_virtaddr) {
364                 dev_err(&adapter->pdev->dev,
365                           "Cannot alloc memory for Packet Status Ring\n");
366                 return -ENOMEM;
367         }
368         printk(KERN_INFO "Packet Status Ring %lx\n",
369             (unsigned long) rx_ring->ps_ring_physaddr);
370
371         /*
372          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
373          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
374          * are ever returned, make sure the high part is retrieved here before
375          * storing the adjusted address.
376          */
377
378         /* Allocate an area of memory for writeback of status information */
379         rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
380                                             sizeof(struct rx_status_block),
381                                             &rx_ring->rx_status_bus);
382         if (!rx_ring->rx_status_block) {
383                 dev_err(&adapter->pdev->dev,
384                           "Cannot alloc memory for Status Block\n");
385                 return -ENOMEM;
386         }
387         rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
388         printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
389
390         /* Recv
391          * pci_pool_create initializes a lookaside list. After successful
392          * creation, nonpaged fixed-size blocks can be allocated from and
393          * freed to the lookaside list.
394          * RFDs will be allocated from this pool.
395          */
396         rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
397                                                    sizeof(struct rfd),
398                                                    0,
399                                                    SLAB_CACHE_DMA |
400                                                    SLAB_HWCACHE_ALIGN,
401                                                    NULL);
402
403         adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
404
405         /* The RFDs are going to be put on lists later on, so initialize the
406          * lists now.
407          */
408         INIT_LIST_HEAD(&rx_ring->recv_list);
409         return 0;
410 }
411
412 /**
413  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
414  * @adapter: pointer to our private adapter structure
415  */
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Releases, in order: RFDs queued on the receive list, both free buffer
 * rings (packet chunk memory first, then the descriptor FIFO itself),
 * the Packet Status Ring, the status writeback block, the RFD lookaside
 * cache, and the FBR lookup tables.  NULL members are skipped, so this
 * is safe after a partial et131x_rx_dma_memory_alloc() failure.
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktstat_ringsize;
	struct rfd *rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors.  If counts differ,
	 * some RFDs are still in flight and will be leaked.
	 */
	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
				struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->fbr1_ring_virtaddr) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->fbr1_num_entries / FBR_CHUNKS); index++) {
			if (rx_ring->fbr1_mem_virtaddrs[index]) {
				u32 fbr1_align;

				/* Must recompute the same alignment used at
				 * allocation time so the freed size matches
				 * the allocated size exactly.
				 */
				if (rx_ring->fbr1_buffsize > 4096)
					fbr1_align = 4096;
				else
					fbr1_align = rx_ring->fbr1_buffsize;

				bufsize =
				    (rx_ring->fbr1_buffsize * FBR_CHUNKS) +
				    fbr1_align - 1;

				pci_free_consistent(adapter->pdev,
					bufsize,
					rx_ring->fbr1_mem_virtaddrs[index],
					rx_ring->fbr1_mem_physaddrs[index]);

				rx_ring->fbr1_mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself.  Undo the 4K-alignment offset applied
		 * at allocation time before handing the address back.
		 */
		rx_ring->fbr1_ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr1_ring_virtaddr - rx_ring->fbr1_offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				    rx_ring->fbr1_ring_virtaddr,
				    rx_ring->fbr1_ring_physaddr);

		rx_ring->fbr1_ring_virtaddr = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->fbr0_ring_virtaddr) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->fbr0_num_entries / FBR_CHUNKS); index++) {
			if (rx_ring->fbr0_mem_virtaddrs[index]) {
				/* Mirrors the FBR0 chunk size used at
				 * allocation: ((FBR_CHUNKS + 1) * bufsize) - 1
				 */
				bufsize =
				    (rx_ring->fbr0_buffsize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
					bufsize,
					rx_ring->fbr0_mem_virtaddrs[index],
					rx_ring->fbr0_mem_physaddrs[index]);

				rx_ring->fbr0_mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself (undo the alignment offset first) */
		rx_ring->fbr0_ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr0_ring_virtaddr - rx_ring->fbr0_offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->fbr0_ring_virtaddr,
				    rx_ring->fbr0_ring_physaddr);

		rx_ring->fbr0_ring_virtaddr = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		pktstat_ringsize =
		    sizeof(struct pkt_stat_desc) *
		    adapter->rx_ring.psr_num_entries;

		pci_free_consistent(adapter->pdev, pktstat_ringsize,
				    rx_ring->ps_ring_virtaddr,
				    rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool; the flag guards against
	 * destroying a cache that was never created.
	 */
	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->recv_lookaside);
		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table (kfree(NULL) is a no-op) */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif

	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}
556
557 /**
558  * et131x_init_recv - Initialize receive data structures.
559  * @adapter: pointer to our private adapter structure
560  *
561  * Returns 0 on success and errno on failure (as defined in errno.h)
562  */
563 int et131x_init_recv(struct et131x_adapter *adapter)
564 {
565         int status = -ENOMEM;
566         struct rfd *rfd = NULL;
567         u32 rfdct;
568         u32 numrfd = 0;
569         struct rx_ring *rx_ring;
570
571         /* Setup some convenience pointers */
572         rx_ring = &adapter->rx_ring;
573
574         /* Setup each RFD */
575         for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
576                 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
577                                                      GFP_ATOMIC | GFP_DMA);
578
579                 if (!rfd) {
580                         dev_err(&adapter->pdev->dev,
581                                   "Couldn't alloc RFD out of kmem_cache\n");
582                         status = -ENOMEM;
583                         continue;
584                 }
585
586                 rfd->skb = NULL;
587
588                 /* Add this RFD to the recv_list */
589                 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
590
591                 /* Increment both the available RFD's, and the total RFD's. */
592                 rx_ring->num_ready_recv++;
593                 numrfd++;
594         }
595
596         if (numrfd > NIC_MIN_NUM_RFD)
597                 status = 0;
598
599         rx_ring->num_rfd = numrfd;
600
601         if (status != 0) {
602                 kmem_cache_free(rx_ring->recv_lookaside, rfd);
603                 dev_err(&adapter->pdev->dev,
604                           "Allocation problems in et131x_init_recv\n");
605         }
606         return status;
607 }
608
609 /**
610  * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
611  * @adapter: pointer to our adapter structure
612  */
/**
 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
 * @adapter: pointer to our adapter structure
 *
 * Halts the RXDMA engine, programs the status writeback address, the
 * Packet Status Ring, and the Free Buffer Ring(s) into the device
 * registers, and resets the driver-side ring-position trackers to match.
 * Register write order follows the hardware init sequence — do not
 * reorder.  Caller is expected to re-enable RXDMA afterwards.
 */
void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	/* Clear the writeback block so stale status is not read back */
	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Set the PSR low-water mark as a percentage of the ring size;
	 * the register read-back keeps only the 12-bit count field.
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents: one descriptor
	 * per buffer, word2 carrying the buffer index.
	 */
	fbr_entry = (struct fbr_desc *) rx_local->fbr1_ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr1_num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->fbr1_real_physaddr >> 32),
	       &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->fbr1_real_physaddr, &rx_dma->fbr1_base_lo);
	writel(rx_local->fbr1_num_entries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_fbr1_full = ET_DMA10_WRAP;
	writel(
	    ((rx_local->fbr1_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	    &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr0_ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr0_num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->fbr0_real_physaddr >> 32),
	       &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->fbr0_real_physaddr, &rx_dma->fbr0_base_lo);
	writel(rx_local->fbr0_num_entries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_fbr0_full = ET_DMA10_WRAP;
	writel(
	    ((rx_local->fbr0_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	    &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
723
724 /**
725  * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
726  * @adapter: pointer to our adapter structure
727  */
728 void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
729 {
730         struct phy_device *phydev = adapter->phydev;
731
732         if (!phydev)
733                 return;
734
735         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
736          * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
737          */
738         if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
739                 writel(0, &adapter->regs->rxdma.max_pkt_time);
740                 writel(1, &adapter->regs->rxdma.num_pkt_done);
741         }
742 }
743
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD being recycled
 *
 * Hands the RFD's data buffer back to the hardware by writing a fresh
 * descriptor into the oldest slot of the matching free buffer ring, then
 * re-queues the RFD itself at the tail of the software receive list.
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data.
	 * Only touch a free buffer ring when the indices are sane for the
	 * ring they name; a corrupt index is reported and skipped.
	 */
	if (
#ifdef USE_FBR0
	    (ring_index == 0 && buff_index < rx_local->fbr0_num_entries) ||
#endif
	    (ring_index == 1 && buff_index < rx_local->fbr1_num_entries)) {
		spin_lock_irqsave(&adapter->fbr_lock, flags);

		if (ring_index == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->fbr1_ring_virtaddr) +
					 INDEX10(rx_local->local_fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
			next->word2 = buff_index;

			/* Advance our local full offset and publish it to
			 * the hardware so it can reuse the buffer.
			 */
			writel(bump_free_buff_ring(&rx_local->local_fbr1_full,
				rx_local->fbr1_num_entries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->fbr0_ring_virtaddr +
					INDEX10(rx_local->local_fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
			next->word2 = buff_index;

			writel(bump_free_buff_ring(&rx_local->local_fbr0_full,
					rx_local->fbr0_num_entries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
	} else {
		dev_err(&adapter->pdev->dev,
			  "%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	/* Catch accounting bugs: there can never be more ready RFDs than
	 * were allocated.
	 */
	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
819
820 /**
821  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
822  * @adapter: pointer to our adapter structure
823  */
824 void et131x_rx_dma_disable(struct et131x_adapter *adapter)
825 {
826         u32 csr;
827         /* Setup the receive dma configuration register */
828         writel(0x00002001, &adapter->regs->rxdma.csr);
829         csr = readl(&adapter->regs->rxdma.csr);
830         if ((csr & 0x00020000) == 0) {  /* Check halt status (bit 17) */
831                 udelay(5);
832                 csr = readl(&adapter->regs->rxdma.csr);
833                 if ((csr & 0x00020000) == 0)
834                         dev_err(&adapter->pdev->dev,
835                         "RX Dma failed to enter halt state. CSR 0x%08x\n",
836                                 csr);
837         }
838 }
839
840 /**
841  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
842  * @adapter: pointer to our adapter structure
843  */
844 void et131x_rx_dma_enable(struct et131x_adapter *adapter)
845 {
846         /* Setup the receive dma configuration register for normal operation */
847         u32 csr =  0x2000;      /* FBR1 enable */
848
849         if (adapter->rx_ring.fbr1_buffsize == 4096)
850                 csr |= 0x0800;
851         else if (adapter->rx_ring.fbr1_buffsize == 8192)
852                 csr |= 0x1000;
853         else if (adapter->rx_ring.fbr1_buffsize == 16384)
854                 csr |= 0x1800;
855 #ifdef USE_FBR0
856         csr |= 0x0400;          /* FBR0 enable */
857         if (adapter->rx_ring.fbr0_buffsize == 256)
858                 csr |= 0x0100;
859         else if (adapter->rx_ring.fbr0_buffsize == 512)
860                 csr |= 0x0200;
861         else if (adapter->rx_ring.fbr0_buffsize == 1024)
862                 csr |= 0x0300;
863 #endif
864         writel(csr, &adapter->regs->rxdma.csr);
865
866         csr = readl(&adapter->regs->rxdma.csr);
867         if ((csr & 0x00020000) != 0) {
868                 udelay(5);
869                 csr = readl(&adapter->regs->rxdma.csr);
870                 if ((csr & 0x00020000) != 0) {
871                         dev_err(&adapter->pdev->dev,
872                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
873                                 csr);
874                 }
875         }
876 }
877
878 /**
879  * nic_rx_pkts - Checks the hardware for available packets
880  * @adapter: pointer to our adapter
881  *
882  * Returns rfd, a pointer to our MPRFD.
883  *
884  * Checks the hardware for available packets, using completion ring
885  * If packets are available, it gets an RFD from the recv_list, attaches
886  * the packet to it, puts the RFD in the RecvPendList, and also returns
887  * the pointer to the RFD.
888  */
889 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
890 {
891         struct rx_ring *rx_local = &adapter->rx_ring;
892         struct rx_status_block *status;
893         struct pkt_stat_desc *psr;
894         struct rfd *rfd;
895         u32 i;
896         u8 *buf;
897         unsigned long flags;
898         struct list_head *element;
899         u8 ring_index;
900         u16 buff_index;
901         u32 len;
902         u32 word0;
903         u32 word1;
904
905         /* RX Status block is written by the DMA engine prior to every
906          * interrupt. It contains the next to be used entry in the Packet
907          * Status Ring, and also the two Free Buffer rings.
908          */
909         status = rx_local->rx_status_block;
910         word1 = status->word1 >> 16;    /* Get the useful bits */
911
912         /* Check the PSR and wrap bits do not match */
913         if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
914                 /* Looks like this ring is not updated yet */
915                 return NULL;
916
917         /* The packet status ring indicates that data is available. */
918         psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
919                         (rx_local->local_psr_full & 0xFFF);
920
921         /* Grab any information that is required once the PSR is
922          * advanced, since we can no longer rely on the memory being
923          * accurate
924          */
925         len = psr->word1 & 0xFFFF;
926         ring_index = (psr->word1 >> 26) & 0x03;
927         buff_index = (psr->word1 >> 16) & 0x3FF;
928         word0 = psr->word0;
929
930         /* Indicate that we have used this PSR entry. */
931         /* FIXME wrap 12 */
932         add_12bit(&rx_local->local_psr_full, 1);
933         if (
934           (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
935                 /* Clear psr full and toggle the wrap bit */
936                 rx_local->local_psr_full &=  ~0xFFF;
937                 rx_local->local_psr_full ^= 0x1000;
938         }
939
940         writel(rx_local->local_psr_full,
941                &adapter->regs->rxdma.psr_full_offset);
942
943 #ifndef USE_FBR0
944         if (ring_index != 1)
945                 return NULL;
946 #endif
947
948 #ifdef USE_FBR0
949         if (ring_index > 1 ||
950                 (ring_index == 0 &&
951                 buff_index > rx_local->fbr0_num_entries - 1) ||
952                 (ring_index == 1 &&
953                 buff_index > rx_local->fbr1_num_entries - 1))
954 #else
955         if (ring_index != 1 || buff_index > rx_local->fbr1_num_entries - 1)
956 #endif
957         {
958                 /* Illegal buffer or ring index cannot be used by S/W*/
959                 dev_err(&adapter->pdev->dev,
960                           "NICRxPkts PSR Entry %d indicates "
961                           "length of %d and/or bad bi(%d)\n",
962                           rx_local->local_psr_full & 0xFFF,
963                           len, buff_index);
964                 return NULL;
965         }
966
967         /* Get and fill the RFD. */
968         spin_lock_irqsave(&adapter->rcv_lock, flags);
969
970         rfd = NULL;
971         element = rx_local->recv_list.next;
972         rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
973
974         if (rfd == NULL) {
975                 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
976                 return NULL;
977         }
978
979         list_del(&rfd->list_node);
980         rx_local->num_ready_recv--;
981
982         spin_unlock_irqrestore(&adapter->rcv_lock, flags);
983
984         rfd->bufferindex = buff_index;
985         rfd->ringindex = ring_index;
986
987         /* In V1 silicon, there is a bug which screws up filtering of
988          * runt packets.  Therefore runt packet filtering is disabled
989          * in the MAC and the packets are dropped here.  They are
990          * also counted here.
991          */
992         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
993                 adapter->stats.rx_other_errs++;
994                 len = 0;
995         }
996
997         if (len) {
998                 /* Determine if this is a multicast packet coming in */
999                 if ((word0 & ALCATEL_MULTICAST_PKT) &&
1000                     !(word0 & ALCATEL_BROADCAST_PKT)) {
1001                         /* Promiscuous mode and Multicast mode are
1002                          * not mutually exclusive as was first
1003                          * thought.  I guess Promiscuous is just
1004                          * considered a super-set of the other
1005                          * filters. Generally filter is 0x2b when in
1006                          * promiscuous mode.
1007                          */
1008                         if ((adapter->packet_filter &
1009                                         ET131X_PACKET_TYPE_MULTICAST)
1010                             && !(adapter->packet_filter &
1011                                         ET131X_PACKET_TYPE_PROMISCUOUS)
1012                             && !(adapter->packet_filter &
1013                                         ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1014                                 buf = rx_local->fbr[ring_index]->
1015                                                 virt[buff_index];
1016
1017                                 /* Loop through our list to see if the
1018                                  * destination address of this packet
1019                                  * matches one in our list.
1020                                  */
1021                                 for (i = 0; i < adapter->multicast_addr_count;
1022                                      i++) {
1023                                         if (buf[0] ==
1024                                                 adapter->multicast_list[i][0]
1025                                             && buf[1] ==
1026                                                 adapter->multicast_list[i][1]
1027                                             && buf[2] ==
1028                                                 adapter->multicast_list[i][2]
1029                                             && buf[3] ==
1030                                                 adapter->multicast_list[i][3]
1031                                             && buf[4] ==
1032                                                 adapter->multicast_list[i][4]
1033                                             && buf[5] ==
1034                                                 adapter->multicast_list[i][5]) {
1035                                                 break;
1036                                         }
1037                                 }
1038
1039                                 /* If our index is equal to the number
1040                                  * of Multicast address we have, then
1041                                  * this means we did not find this
1042                                  * packet's matching address in our
1043                                  * list.  Set the len to zero,
1044                                  * so we free our RFD when we return
1045                                  * from this function.
1046                                  */
1047                                 if (i == adapter->multicast_addr_count)
1048                                         len = 0;
1049                         }
1050
1051                         if (len > 0)
1052                                 adapter->stats.multicast_pkts_rcvd++;
1053                 } else if (word0 & ALCATEL_BROADCAST_PKT)
1054                         adapter->stats.broadcast_pkts_rcvd++;
1055                 else
1056                         /* Not sure what this counter measures in
1057                          * promiscuous mode. Perhaps we should check
1058                          * the MAC address to see if it is directed
1059                          * to us in promiscuous mode.
1060                          */
1061                         adapter->stats.unicast_pkts_rcvd++;
1062         }
1063
1064         if (len > 0) {
1065                 struct sk_buff *skb = NULL;
1066
1067                 /*rfd->len = len - 4; */
1068                 rfd->len = len;
1069
1070                 skb = dev_alloc_skb(rfd->len + 2);
1071                 if (!skb) {
1072                         dev_err(&adapter->pdev->dev,
1073                                   "Couldn't alloc an SKB for Rx\n");
1074                         return NULL;
1075                 }
1076
1077                 adapter->net_stats.rx_bytes += rfd->len;
1078
1079                 memcpy(skb_put(skb, rfd->len),
1080                        rx_local->fbr[ring_index]->virt[buff_index],
1081                        rfd->len);
1082
1083                 skb->dev = adapter->netdev;
1084                 skb->protocol = eth_type_trans(skb, adapter->netdev);
1085                 skb->ip_summed = CHECKSUM_NONE;
1086
1087                 netif_rx(skb);
1088         } else {
1089                 rfd->len = 0;
1090         }
1091
1092         nic_return_rfd(adapter, rfd);
1093         return rfd;
1094 }
1095
1096 /**
1097  * et131x_reset_recv - Reset the receive list
1098  * @adapter: pointer to our adapter
1099  *
1100  * Assumption, Rcv spinlock has been acquired.
1101  */
1102 void et131x_reset_recv(struct et131x_adapter *adapter)
1103 {
1104         WARN_ON(list_empty(&adapter->rx_ring.recv_list));
1105 }
1106
1107 /**
1108  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1109  * @adapter: pointer to our adapter
1110  *
1111  * Assumption, Rcv spinlock has been acquired.
1112  */
1113 void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
1114 {
1115         struct rfd *rfd = NULL;
1116         u32 count = 0;
1117         bool done = true;
1118
1119         /* Process up to available RFD's */
1120         while (count < NUM_PACKETS_HANDLED) {
1121                 if (list_empty(&adapter->rx_ring.recv_list)) {
1122                         WARN_ON(adapter->rx_ring.num_ready_recv != 0);
1123                         done = false;
1124                         break;
1125                 }
1126
1127                 rfd = nic_rx_pkts(adapter);
1128
1129                 if (rfd == NULL)
1130                         break;
1131
1132                 /* Do not receive any packets until a filter has been set.
1133                  * Do not receive any packets until we have link.
1134                  * If length is zero, return the RFD in order to advance the
1135                  * Free buffer ring.
1136                  */
1137                 if (!adapter->packet_filter ||
1138                     !netif_carrier_ok(adapter->netdev) ||
1139                     rfd->len == 0)
1140                         continue;
1141
1142                 /* Increment the number of packets we received */
1143                 adapter->net_stats.rx_packets++;
1144
1145                 /* Set the status on the packet, either resources or success */
1146                 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
1147                         dev_warn(&adapter->pdev->dev,
1148                                     "RFD's are running out\n");
1149                 }
1150                 count++;
1151         }
1152
1153         if (count == NUM_PACKETS_HANDLED || !done) {
1154                 adapter->rx_ring.unfinished_receives = true;
1155                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1156                        &adapter->regs->global.watchdog_timer);
1157         } else
1158                 /* Watchdog timer will disable itself if appropriate. */
1159                 adapter->rx_ring.unfinished_receives = false;
1160 }
1161