[PATCH] bcm43xx: fix DMA TX skb freeing in case of fragmented packets.
drivers/net/wireless/bcm43xx/bcm43xx_dma.c
1 /*
2
3   Broadcom BCM43xx wireless driver
4
5   DMA ringbuffer and descriptor allocation/management
6
7   Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
8
9   Some code in this file is derived from the b44.c driver
10   Copyright (C) 2002 David S. Miller
11   Copyright (C) Pekka Pietikainen
12
13   This program is free software; you can redistribute it and/or modify
14   it under the terms of the GNU General Public License as published by
15   the Free Software Foundation; either version 2 of the License, or
16   (at your option) any later version.
17
18   This program is distributed in the hope that it will be useful,
19   but WITHOUT ANY WARRANTY; without even the implied warranty of
20   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21   GNU General Public License for more details.
22
23   You should have received a copy of the GNU General Public License
24   along with this program; see the file COPYING.  If not, write to
25   the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26   Boston, MA 02110-1301, USA.
27
28 */
29
30 #include "bcm43xx.h"
31 #include "bcm43xx_dma.h"
32 #include "bcm43xx_main.h"
33 #include "bcm43xx_debugfs.h"
34 #include "bcm43xx_power.h"
35 #include "bcm43xx_xmit.h"
36
37 #include <linux/dmapool.h>
38 #include <linux/pci.h>
39 #include <linux/delay.h>
40 #include <linux/skbuff.h>
41 #include <asm/semaphore.h>
42
43
44 static inline int free_slots(struct bcm43xx_dmaring *ring)
45 {
46         return (ring->nr_slots - ring->used_slots);
47 }
48
49 static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
50 {
51         assert(slot >= -1 && slot <= ring->nr_slots - 1);
52         if (slot == ring->nr_slots - 1)
53                 return 0;
54         return slot + 1;
55 }
56
57 static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
58 {
59         assert(slot >= 0 && slot <= ring->nr_slots - 1);
60         if (slot == 0)
61                 return ring->nr_slots - 1;
62         return slot - 1;
63 }
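/* next_slot()/prev_slot() walk the ring modulo nr_slots: next_slot() wraps
 * from the last slot back to 0 and prev_slot() wraps from 0 back to the
 * last slot.  next_slot() also accepts -1, the initial current_slot of an
 * empty TX ring (see bcm43xx_setup_dmaring()), so the first request_slot()
 * hands out slot 0. */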
64
65 /* Request a slot for usage. */
66 static inline
67 int request_slot(struct bcm43xx_dmaring *ring)
68 {
69         int slot;
70
71         assert(ring->tx);
72         assert(!ring->suspended);
73         assert(free_slots(ring) != 0);
74
75         slot = next_slot(ring, ring->current_slot);
76         ring->current_slot = slot;
77         ring->used_slots++;
78
79         /* Check the number of available slots and suspend TX,
80          * if we are running low on free slots.
81          */
82         if (unlikely(free_slots(ring) < ring->suspend_mark)) {
83                 netif_stop_queue(ring->bcm->net_dev);
84                 ring->suspended = 1;
85         }
86 #ifdef CONFIG_BCM43XX_DEBUG
87         if (ring->used_slots > ring->max_used_slots)
88                 ring->max_used_slots = ring->used_slots;
89 #endif /* CONFIG_BCM43XX_DEBUG*/
90
91         return slot;
92 }
93
94 /* Return a slot to the free slots. */
95 static inline
96 void return_slot(struct bcm43xx_dmaring *ring, int slot)
97 {
98         assert(ring->tx);
99
100         ring->used_slots--;
101
102         /* Check if TX is suspended and check if we have
103          * enough free slots to resume it again.
104          */
105         if (unlikely(ring->suspended)) {
106                 if (free_slots(ring) >= ring->resume_mark) {
107                         ring->suspended = 0;
108                         netif_wake_queue(ring->bcm->net_dev);
109                 }
110         }
111 }
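/* request_slot() and return_slot() implement TX flow control with a bit of
 * hysteresis: the net queue is stopped once the free slots drop below
 * suspend_mark and is only woken again when they have climbed back to
 * resume_mark (suspend_mark < resume_mark is asserted in
 * bcm43xx_setup_dmaring()). */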
112
113 static inline
114 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
115                           unsigned char *buf,
116                           size_t len,
117                           int tx)
118 {
119         dma_addr_t dmaaddr;
120
121         if (tx) {
122                 dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
123                                          buf, len,
124                                          DMA_TO_DEVICE);
125         } else {
126                 dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
127                                          buf, len,
128                                          DMA_FROM_DEVICE);
129         }
130
131         return dmaaddr;
132 }
133
134 static inline
135 void unmap_descbuffer(struct bcm43xx_dmaring *ring,
136                       dma_addr_t addr,
137                       size_t len,
138                       int tx)
139 {
140         if (tx) {
141                 dma_unmap_single(&ring->bcm->pci_dev->dev,
142                                  addr, len,
143                                  DMA_TO_DEVICE);
144         } else {
145                 dma_unmap_single(&ring->bcm->pci_dev->dev,
146                                  addr, len,
147                                  DMA_FROM_DEVICE);
148         }
149 }
150
151 static inline
152 void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
153                              dma_addr_t addr,
154                              size_t len)
155 {
156         assert(!ring->tx);
157
158         dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
159                                 addr, len, DMA_FROM_DEVICE);
160 }
161
162 static inline
163 void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
164                                 dma_addr_t addr,
165                                 size_t len)
166 {
167         assert(!ring->tx);
168
169         dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
170                                    addr, len, DMA_FROM_DEVICE);
171 }
172
173 /* Unmap and free a descriptor buffer. */
174 static inline
175 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
176                             struct bcm43xx_dmadesc *desc,
177                             struct bcm43xx_dmadesc_meta *meta,
178                             int irq_context)
179 {
180         assert(meta->skb);
181         if (irq_context)
182                 dev_kfree_skb_irq(meta->skb);
183         else
184                 dev_kfree_skb(meta->skb);
185         meta->skb = NULL;
186 }
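/* The irq_context argument exists because dev_kfree_skb() must not be
 * called from hardirq context.  The TX status handler, which runs from the
 * interrupt path, passes 1 so the skb is freed with dev_kfree_skb_irq();
 * the teardown path in free_all_descbuffers() passes 0. */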
187
188 static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
189 {
190         struct device *dev = &(ring->bcm->pci_dev->dev);
191
192         ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
193                                          &(ring->dmabase), GFP_KERNEL);
194         if (!ring->vbase) {
195                 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
196                 return -ENOMEM;
197         }
198         if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
199                 printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G "
200                                     "(0x%08x, len: %lu)\n",
201                        ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
202                 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
203                                   ring->vbase, ring->dmabase);
204                 return -ENOMEM;
205         }
206         assert(!(ring->dmabase & 0x000003FF));
207         memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
208
209         return 0;
210 }
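/* Two hardware constraints are checked in alloc_ringmemory(): the DMA
 * engine can only address ring memory below BCM43xx_DMA_BUSADDRMAX (hence
 * the ">1G" error above), and the ring base must have its low 10 bits
 * clear, i.e. be 1 KiB aligned.  dma_alloc_coherent() is expected to give
 * us that; the assert merely documents it. */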
211
212 static void free_ringmemory(struct bcm43xx_dmaring *ring)
213 {
214         struct device *dev = &(ring->bcm->pci_dev->dev);
215
216         dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
217                           ring->vbase, ring->dmabase);
218 }
219
220 /* Reset the RX DMA channel */
221 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
222                                    u16 mmio_base)
223 {
224         int i;
225         u32 value;
226
227         bcm43xx_write32(bcm,
228                         mmio_base + BCM43xx_DMA_RX_CONTROL,
229                         0x00000000);
230         for (i = 0; i < 1000; i++) {
231                 value = bcm43xx_read32(bcm,
232                                        mmio_base + BCM43xx_DMA_RX_STATUS);
233                 value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
234                 if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
235                         i = -1;
236                         break;
237                 }
238                 udelay(10);
239         }
240         if (i != -1) {
241                 printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
242                 return -ENODEV;
243         }
244
245         return 0;
246 }
247
248 /* Reset the TX DMA channel */
249 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
250                                    u16 mmio_base)
251 {
252         int i;
253         u32 value;
254
255         for (i = 0; i < 1000; i++) {
256                 value = bcm43xx_read32(bcm,
257                                        mmio_base + BCM43xx_DMA_TX_STATUS);
258                 value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
259                 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
260                     value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
261                     value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
262                         break;
263                 udelay(10);
264         }
265         bcm43xx_write32(bcm,
266                         mmio_base + BCM43xx_DMA_TX_CONTROL,
267                         0x00000000);
268         for (i = 0; i < 1000; i++) {
269                 value = bcm43xx_read32(bcm,
270                                        mmio_base + BCM43xx_DMA_TX_STATUS);
271                 value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
272                 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
273                         i = -1;
274                         break;
275                 }
276                 udelay(10);
277         }
278         if (i != -1) {
279                 printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
280                 return -ENODEV;
281         }
282         /* Ensure the reset has completed. */
283         udelay(300);
284
285         return 0;
286 }
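/* bcm43xx_dmacontroller_tx_reset() works in two steps: wait for the engine
 * to drain into a disabled/idle-wait/stopped state, then clear the control
 * register and wait for it to report DISABLED.  The "i = -1; break;" idiom
 * marks a successful wait, so "if (i != -1)" after a loop means the roughly
 * 10ms poll window expired. */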
287
288 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
289                                struct bcm43xx_dmadesc *desc,
290                                struct bcm43xx_dmadesc_meta *meta,
291                                gfp_t gfp_flags)
292 {
293         struct bcm43xx_rxhdr *rxhdr;
294         dma_addr_t dmaaddr;
295         u32 desc_addr;
296         u32 desc_ctl;
297         const int slot = (int)(desc - ring->vbase);
298         struct sk_buff *skb;
299
300         assert(slot >= 0 && slot < ring->nr_slots);
301         assert(!ring->tx);
302
303         skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
304         if (unlikely(!skb))
305                 return -ENOMEM;
306         dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
307         if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
308                 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
309                 dev_kfree_skb_any(skb);
310                 printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G "
311                                     "(0x%08x, len: %u)\n",
312                        dmaaddr, ring->rx_buffersize);
313                 return -ENOMEM;
314         }
315         meta->skb = skb;
316         meta->dmaaddr = dmaaddr;
317         skb->dev = ring->bcm->net_dev;
318         desc_addr = (u32)(dmaaddr + ring->memoffset);
319         desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
320                     (u32)(ring->rx_buffersize - ring->frameoffset));
321         if (slot == ring->nr_slots - 1)
322                 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
323         set_desc_addr(desc, desc_addr);
324         set_desc_ctl(desc, desc_ctl);
325
326         rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
327         rxhdr->frame_length = 0;
328         rxhdr->flags1 = 0;
329
330         return 0;
331 }
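/* Zeroing frame_length and flags1 at the end of setup_rx_descbuffer() is
 * not cosmetic: dma_rx() treats a non-zero frame_length as the sign that
 * the card has finished writing the buffer, and polls it briefly before
 * giving up on the slot. */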
332
333 /* Allocate the initial descbuffers.
334  * This is used for an RX ring only.
335  */
336 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
337 {
338         int i, err = -ENOMEM;
339         struct bcm43xx_dmadesc *desc;
340         struct bcm43xx_dmadesc_meta *meta;
341
342         for (i = 0; i < ring->nr_slots; i++) {
343                 desc = ring->vbase + i;
344                 meta = ring->meta + i;
345
346                 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
347                 if (err)
348                         goto err_unwind;
349         }
350         ring->used_slots = ring->nr_slots;
351         err = 0;
352 out:
353         return err;
354
355 err_unwind:
356         for (i--; i >= 0; i--) {
357                 desc = ring->vbase + i;
358                 meta = ring->meta + i;
359
360                 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
361                 dev_kfree_skb(meta->skb);
362         }
363         goto out;
364 }
365
366 /* Do initial setup of the DMA controller.
367  * Reset the controller, write the ring busaddress
368  * and switch the "enable" bit on.
369  */
370 static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
371 {
372         int err = 0;
373         u32 value;
374
375         if (ring->tx) {
376                 /* Set Transmit Control register to "transmit enable" */
377                 bcm43xx_write32(ring->bcm,
378                                 ring->mmio_base + BCM43xx_DMA_TX_CONTROL,
379                                 BCM43xx_DMA_TXCTRL_ENABLE);
380                 /* Set Transmit Descriptor ring address. */
381                 bcm43xx_write32(ring->bcm,
382                                 ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
383                                 ring->dmabase + ring->memoffset);
384         } else {
385                 err = alloc_initial_descbuffers(ring);
386                 if (err)
387                         goto out;
388                 /* Set Receive Control "receive enable" and frame offset */
389                 value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
390                 value |= BCM43xx_DMA_RXCTRL_ENABLE;
391                 bcm43xx_write32(ring->bcm,
392                                 ring->mmio_base + BCM43xx_DMA_RX_CONTROL,
393                                 value);
394                 /* Set Receive Descriptor ring address. */
395                 bcm43xx_write32(ring->bcm,
396                                 ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
397                                 ring->dmabase + ring->memoffset);
398                 /* Init the descriptor pointer. */
399                 bcm43xx_write32(ring->bcm,
400                                 ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
401                                 200);
402         }
403
404 out:
405         return err;
406 }
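/* ring->memoffset (BCM43xx_DMA_DMABUSADDROFFSET) is added to every bus
 * address handed to the device: the ring base above as well as the buffer
 * addresses in setup_rx_descbuffer() and dma_tx_fragment().  Judging by the
 * CONFIG_BCM947XX special case in bcm43xx_setup_dmaring(), it translates
 * host bus addresses into the window the core expects; on a BCM947xx SoC
 * with the core on bus 0 no translation is needed. */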
407
408 /* Shutdown the DMA controller. */
409 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
410 {
411         if (ring->tx) {
412                 bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
413                 /* Zero out Transmit Descriptor ring address. */
414                 bcm43xx_write32(ring->bcm,
415                                 ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
416                                 0x00000000);
417         } else {
418                 bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
419                 /* Zero out Receive Descriptor ring address. */
420                 bcm43xx_write32(ring->bcm,
421                                 ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
422                                 0x00000000);
423         }
424 }
425
426 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
427 {
428         struct bcm43xx_dmadesc *desc;
429         struct bcm43xx_dmadesc_meta *meta;
430         int i;
431
432         if (!ring->used_slots)
433                 return;
434         for (i = 0; i < ring->nr_slots; i++) {
435                 desc = ring->vbase + i;
436                 meta = ring->meta + i;
437
438                 if (!meta->skb) {
439                         assert(ring->tx);
440                         continue;
441                 }
442                 if (ring->tx) {
443                         unmap_descbuffer(ring, meta->dmaaddr,
444                                          meta->skb->len, 1);
445                 } else {
446                         unmap_descbuffer(ring, meta->dmaaddr,
447                                          ring->rx_buffersize, 0);
448                 }
449                 free_descriptor_buffer(ring, desc, meta, 0);
450         }
451 }
452
453 /* Main initialization function. */
454 static
455 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
456                                                u16 dma_controller_base,
457                                                int nr_descriptor_slots,
458                                                int tx)
459 {
460         struct bcm43xx_dmaring *ring;
461         int err;
462
463         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
464         if (!ring)
465                 goto out;
466
467         ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
468                              GFP_KERNEL);
469         if (!ring->meta)
470                 goto err_kfree_ring;
471
472         ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
473 #ifdef CONFIG_BCM947XX
474         if (bcm->pci_dev->bus->number == 0)
475                 ring->memoffset = 0;
476 #endif
477
478         ring->bcm = bcm;
479         ring->nr_slots = nr_descriptor_slots;
480         ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
481         ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
482         assert(ring->suspend_mark < ring->resume_mark);
483         ring->mmio_base = dma_controller_base;
484         if (tx) {
485                 ring->tx = 1;
486                 ring->current_slot = -1;
487         } else {
488                 switch (dma_controller_base) {
489                 case BCM43xx_MMIO_DMA1_BASE:
490                         ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
491                         ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
492                         break;
493                 case BCM43xx_MMIO_DMA4_BASE:
494                         ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
495                         ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
496                         break;
497                 default:
498                         assert(0);
499                 }
500         }
501
502         err = alloc_ringmemory(ring);
503         if (err)
504                 goto err_kfree_meta;
505         err = dmacontroller_setup(ring);
506         if (err)
507                 goto err_free_ringmemory;
508
509 out:
510         return ring;
511
512 err_free_ringmemory:
513         free_ringmemory(ring);
514 err_kfree_meta:
515         kfree(ring->meta);
516 err_kfree_ring:
517         kfree(ring);
518         ring = NULL;
519         goto out;
520 }
521
522 /* Main cleanup function. */
523 static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
524 {
525         if (!ring)
526                 return;
527
528         dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
529                 ring->mmio_base,
530                 (ring->tx) ? "TX" : "RX",
531                 ring->max_used_slots, ring->nr_slots);
532         /* Device IRQs are disabled prior to entering this function,
533          * so no need to take care of concurrency with rx handler stuff.
534          */
535         dmacontroller_cleanup(ring);
536         free_all_descbuffers(ring);
537         free_ringmemory(ring);
538
539         kfree(ring->meta);
540         kfree(ring);
541 }
542
543 void bcm43xx_dma_free(struct bcm43xx_private *bcm)
544 {
545         struct bcm43xx_dma *dma = bcm->current_core->dma;
546
547         bcm43xx_destroy_dmaring(dma->rx_ring1);
548         dma->rx_ring1 = NULL;
549         bcm43xx_destroy_dmaring(dma->rx_ring0);
550         dma->rx_ring0 = NULL;
551         bcm43xx_destroy_dmaring(dma->tx_ring3);
552         dma->tx_ring3 = NULL;
553         bcm43xx_destroy_dmaring(dma->tx_ring2);
554         dma->tx_ring2 = NULL;
555         bcm43xx_destroy_dmaring(dma->tx_ring1);
556         dma->tx_ring1 = NULL;
557         bcm43xx_destroy_dmaring(dma->tx_ring0);
558         dma->tx_ring0 = NULL;
559 }
560
561 int bcm43xx_dma_init(struct bcm43xx_private *bcm)
562 {
563         struct bcm43xx_dma *dma = bcm->current_core->dma;
564         struct bcm43xx_dmaring *ring;
565         int err = -ENOMEM;
566
567         /* setup TX DMA channels. */
568         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
569                                      BCM43xx_TXRING_SLOTS, 1);
570         if (!ring)
571                 goto out;
572         dma->tx_ring0 = ring;
573
574         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
575                                      BCM43xx_TXRING_SLOTS, 1);
576         if (!ring)
577                 goto err_destroy_tx0;
578         dma->tx_ring1 = ring;
579
580         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
581                                      BCM43xx_TXRING_SLOTS, 1);
582         if (!ring)
583                 goto err_destroy_tx1;
584         dma->tx_ring2 = ring;
585
586         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
587                                      BCM43xx_TXRING_SLOTS, 1);
588         if (!ring)
589                 goto err_destroy_tx2;
590         dma->tx_ring3 = ring;
591
592         /* setup RX DMA channels. */
593         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
594                                      BCM43xx_RXRING_SLOTS, 0);
595         if (!ring)
596                 goto err_destroy_tx3;
597         dma->rx_ring0 = ring;
598
599         if (bcm->current_core->rev < 5) {
600                 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
601                                              BCM43xx_RXRING_SLOTS, 0);
602                 if (!ring)
603                         goto err_destroy_rx0;
604                 dma->rx_ring1 = ring;
605         }
606
607         dprintk(KERN_INFO PFX "DMA initialized\n");
608         err = 0;
609 out:
610         return err;
611
612 err_destroy_rx0:
613         bcm43xx_destroy_dmaring(dma->rx_ring0);
614         dma->rx_ring0 = NULL;
615 err_destroy_tx3:
616         bcm43xx_destroy_dmaring(dma->tx_ring3);
617         dma->tx_ring3 = NULL;
618 err_destroy_tx2:
619         bcm43xx_destroy_dmaring(dma->tx_ring2);
620         dma->tx_ring2 = NULL;
621 err_destroy_tx1:
622         bcm43xx_destroy_dmaring(dma->tx_ring1);
623         dma->tx_ring1 = NULL;
624 err_destroy_tx0:
625         bcm43xx_destroy_dmaring(dma->tx_ring0);
626         dma->tx_ring0 = NULL;
627         goto out;
628 }
629
630 /* Generate a cookie for the TX header. */
631 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
632                            int slot)
633 {
634         u16 cookie = 0x0000;
635
636         /* Use the upper 4 bits of the cookie as
637          * DMA controller ID and store the slot number
638          * in the lower 12 bits
639          */
640         switch (ring->mmio_base) {
641         default:
642                 assert(0);
643         case BCM43xx_MMIO_DMA1_BASE:
644                 break;
645         case BCM43xx_MMIO_DMA2_BASE:
646                 cookie = 0x1000;
647                 break;
648         case BCM43xx_MMIO_DMA3_BASE:
649                 cookie = 0x2000;
650                 break;
651         case BCM43xx_MMIO_DMA4_BASE:
652                 cookie = 0x3000;
653                 break;
654         }
655         assert(((u16)slot & 0xF000) == 0x0000);
656         cookie |= (u16)slot;
657
658         return cookie;
659 }
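/* Example: a frame placed in slot 23 of the TX ring at
 * BCM43xx_MMIO_DMA3_BASE gets cookie 0x2017; parse_cookie() below reverses
 * the mapping when the xmit status comes back.  With assertions compiled
 * out, an unknown mmio_base falls through to the DMA1 encoding (0x0000). */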
660
661 /* Inspect a cookie and find out to which controller/slot it belongs. */
662 static
663 struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
664                                       u16 cookie, int *slot)
665 {
666         struct bcm43xx_dma *dma = bcm->current_core->dma;
667         struct bcm43xx_dmaring *ring = NULL;
668
669         switch (cookie & 0xF000) {
670         case 0x0000:
671                 ring = dma->tx_ring0;
672                 break;
673         case 0x1000:
674                 ring = dma->tx_ring1;
675                 break;
676         case 0x2000:
677                 ring = dma->tx_ring2;
678                 break;
679         case 0x3000:
680                 ring = dma->tx_ring3;
681                 break;
682         default:
683                 assert(0);
684         }
685         *slot = (cookie & 0x0FFF);
686         assert(*slot >= 0 && *slot < ring->nr_slots);
687
688         return ring;
689 }
690
691 static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
692                                   int slot)
693 {
694         /* Everything is ready to start. Buffers are DMA mapped and
695          * associated with slots.
696          * "slot" is the last slot of the new frame we want to transmit.
697          * Close your seat belts now, please.
698          */
699         wmb();
700         slot = next_slot(ring, slot);
701         bcm43xx_write32(ring->bcm,
702                         ring->mmio_base + BCM43xx_DMA_TX_DESC_INDEX,
703                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
704 }
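/* Note that dmacontroller_poke_tx() advances to the slot *after* the
 * frame's last descriptor before writing the TX descriptor index register
 * (a byte offset into the ring), i.e. the register is treated as pointing
 * one past the last descriptor to process.  The wmb() makes sure the
 * descriptor contents are visible to the device before this doorbell
 * write. */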
705
706 static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
707                            struct sk_buff *skb,
708                            u8 cur_frag)
709 {
710         int slot;
711         struct bcm43xx_dmadesc *desc;
712         struct bcm43xx_dmadesc_meta *meta;
713         u32 desc_ctl;
714         u32 desc_addr;
715
716         assert(skb_shinfo(skb)->nr_frags == 0);
717
718         slot = request_slot(ring);
719         desc = ring->vbase + slot;
720         meta = ring->meta + slot;
721
722         /* Add a device specific TX header. */
723         assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
724         /* Reserve enough headroom for the device tx header. */
725         __skb_push(skb, sizeof(struct bcm43xx_txhdr));
726         /* Now calculate and add the tx header.
727          * The tx header includes the PLCP header.
728          */
729         bcm43xx_generate_txhdr(ring->bcm,
730                                (struct bcm43xx_txhdr *)skb->data,
731                                skb->data + sizeof(struct bcm43xx_txhdr),
732                                skb->len - sizeof(struct bcm43xx_txhdr),
733                                (cur_frag == 0),
734                                generate_cookie(ring, slot));
735
736         meta->skb = skb;
737         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
738         if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
739                 return_slot(ring, slot);
740                 printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G "
741                                     "(0x%08x, len: %u)\n",
742                        meta->dmaaddr, skb->len);
743                 return -ENOMEM;
744         }
745
746         desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
747         desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
748         desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
749         desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
750                      (u32)(meta->skb->len - ring->frameoffset));
751         if (slot == ring->nr_slots - 1)
752                 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
753
754         set_desc_ctl(desc, desc_ctl);
755         set_desc_addr(desc, desc_addr);
756         /* Now transfer the whole frame. */
757         dmacontroller_poke_tx(ring, slot);
758
759         return 0;
760 }
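/* dma_tx_fragment() maps each 802.11 fragment into exactly one descriptor
 * slot (FRAMESTART and FRAMEEND are both set), and paged skbs are not
 * supported, as the nr_frags == 0 assertion at the top documents.  The skb
 * stays DMA-mapped from here until bcm43xx_dma_handle_xmitstatus() unmaps
 * and frees it. */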
761
762 int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
763                    struct ieee80211_txb *txb)
764 {
765         /* We just received a packet from the kernel network subsystem.
766          * Add headers and DMA map the memory. Poke
767          * the device to send the stuff.
768          * Note that this is called from atomic context.
769          */
770         struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
771         u8 i;
772         struct sk_buff *skb;
773
774         assert(ring->tx);
775         if (unlikely(free_slots(ring) < txb->nr_frags)) {
776                 /* The queue should be stopped,
777                  * if we are low on free slots.
778                  * If this ever triggers, we have to lower the suspend_mark.
779                  */
780                 dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
781                 return -ENOMEM;
782         }
783
784         for (i = 0; i < txb->nr_frags; i++) {
785                 skb = txb->fragments[i];
786                 /* Detach the skb from the txb so ieee80211_txb_free() won't free it;
787                  * it is freed in the xmit status handler once the DMA transfer completed. */
787                 txb->fragments[i] = NULL;
788                 dma_tx_fragment(ring, skb, i);
789                 //TODO: handle failure of dma_tx_fragment
790         }
791         ieee80211_txb_free(txb);
792
793         return 0;
794 }
795
796 void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
797                                    struct bcm43xx_xmitstatus *status)
798 {
799         struct bcm43xx_dmaring *ring;
800         struct bcm43xx_dmadesc *desc;
801         struct bcm43xx_dmadesc_meta *meta;
802         int is_last_fragment;
803         int slot;
804
805         ring = parse_cookie(bcm, status->cookie, &slot);
806         assert(ring);
807         assert(ring->tx);
808         assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
809         while (1) {
810                 assert(slot >= 0 && slot < ring->nr_slots);
811                 desc = ring->vbase + slot;
812                 meta = ring->meta + slot;
813
814                 is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
815                 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
816                 free_descriptor_buffer(ring, desc, meta, 1);
817                 /* Everything belonging to the slot is unmapped
818                  * and freed, so we can return it.
819                  */
820                 return_slot(ring, slot);
821
822                 if (is_last_fragment)
823                         break;
824                 slot = next_slot(ring, slot);
825         }
826         bcm->stats.last_tx = jiffies;
827 }
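/* bcm43xx_dma_handle_xmitstatus() is where TX skbs are finally released:
 * starting at the FRAMESTART slot named by the cookie, descriptors are
 * unmapped, their skbs freed in IRQ context and their slots returned, up to
 * and including the descriptor carrying FRAMEEND.  With dma_tx_fragment()
 * setting both flags on one descriptor, that is currently a single slot per
 * status. */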
828
829 static void dma_rx(struct bcm43xx_dmaring *ring,
830                    int *slot)
831 {
832         struct bcm43xx_dmadesc *desc;
833         struct bcm43xx_dmadesc_meta *meta;
834         struct bcm43xx_rxhdr *rxhdr;
835         struct sk_buff *skb;
836         u16 len;
837         int err;
838         dma_addr_t dmaaddr;
839
840         desc = ring->vbase + *slot;
841         meta = ring->meta + *slot;
842
843         sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
844         skb = meta->skb;
845
846         if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
847                 /* We received an xmit status. */
848                 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
849                 struct bcm43xx_xmitstatus stat;
850
851                 stat.cookie = le16_to_cpu(hw->cookie);
852                 stat.flags = hw->flags;
853                 stat.cnt1 = hw->cnt1;
854                 stat.cnt2 = hw->cnt2;
855                 stat.seq = le16_to_cpu(hw->seq);
856                 stat.unknown = le16_to_cpu(hw->unknown);
857
858                 bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
859                 bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
860                 /* recycle the descriptor buffer. */
861                 sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);
862
863                 return;
864         }
865         rxhdr = (struct bcm43xx_rxhdr *)skb->data;
866         len = le16_to_cpu(rxhdr->frame_length);
867         if (len == 0) {
868                 int i = 0;
869
870                 do {
871                         udelay(2);
872                         barrier();
873                         len = le16_to_cpu(rxhdr->frame_length);
874                 } while (len == 0 && i++ < 5);
875                 if (unlikely(len == 0)) {
876                         /* recycle the descriptor buffer. */
877                         sync_descbuffer_for_device(ring, meta->dmaaddr,
878                                                    ring->rx_buffersize);
879                         goto drop;
880                 }
881         }
882         if (unlikely(len > ring->rx_buffersize)) {
883                 /* The data did not fit into one descriptor buffer
884                  * and is split over multiple buffers.
885                  * This should never happen, as we try to allocate buffers
886                  * big enough. So simply ignore this packet.
887                  */
888                 int cnt = 0;
889                 s32 tmp = len;
890
891                 while (1) {
892                         desc = ring->vbase + *slot;
893                         meta = ring->meta + *slot;
894                         /* recycle the descriptor buffer. */
895                         sync_descbuffer_for_device(ring, meta->dmaaddr,
896                                                    ring->rx_buffersize);
897                         *slot = next_slot(ring, *slot);
898                         cnt++;
899                         tmp -= ring->rx_buffersize;
900                         if (tmp <= 0)
901                                 break;
902                 }
903                 printkl(KERN_ERR PFX "DMA RX buffer too small "
904                                      "(len: %u, buffer: %u, nr-dropped: %d)\n",
905                         len, ring->rx_buffersize, cnt);
906                 goto drop;
907         }
908         len -= IEEE80211_FCS_LEN;
909
910         dmaaddr = meta->dmaaddr;
911         err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
912         if (unlikely(err)) {
913                 dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
914                 sync_descbuffer_for_device(ring, dmaaddr,
915                                            ring->rx_buffersize);
916                 goto drop;
917         }
918
919         unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
920         skb_put(skb, len + ring->frameoffset);
921         skb_pull(skb, ring->frameoffset);
922
923         err = bcm43xx_rx(ring->bcm, skb, rxhdr);
924         if (err) {
925                 dev_kfree_skb_irq(skb);
926                 goto drop;
927         }
928
929 drop:
930         return;
931 }
932
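/* Process the frames the hardware has completed on an RX ring.  Every slot
 * from ring->current_slot up to, but not including, the slot named by the
 * hardware's RX descriptor pointer is handed to dma_rx(); afterwards the RX
 * descriptor index register (again a byte offset into the ring) is written
 * to tell the hardware how far the driver got. */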
933 void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
934 {
935         u32 status;
936         u16 descptr;
937         int slot, current_slot;
938 #ifdef CONFIG_BCM43XX_DEBUG
939         int used_slots = 0;
940 #endif
941
942         assert(!ring->tx);
943         status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
944         descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
945         current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
946         assert(current_slot >= 0 && current_slot < ring->nr_slots);
947
948         slot = ring->current_slot;
949         for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
950                 dma_rx(ring, &slot);
951 #ifdef CONFIG_BCM43XX_DEBUG
952                 if (++used_slots > ring->max_used_slots)
953                         ring->max_used_slots = used_slots;
954 #endif
955         }
956         bcm43xx_write32(ring->bcm,
957                         ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
958                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
959         ring->current_slot = slot;
960 }
961
962 /* vim: set ts=8 sw=8 sts=8: */