/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME      2

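/* Each DMA type provides its descriptor accessors through a b43_dma_ops
 * vtable (dma32_ops/dma64_ops below), which the ring stores in ring->ops.
 * The rest of the code is thereby kept agnostic of the descriptor format. */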
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

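/* Write one 32bit hardware descriptor. The low bits of the bus address go
 * into the address word together with the SSB routing bits from
 * ssb_dma_translation(); the bits covered by SSB_DMA_TRANSLATION_MASK are
 * exported separately through the ADDREXT field of the control word. */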
static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc32 *descbase = ring->descbase;
        int slot;
        u32 ctl;
        u32 addr;
        u32 addrext;

        slot = (int)(&(desc->dma32) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ssb_dma_translation(ring->dev->dev);
        ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
        if (slot == ring->nr_slots - 1)
                ctl |= B43_DMA32_DCTL_DTABLEEND;
        if (start)
                ctl |= B43_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
            & B43_DMA32_DCTL_ADDREXT_MASK;

        desc->dma32.control = cpu_to_le32(ctl);
        desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
        val &= B43_DMA32_RXDPTR;

        return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
        .idx2desc = op32_idx2desc,
        .fill_descriptor = op32_fill_descriptor,
        .poke_tx = op32_poke_tx,
        .tx_suspend = op32_tx_suspend,
        .tx_resume = op32_tx_resume,
        .get_current_rxslot = op32_get_current_rxslot,
        .set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc64 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc64 *descbase = ring->descbase;
        int slot;
        u32 ctl0 = 0, ctl1 = 0;
        u32 addrlo, addrhi;
        u32 addrext;

        slot = (int)(&(desc->dma64) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
        addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
        if (slot == ring->nr_slots - 1)
                ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
        if (start)
                ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
        if (end)
                ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
        if (irq)
                ctl0 |= B43_DMA64_DCTL0_IRQ;
        ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
        ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
            & B43_DMA64_DCTL1_ADDREXT_MASK;

        desc->dma64.control0 = cpu_to_le32(ctl0);
        desc->dma64.control1 = cpu_to_le32(ctl1);
        desc->dma64.address_low = cpu_to_le32(addrlo);
        desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
        val &= B43_DMA64_RXSTATDPTR;

        return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
        .idx2desc = op64_idx2desc,
        .fill_descriptor = op64_fill_descriptor,
        .poke_tx = op64_poke_tx,
        .tx_suspend = op64_tx_suspend,
        .tx_resume = op64_tx_resume,
        .get_current_rxslot = op64_get_current_rxslot,
        .set_current_rxslot = op64_set_current_rxslot,
};

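/* Slot bookkeeping helpers. The ring is a classic circular buffer:
 * next_slot()/prev_slot() wrap around at nr_slots, and free_slots() is
 * simply the difference between the ring size and the used slots. */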
static inline int free_slots(struct b43_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
        if (current_used_slots <= ring->max_used_slots)
                return;
        ring->max_used_slots = current_used_slots;
        if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
                b43dbg(ring->dev->wl,
                       "max_used_slots increased to %d on %s ring %d\n",
                       ring->max_used_slots,
                       ring->tx ? "TX" : "RX", ring->index);
        }
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
        int slot;

        B43_WARN_ON(!ring->tx);
        B43_WARN_ON(ring->stopped);
        B43_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);

        return slot;
}

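/* Map a (DMA type, controller index) pair to the MMIO base address of
 * that DMA controller's register block. */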
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
        static const u16 map64[] = {
                B43_MMIO_DMA64_BASE0,
                B43_MMIO_DMA64_BASE1,
                B43_MMIO_DMA64_BASE2,
                B43_MMIO_DMA64_BASE3,
                B43_MMIO_DMA64_BASE4,
                B43_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                B43_MMIO_DMA32_BASE0,
                B43_MMIO_DMA32_BASE1,
                B43_MMIO_DMA32_BASE2,
                B43_MMIO_DMA32_BASE3,
                B43_MMIO_DMA32_BASE4,
                B43_MMIO_DMA32_BASE5,
        };

        if (type == B43_DMA_64BIT) {
                B43_WARN_ON(!(controller_idx >= 0 &&
                              controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
        }
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map32)));
        return map32[controller_idx];
}

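/* Thin wrappers around the ssb DMA API. The "tx" argument selects the
 * mapping direction; the sync helpers are only legal on RX rings. */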
static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                              unsigned char *buf, size_t len, int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len, DMA_TO_DEVICE);
        } else {
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len, DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
                          dma_addr_t addr, size_t len, int tx)
{
        if (tx) {
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len, DMA_TO_DEVICE);
        } else {
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len, DMA_FROM_DEVICE);
        }
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                                 dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        ssb_dma_sync_single_for_cpu(ring->dev->dev,
                                    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                    dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        ssb_dma_sync_single_for_device(ring->dev->dev,
                                       addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
                                struct b43_dmadesc_meta *meta)
{
        if (meta->skb) {
                dev_kfree_skb_any(meta->skb);
                meta->skb = NULL;
        }
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
        gfp_t flags = GFP_KERNEL;

        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
         * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
         * has shown that 4K is sufficient for the latter as long as the buffer
         * does not cross an 8K boundary.
         *
         * For unknown reasons - possibly a hardware error - the BCM4311 rev
         * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
         * which accounts for the GFP_DMA flag below.
         *
         * The flags here must match the flags in free_ringmemory below!
         */
        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;
        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
                                                  B43_DMA_RINGMEMSIZE,
                                                  &(ring->dmabase), flags);
        if (!ring->descbase) {
                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
        gfp_t flags = GFP_KERNEL;

        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;

        ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
                                ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
        b43_write32(dev, mmio_base + offset, 0);
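        /* Poll for the DISABLED state. i is set to -1 on success, so a
         * normal loop exit indicates a timeout. */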
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
                                                   B43_DMA32_RXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_RXSTAT;
                        if (value == B43_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_RXSTATE;
                        if (value == B43_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA RX reset timed out\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED ||
                            value == B43_DMA64_TXSTAT_IDLEWAIT ||
                            value == B43_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED ||
                            value == B43_DMA32_TXSTAT_IDLEWAIT ||
                            value == B43_DMA32_TXSTAT_STOPPED)
                                break;
                }
                msleep(1);
        }
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA TX reset timed out\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        msleep(1);

        return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                  dma_addr_t addr,
                                  size_t buffersize, bool dma_to_device)
{
        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                return 1;

        switch (ring->type) {
        case B43_DMA_30BIT:
                if ((u64)addr + buffersize > (1ULL << 30))
                        goto address_error;
                break;
        case B43_DMA_32BIT:
                if ((u64)addr + buffersize > (1ULL << 32))
                        goto address_error;
                break;
        case B43_DMA_64BIT:
                /* Currently we can't have addresses beyond
                 * 64bit in the kernel. */
                break;
        }

        /* The address is OK. */
        return 0;

address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);

        return 1;
}

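/* Check whether the poison pattern written by b43_poison_rx_buffer()
 * is still intact, i.e. the device never DMAed into this buffer.
 * ANDing the bytes together only yields 0xFF if all of them are 0xFF. */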
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
        unsigned char *f = skb->data + ring->frameoffset;

        return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
        struct b43_rxhdr_fw4 *rxhdr;
        unsigned char *frame;

        /* This poisons the RX buffer to detect DMA failures. */

        rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
        rxhdr->frame_len = 0;

        B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
        frame = skb->data + ring->frameoffset;
        memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

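/* Allocate and map an RX buffer for one descriptor slot and write the
 * hardware descriptor for it. Falls back to GFP_DMA memory if the first
 * buffer maps to an address the DMA engine cannot reach. */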
static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        B43_WARN_ON(ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        b43_poison_rx_buffer(ring, skb);
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;

                dev_kfree_skb_any(skb);

                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
                if (unlikely(!skb))
                        return -ENOMEM;
                b43_poison_rx_buffer(ring, skb);
                dmaaddr = map_descbuffer(ring, skb->data,
                                         ring->rx_buffersize, 0);
                if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                        b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
                        dev_kfree_skb_any(skb);
                        return -EIO;
                }
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err) {
                        b43err(ring->dev->wl,
                               "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
                }
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
      out:
        return err;

      err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring bus address
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;
        u32 trans = ssb_dma_translation(ring->dev->dev);

        if (ring->tx) {
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA64_TXENABLE;
                        value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                            & B43_DMA64_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_TXCTL, value);
                        b43_dma_write(ring, B43_DMA64_TXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_TXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA32_TXENABLE;
                        value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                            & B43_DMA32_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_TXCTL, value);
                        b43_dma_write(ring, B43_DMA32_TXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
                        value |= B43_DMA64_RXENABLE;
                        value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                            & B43_DMA64_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_RXCTL, value);
                        b43_dma_write(ring, B43_DMA64_RXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_RXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                        b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc64));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
                        value |= B43_DMA32_RXENABLE;
                        value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                            & B43_DMA32_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_RXCTL, value);
                        b43_dma_write(ring, B43_DMA32_RXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                        b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc32));
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
        if (ring->tx) {
                b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_TXRING, 0);
        } else {
                b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_RXRING, 0);
        }
}

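/* Free all buffers attached to the descriptors of a ring. On TX rings,
 * header-only slots legitimately carry no skb and are skipped. */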
static void free_all_descbuffers(struct b43_dmaring *ring)
{
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        B43_WARN_ON(!ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta);
        }
}

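/* Probe the widest DMA mask the core supports: the TMSHIGH DMA64 flag
 * indicates 64bit capability; otherwise write the ADDREXT mask to a
 * 32bit TXCTL register and check whether the bits stick, which
 * distinguishes 32bit engines from the old 30bit ones. */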
static u64 supported_dma_mask(struct b43_wldev *dev)
{
        u32 tmp;
        u16 mmio_base;

        tmp = b43_read32(dev, SSB_TMSHIGH);
        if (tmp & SSB_TMSHIGH_DMA64)
                return DMA_BIT_MASK(64);
        mmio_base = b43_dmacontroller_base(0, 0);
        b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
        tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
        if (tmp & B43_DMA32_TXADDREXT_MASK)
                return DMA_BIT_MASK(32);

        return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
        if (dmamask == DMA_BIT_MASK(30))
                return B43_DMA_30BIT;
        if (dmamask == DMA_BIT_MASK(32))
                return B43_DMA_32BIT;
        if (dmamask == DMA_BIT_MASK(64))
                return B43_DMA_64BIT;
        B43_WARN_ON(1);
        return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
                                      int for_tx,
                                      enum b43_dmatype type)
{
        struct b43_dmaring *ring;
        int err;
        dma_addr_t dma_test;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        ring->nr_slots = B43_RXRING_SLOTS;
        if (for_tx)
                ring->nr_slots = B43_TXRING_SLOTS;

        ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->type = type;
        ring->dev = dev;
        ring->mmio_base = b43_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
        if (type == B43_DMA_64BIT)
                ring->ops = &dma64_ops;
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
                } else
                        B43_WARN_ON(1);
        }
        spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
        ring->last_injected_overflow = jiffies;
#endif

        if (for_tx) {
                /* Assumption: B43_TXRING_SLOTS is divisible by TX_SLOTS_PER_FRAME */
                BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

                ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
                                            b43_txhdr_size(dev),
                                            GFP_KERNEL);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;

                /* test for ability to dma to txhdr_cache */
                dma_test = ssb_dma_map_single(dev->dev,
                                              ring->txhdr_cache,
                                              b43_txhdr_size(dev),
                                              DMA_TO_DEVICE);

                if (b43_dma_mapping_error(ring, dma_test,
                                          b43_txhdr_size(dev), 1)) {
                        /* ugh realloc */
                        kfree(ring->txhdr_cache);
                        ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
                                                    b43_txhdr_size(dev),
                                                    GFP_KERNEL | GFP_DMA);
                        if (!ring->txhdr_cache)
                                goto err_kfree_meta;

                        dma_test = ssb_dma_map_single(dev->dev,
                                                      ring->txhdr_cache,
                                                      b43_txhdr_size(dev),
                                                      DMA_TO_DEVICE);

                        if (b43_dma_mapping_error(ring, dma_test,
                                                  b43_txhdr_size(dev), 1)) {

                                b43err(dev->wl,
                                       "TXHDR DMA allocation failed\n");
                                goto err_kfree_txhdr_cache;
                        }
                }

                ssb_dma_unmap_single(dev->dev,
                                     dma_test, b43_txhdr_size(dev),
                                     DMA_TO_DEVICE);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_txhdr_cache;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

      out:
        return ring;

      err_free_ringmemory:
        free_ringmemory(ring);
      err_kfree_txhdr_cache:
        kfree(ring->txhdr_cache);
      err_kfree_meta:
        kfree(ring->meta);
      err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

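/* do_div() divides a 64bit value in place: the quotient is left in the
 * first argument and the macro itself evaluates to the remainder. The
 * two wrappers below expose those two results without clobbering "a". */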
#define divide(a, b)    ({      \
        typeof(a) __a = a;      \
        do_div(__a, b);         \
        __a;                    \
  })

#define modulo(a, b)    ({      \
        typeof(a) __a = a;      \
        do_div(__a, b);         \
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
                                const char *ringname)
{
        if (!ring)
                return;

#ifdef CONFIG_B43_DEBUG
        {
                /* Print some statistics. */
                u64 failed_packets = ring->nr_failed_tx_packets;
                u64 succeed_packets = ring->nr_succeed_tx_packets;
                u64 nr_packets = failed_packets + succeed_packets;
                u64 permille_failed = 0, average_tries = 0;

                if (nr_packets)
                        permille_failed = divide(failed_packets * 1000, nr_packets);
                if (nr_packets)
                        average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

                b43dbg(ring->dev->wl, "DMA-%u %s: "
                       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
                       "Average tries %llu.%02llu\n",
                       (unsigned int)(ring->type), ringname,
                       ring->max_used_slots,
                       ring->nr_slots,
                       (unsigned long long)failed_packets,
                       (unsigned long long)nr_packets,
                       (unsigned long long)divide(permille_failed, 10),
                       (unsigned long long)modulo(permille_failed, 10),
                       (unsigned long long)divide(average_tries, 100),
                       (unsigned long long)modulo(average_tries, 100));
        }
#endif /* DEBUG */

        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to handle concurrency with the RX handler.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->txhdr_cache);
        kfree(ring->meta);
        kfree(ring);
}

#define destroy_ring(dma, ring) do {                            \
        b43_destroy_dmaring((dma)->ring, __stringify(ring));    \
        (dma)->ring = NULL;                                     \
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
        struct b43_dma *dma;

        if (b43_using_pio_transfers(dev))
                return;
        dma = &dev->dma;

        destroy_ring(dma, rx_ring);
        destroy_ring(dma, tx_ring_AC_BK);
        destroy_ring(dma, tx_ring_AC_BE);
        destroy_ring(dma, tx_ring_AC_VI);
        destroy_ring(dma, tx_ring_AC_VO);
        destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
        u64 orig_mask = mask;
        bool fallback = 0;
        int err;

        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
                err = ssb_dma_set_mask(dev->dev, mask);
                if (!err)
                        break;
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = 1;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
                        fallback = 1;
                        continue;
                }
                b43err(dev->wl, "The machine/kernel does not support "
                       "the required %u-bit DMA mask\n",
                       (unsigned int)dma_mask_to_engine_type(orig_mask));
                return -EOPNOTSUPP;
        }
        if (fallback) {
                b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
                        (unsigned int)dma_mask_to_engine_type(orig_mask),
                        (unsigned int)dma_mask_to_engine_type(mask));
        }

        return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
        struct b43_dma *dma = &dev->dma;
        int err;
        u64 dmamask;
        enum b43_dmatype type;

        dmamask = supported_dma_mask(dev);
        type = dma_mask_to_engine_type(dmamask);
        err = b43_dma_set_mask(dev, dmamask);
        if (err)
                return err;

        err = -ENOMEM;
        /* setup TX DMA channels. */
        dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
        if (!dma->tx_ring_AC_BK)
                goto out;

        dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
        if (!dma->tx_ring_AC_BE)
                goto err_destroy_bk;

        dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
        if (!dma->tx_ring_AC_VI)
                goto err_destroy_be;

        dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
        if (!dma->tx_ring_AC_VO)
                goto err_destroy_vi;

        dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
        if (!dma->tx_ring_mcast)
                goto err_destroy_vo;

        /* setup RX DMA channel. */
        dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
        if (!dma->rx_ring)
                goto err_destroy_mcast;

        /* No support for the TX status DMA ring. */
        B43_WARN_ON(dev->dev->id.revision < 5);

        b43dbg(dev->wl, "%u-bit DMA initialized\n",
               (unsigned int)type);
        err = 0;
out:
        return err;

err_destroy_mcast:
        destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
        destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
        destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
        destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
        destroy_ring(dma, tx_ring_AC_BK);
        return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
        u16 cookie;

        /* Use the upper 4 bits of the cookie as the
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as that
         * is a special value used in the RX path.
         * It must not be 0xFFFF either, because that value
         * is special-cased for multicast frames.
         */
        cookie = (((u16)ring->index + 1) << 12);
        B43_WARN_ON(slot & ~0x0FFF);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
        struct b43_dma *dma = &dev->dma;
        struct b43_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0x1000:
                ring = dma->tx_ring_AC_BK;
                break;
        case 0x2000:
                ring = dma->tx_ring_AC_BE;
                break;
        case 0x3000:
                ring = dma->tx_ring_AC_VI;
                break;
        case 0x4000:
                ring = dma->tx_ring_AC_VO;
                break;
        case 0x5000:
                ring = dma->tx_ring_mcast;
                break;
        default:
                B43_WARN_ON(1);
        }
        *slot = (cookie & 0x0FFF);
        B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

        return ring;
}

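/* Queue one frame for transmission. This consumes TX_SLOTS_PER_FRAME (2)
 * slots: one for the generated TX header out of txhdr_cache and one for
 * the ieee80211 frame body. If mapping the body fails, the data is copied
 * into a GFP_DMA bounce skb and mapped again. */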
static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u8 *header;
        int slot, old_top_slot, old_used_slots;
        int err;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_dmadesc_meta *meta_hdr;
        struct sk_buff *bounce_skb;
        u16 cookie;
        size_t hdrsize = b43_txhdr_size(ring->dev);

        /* Important note: If the number of used DMA slots per TX frame
         * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
         * the file has to be updated, too!
         */

        old_top_slot = ring->current_slot;
        old_used_slots = ring->used_slots;

        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
        memset(meta_hdr, 0, sizeof(*meta_hdr));

        header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
        cookie = generate_cookie(ring, slot);
        err = b43_generate_txhdr(ring->dev, header,
                                 skb->data, skb->len, info, cookie);
        if (unlikely(err)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return err;
        }

        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           hdrsize, 1);
        if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return -EIO;
        }
        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             hdrsize, 1, 0, 0);

        /* Get a slot for the payload. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta);
        memset(meta, 0, sizeof(*meta));

        meta->skb = skb;
        meta->is_last_fragment = 1;

        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }

                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
                if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -EIO;
                        goto out_free_bounce;
                }
        }

        ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Tell the firmware about the cookie of the last
                 * mcast frame, so it can clear the more-data bit in it. */
                b43_shm_write16(ring->dev, B43_SHM_SHARED,
                                B43_SHM_SH_MCASTCOOKIE, cookie);
        }
        /* Now transfer the whole frame. */
        wmb();
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;

out_free_bounce:
        dev_kfree_skb_any(skb);
out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                         hdrsize, 1);
        return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
        if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
                /* Check if we should inject another ringbuffer overflow
                 * to test handling of this situation in the stack. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43dbg(ring->dev->wl,
                               "Injecting TX ring overflow on "
                               "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43_DEBUG */
        return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
                                                   u8 queue_prio)
{
        struct b43_dmaring *ring;

        if (dev->qos_enabled) {
                /* 0 = highest priority */
                switch (queue_prio) {
                default:
                        B43_WARN_ON(1);
                        /* fallthrough */
                case 0:
                        ring = dev->dma.tx_ring_AC_VO;
                        break;
                case 1:
                        ring = dev->dma.tx_ring_AC_VI;
                        break;
                case 2:
                        ring = dev->dma.tx_ring_AC_BE;
                        break;
                case 3:
                        ring = dev->dma.tx_ring_AC_BK;
                        break;
                }
        } else
                ring = dev->dma.tx_ring_AC_BE;

        return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
        struct b43_dmaring *ring;
        struct ieee80211_hdr *hdr;
        int err = 0;
        unsigned long flags;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        hdr = (struct ieee80211_hdr *)skb->data;
        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Frames on the multicast ring are sent after the DTIM. */
                ring = dev->dma.tx_ring_mcast;
                /* Set the more-data bit. Ucode will clear it on
                 * the last frame for us. */
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else {
                /* Decide by priority where to put this frame. */
                ring = select_ring_by_priority(
                        dev, skb_get_queue_mapping(skb));
        }

        spin_lock_irqsave(&ring->lock, flags);

        B43_WARN_ON(!ring->tx);
        /* Check if the queue was stopped in mac80211,
         * but we got called nevertheless.
         * That would be a mac80211 bug. */
        B43_WARN_ON(ring->stopped);

        if (unlikely(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
                b43warn(dev->wl, "DMA queue overflow\n");
                err = -ENOSPC;
                goto out_unlock;
        }

        /* Assign the queue number to the ring (if not already done before)
         * so TX status handling can use it. The queue to ring mapping is
         * static, so we don't need to store it per frame. */
        ring->queue_prio = skb_get_queue_mapping(skb);

        err = dma_tx_fragment(ring, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we don't have the encryption key
                 * anymore and must not transmit it unencrypted. */
                dev_kfree_skb_any(skb);
                err = 0;
                goto out_unlock;
        }
        if (unlikely(err)) {
                b43err(dev->wl, "DMA tx mapping failure\n");
                goto out_unlock;
        }
        ring->nr_tx_packets++;
        if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                ring->stopped = 1;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
        }
out_unlock:
        spin_unlock_irqrestore(&ring->lock, flags);

        return err;
}

/* Called with IRQs disabled. */
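/* Walks the slots belonging to the cookie's frame, unmaps the header and
 * body buffers, and reports the TX status back to mac80211. */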
1382 void b43_dma_handle_txstatus(struct b43_wldev *dev,
1383                              const struct b43_txstatus *status)
1384 {
1385         const struct b43_dma_ops *ops;
1386         struct b43_dmaring *ring;
1387         struct b43_dmadesc_generic *desc;
1388         struct b43_dmadesc_meta *meta;
1389         int slot;
1390         bool frame_succeed;
1391
1392         ring = parse_cookie(dev, status->cookie, &slot);
1393         if (unlikely(!ring))
1394                 return;
1395
1396         spin_lock(&ring->lock); /* IRQs are already disabled. */
1397
1398         B43_WARN_ON(!ring->tx);
1399         ops = ring->ops;
1400         while (1) {
1401                 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1402                 desc = ops->idx2desc(ring, slot, &meta);
1403
1404                 if (meta->skb)
1405                         unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
1406                                          1);
1407                 else
1408                         unmap_descbuffer(ring, meta->dmaaddr,
1409                                          b43_txhdr_size(dev), 1);
1410
1411                 if (meta->is_last_fragment) {
1412                         struct ieee80211_tx_info *info;
1413
1414                         BUG_ON(!meta->skb);
1415
1416                         info = IEEE80211_SKB_CB(meta->skb);
1417
1418                         /*
1419                          * Call back to inform the ieee80211 subsystem about
1420                          * the status of the transmission.
1421                          */
1422                         frame_succeed = b43_fill_txstatus_report(dev, info, status);
1423 #ifdef CONFIG_B43_DEBUG
1424                         if (frame_succeed)
1425                                 ring->nr_succeed_tx_packets++;
1426                         else
1427                                 ring->nr_failed_tx_packets++;
1428                         ring->nr_total_packet_tries += status->frame_count;
1429 #endif /* DEBUG */
1430                         ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1431
1432                         /* skb is freed by ieee80211_tx_status_irqsafe() */
1433                         meta->skb = NULL;
1434                 } else {
1435                         /* No need to call free_descriptor_buffer here, as
1436                          * this is only the txhdr, which is not allocated.
1437                          */
1438                         B43_WARN_ON(meta->skb);
1439                 }
1440
1441                 /* Everything unmapped and free'd. So it's not used anymore. */
1442                 ring->used_slots--;
1443
                if (meta->is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
                ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
                ring->stopped = 0;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                }
        }

        spin_unlock(&ring->lock);
}

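/*
 * Report per-queue TX statistics to mac80211. Each frame occupies
 * TX_SLOTS_PER_FRAME descriptor slots (header + payload), so the slot
 * counts are divided by that factor to get frame counts. For example,
 * with the current two slots per frame, a 256-slot ring with 10 used
 * slots reports a limit of 128 frames, 5 of them queued.
 */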
void b43_dma_get_tx_stats(struct b43_wldev *dev,
                          struct ieee80211_tx_queue_stats *stats)
{
        const int nr_queues = dev->wl->hw->queues;
        struct b43_dmaring *ring;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_queues; i++) {
                ring = select_ring_by_priority(dev, i);

                spin_lock_irqsave(&ring->lock, flags);
                stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
                stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
                stats[i].count = ring->nr_tx_packets;
                spin_unlock_irqrestore(&ring->lock, flags);
        }
}

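/*
 * Process one received frame. The descriptor buffer is synced for CPU
 * access and the frame length is read from the RX header. Error cases
 * (zero length, untouched poison pattern, oversized frame) are handled
 * by recycling the buffer. On success, a fresh buffer is attached to
 * the descriptor and the filled skb is passed up via b43_rx().
 */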
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_rxhdr_fw4 *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ops->idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
        len = le16_to_cpu(rxhdr->frame_len);
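        /* A zero frame length most likely means the hardware has not
         * finished writing the RX header back to memory yet, so poll it
         * briefly before giving up and recycling the buffer. */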
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_len);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        dmaaddr = meta->dmaaddr;
                        goto drop_recycle_buffer;
                }
        }
        if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
                /* Something went wrong with the DMA: the poison pattern
                 * is still intact, so the device never wrote to this
                 * buffer. */
                b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
                dmaaddr = meta->dmaaddr;
                goto drop_recycle_buffer;
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we allocate buffers that
                 * are big enough. So simply drop the packet and recycle
                 * all descriptor buffers it spans.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ops->idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        b43_poison_rx_buffer(ring, meta->skb);
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                b43err(ring->dev->wl, "DMA RX buffer too small "
                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
                       len, ring->rx_buffersize, cnt);
                goto drop;
        }

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
                goto drop_recycle_buffer;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
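        /* The hardware writes the frame ring->frameoffset bytes into the
         * buffer. Set the skb's total length first, then strip the
         * leading offset so that skb->data points at the actual frame. */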
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        b43_rx(ring->dev, skb, rxhdr);
drop:
        return;

drop_recycle_buffer:
        /* Poison and recycle the RX buffer. */
        b43_poison_rx_buffer(ring, skb);
        sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

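/*
 * Drain an RX ring: process every filled slot between the driver's last
 * position and the slot the hardware is currently writing to, then tell
 * the hardware how far we got.
 */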
void b43_dma_rx(struct b43_dmaring *ring)
{
        const struct b43_dma_ops *ops = ring->ops;
        int slot, current_slot;
        int used_slots = 0;

        B43_WARN_ON(ring->tx);
        current_slot = ops->get_current_rxslot(ring);
        B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
                update_max_used_slots(ring, ++used_slots);
        }
        ops->set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}

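/* Suspend/resume the TX DMA engine of a single ring. The ring lock is
 * taken because these helpers may run concurrently with the TX path. */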
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_suspend(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_resume(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

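/*
 * Suspend/resume TX on all rings. The device is kept awake
 * (B43_PS_AWAKE) while TX is suspended; resume restores the default
 * power-saving behaviour and walks the rings in the reverse order of
 * suspend.
 */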
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
        b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
        b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
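/* Set or clear the direct-FIFO-RX bit in the RX control register of a
 * single DMA engine, handling both the 32-bit and the 64-bit register
 * layouts. */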
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
                           u16 mmio_base, bool enable)
{
        u32 ctl;

        if (type == B43_DMA_64BIT) {
                ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
                ctl &= ~B43_DMA64_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA64_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
        } else {
                ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
                ctl &= ~B43_DMA32_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA32_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
        }
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on an RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
                            unsigned int engine_index, bool enable)
{
        enum b43_dmatype type;
        u16 mmio_base;

        type = dma_mask_to_engine_type(supported_dma_mask(dev));

        mmio_base = b43_dmacontroller_base(type, engine_index);
        direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */