/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>
/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
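
/* The 32-bit and 64-bit DMA engines use different descriptor layouts but
 * are driven identically.  Each engine variant below implements the same
 * small set of operations (struct b43_dma_ops), so the rest of the driver
 * stays agnostic of the descriptor format in use. */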
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	/* Split the bus address into the low bits and the SSB address
	 * extension bits, and merge in the core's address translation. */
	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
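
/* Ring slot accounting.  A ring is a circular array of nr_slots
 * descriptors.  For TX, current_slot is the slot most recently handed
 * out and used_slots counts slots currently owned by the hardware; for
 * RX, current_slot is the next slot the driver will process. */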
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
static dma_addr_t map_descbuffer(struct b43_dmaring *ring,
				 unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}
static void unmap_descbuffer(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}
static void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static void sync_descbuffer_for_device(struct b43_dmaring *ring,
				       dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}
static void free_descriptor_buffer(struct b43_dmaring *ring,
				   struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
	/* dma_free_coherent() takes no GFP flags, so there is nothing
	 * to mirror from the allocation flags used above. */
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
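
/* RX buffers are "poisoned" before being handed to the device: the
 * frame length in the prepended RX header is zeroed and the start of
 * the frame area is filled with 0xFF (see b43_poison_rx_buffer below).
 * If a buffer comes back with the poison pattern intact, the device
 * never wrote to it, which indicates a DMA failure. */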
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
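
/* Probe the DMA addressing capability of the core.  64-bit capability
 * is advertised in the SSB TMSHIGH flags; otherwise distinguish 32-bit
 * from 30-bit engines by checking whether the address-extension bits
 * in the first controller's TXCTL register can be set at all. */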
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);
		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;
			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);
			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
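
/* do_div() divides a u64 by a 32-bit divisor in place and returns the
 * remainder.  That is why the statement-expression for divide() yields
 * __a (the quotient), while modulo() yields the do_div() return value.
 * Plain 64-bit '/' and '%' are not available on all 32-bit targets. */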
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
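
/* Example: slot 5 on the AC_VI ring (controller index 2) yields the
 * cookie 0x3005 - controller ID 3 in the upper nibble, slot number in
 * the lower 12 bits.  parse_cookie() below inverts this mapping. */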
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
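
/* A queued frame thus occupies exactly TX_SLOTS_PER_FRAME (2) slots:
 * the header descriptor (FRAMESTART, no IRQ) followed by the body
 * descriptor (FRAMEEND + IRQ), so the device raises one interrupt per
 * completed frame rather than one per descriptor. */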
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot.  The subtraction may wrap below zero, in which case
	 * we are off by one full ring length. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;

		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
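
/* Service an RX ring: the hardware advances its RX descriptor pointer
 * as frames arrive, and we process every slot from our last position
 * up to (but not including) the hardware's current one, then write the
 * new position back to the device. */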
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}