 * Copyright (c) 2010 Broadcom Corporation
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <brcmu_utils.h>
 * DMA hardware requires each descriptor ring to be 8 kB aligned and to fit
 * within a contiguous 8 kB physical address range.
#define D64RINGALIGN_BITS 13
#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
#define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
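/*
 * Each struct dma64desc below is four u32 words (16 bytes), so an 8 kB ring
 * holds at most D64MAXDD = 8192 / 16 = 512 descriptors.
 */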
/* transmit channel control */
#define D64_XC_XE 0x00000001 /* transmit enable */
#define D64_XC_SE 0x00000002 /* transmit suspend request */
#define D64_XC_LE 0x00000004 /* loopback enable */
#define D64_XC_FL 0x00000010 /* flush request */
#define D64_XC_PD 0x00000800 /* parity check disable */
#define D64_XC_AE 0x00030000 /* address extension bits */
#define D64_XC_AE_SHIFT 16
/* transmit descriptor table pointer */
#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
/* transmit channel status */
#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
#define D64_XS0_XS_SHIFT 28
#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
#define D64_XS1_XE_SHIFT 28
#define D64_XS1_XE_NOERR 0x00000000 /* no error */
#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
#define D64_XS1_XE_COREE 0x50000000 /* core error */
/* receive channel control */
#define D64_RC_RE 0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK 0x000000fe
#define D64_RC_RO_SHIFT 1
/* direct fifo receive (pio) mode */
#define D64_RC_FM 0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH 0x00000200
/* overflow continue */
#define D64_RC_OC 0x00000400
/* parity check disable */
#define D64_RC_PD 0x00000800
/* address extension bits */
#define D64_RC_AE 0x00030000
#define D64_RC_AE_SHIFT 16
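/*
 * The receive frame offset field (D64_RC_RO) is loaded with di->rxoffset by
 * _dma_rxenable(); the core leaves that many bytes of receive (DMA) header at
 * the start of each buffer before the frame data (see dma_rx() below).
 */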
/* flags for dma controller */
#define DMA_CTRL_PEN (1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC (1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI (1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED (1 << 3)
/* receive descriptor table pointer */
#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
/* receive channel status */
#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
#define D64_RS0_RS_SHIFT 28
#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
#define D64_RS1_RE_SHIFT 28
#define D64_RS1_RE_NOERR 0x00000000 /* no error */
#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
#define D64_RS1_RE_COREE 0x50000000 /* core error */
#define D64_FA_OFF_MASK 0xffff /* offset */
#define D64_FA_SEL_MASK 0xf0000 /* select */
#define D64_FA_SEL_SHIFT 16
#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
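/*
 * Typical use (see dma_txfast() and dma_rxfill() below): the first buffer of
 * a frame gets SOF, the last buffer gets EOF (plus IOC to raise an interrupt
 * on completion), and whichever descriptor occupies the last ring slot also
 * gets EOT so the hardware wraps back to the start of the table.
 */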
/* descriptor control flags 2 */
/* buffer byte count; real data length must be <= 16 KB */
#define D64_CTRL2_BC_MASK 0x00007fff
/* address extension bits */
#define D64_CTRL2_AE 0x00030000
#define D64_CTRL2_AE_SHIFT 16
#define D64_CTRL2_PARITY 0x00040000
/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK 0x0ff00000
#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
 * packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
#define BCMEXTRAHDROOM 172
#define DMA_ERROR(args) \
        if (!(*di->msg_level & 1)) \
#define DMA_TRACE(args) \
        if (!(*di->msg_level & 2)) \
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#define DMA_NONE(args)
#define MAXNAMEL 8 /* 8 char names */
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))
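/*
 * Example: dma_rxfill() programs the rx ptr register with
 * rcvptrbase + I2B(rxout, struct dma64desc), while dma_rx() and
 * dma_getnexttxp() use B2I() to turn the CurrDescr byte offset read from the
 * status0 register back into a ring index.
 */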
#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
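/*
 * Address extension: a 32-bit PCI address whose top two bits [31:30] are set
 * cannot be programmed directly, so dma64_dd_upd() and _dma_ddtable_init()
 * move those bits into the AE field (D64_CTRL2_AE / D64_XC_AE / D64_RC_AE)
 * and clear them from the low address word.
 */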
 * Descriptors are only read by the hardware, never written back.
        u32 ctrl1; /* misc control bits & bufcount */
        u32 ctrl2; /* buffer count and address extension */
        u32 addrlow; /* memory address of the data buffer, bits 31:0 */
        u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
/* dma engine software state */
        struct dma_pub dma; /* exported structure */
        uint *msg_level; /* message level pointer */
        char name[MAXNAMEL]; /* caller's name for diag msgs */
        struct pci_dev *pbus; /* bus handle */
        bool dma64; /* this dma engine is operating in 64-bit mode */
        bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
        /* 64-bit dma tx engine registers */
        struct dma64regs *d64txregs;
        /* 64-bit dma rx engine registers */
        struct dma64regs *d64rxregs;
        /* pointer to dma64 tx descriptor ring */
        struct dma64desc *txd64;
        /* pointer to dma64 rx descriptor ring */
        struct dma64desc *rxd64;
        u16 dmadesc_align; /* alignment requirement for dma descriptors */
        u16 ntxd; /* # tx descriptors tunable */
        u16 txin; /* index of next descriptor to reclaim */
        u16 txout; /* index of next descriptor to post */
        /* pointer to parallel array of pointers to packets */
        struct sk_buff **txp;
        /* Aligned physical address of descriptor ring */
        /* Original physical address of descriptor ring */
        dma_addr_t txdpaorig;
        u16 txdalign; /* #bytes added to alloc'd mem to align txd */
        u32 txdalloc; /* #bytes allocated for the ring */
        u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
                         * is not just an index; it needs all 13 bits to be
                         * an offset from the addr register.
        u16 nrxd; /* # rx descriptors tunable */
        u16 rxin; /* index of next descriptor to reclaim */
        u16 rxout; /* index of next descriptor to post */
        /* pointer to parallel array of pointers to packets */
        struct sk_buff **rxp;
        /* Aligned physical address of descriptor ring */
        /* Original physical address of descriptor ring */
        dma_addr_t rxdpaorig;
        u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
        u32 rxdalloc; /* #bytes allocated for the ring */
        u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
        unsigned int rxbufsize; /* rx buffer size in bytes, not including
        uint rxextrahdrroom; /* extra rx headroom, reserved to assist the upper
                              * stack, e.g. some rx pkt buffers will be
                              * bridged to the tx side without byte copying.
                              * The extra headroom needs to be large enough
                              * to fit txheader needs. Some dongle drivers may
        uint nrxpost; /* # rx buffers to keep posted */
        unsigned int rxoffset; /* rxcontrol offset */
        /* add to get dma address of descriptor ring, low 32 bits */
        /* add to get dma address of data buffer, low 32 bits */
        /* whether the descriptor base needs to be aligned */
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
static uint dma_msg_level;
/* Check for odd number of 1's */
static u32 parity32(u32 data)
static bool dma64_dd_parity(struct dma64desc *dd)
        return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
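/*
 * Descriptor parity: parity32() reduces its argument to a single bit that is
 * 1 for an odd number of set bits.  When DMA_CTRL_PEN is enabled,
 * dma64_dd_upd() sets D64_CTRL2_PARITY whenever the XOR of all four
 * descriptor words is odd, so the descriptor as seen by the hardware always
 * carries even parity.
 */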
/* descriptor bumping functions */
static uint xxd(uint x, uint n)
        return x & (n - 1); /* faster than %, but n must be a power of 2 */
static uint txd(struct dma_info *di, uint x)
        return xxd(x, di->ntxd);
static uint rxd(struct dma_info *di, uint x)
        return xxd(x, di->nrxd);
static uint nexttxd(struct dma_info *di, uint i)
        return txd(di, i + 1);
static uint prevtxd(struct dma_info *di, uint i)
        return txd(di, i - 1);
static uint nextrxd(struct dma_info *di, uint i)
        return rxd(di, i + 1);
static uint ntxdactive(struct dma_info *di, uint h, uint t)
static uint nrxdactive(struct dma_info *di, uint h, uint t)
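/*
 * ntxdactive()/nrxdactive() return how many descriptors are currently in use
 * between the reclaim index h and the post index t, modulo the ring size;
 * e.g. dma_rxfill() posts di->nrxpost - nrxdactive(di, rxin, rxout) new
 * buffers, and dma_txfast() derives txavail from ntxdactive().
 */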
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
        uint dmactrlflags = di->dma.dmactrlflags;
                DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
        dmactrlflags &= ~mask;
        dmactrlflags |= flags;
        /* If trying to enable parity, check if parity is actually supported */
        if (dmactrlflags & DMA_CTRL_PEN) {
                control = R_REG(&di->d64txregs->control);
                W_REG(&di->d64txregs->control,
                      control | D64_XC_PD);
                if (R_REG(&di->d64txregs->control) & D64_XC_PD)
                        /* We *can* disable it so it is supported,
                         * restore control register
                        W_REG(&di->d64txregs->control,
                        /* Not supported, don't allow it to be enabled */
                        dmactrlflags &= ~DMA_CTRL_PEN;
        di->dma.dmactrlflags = dmactrlflags;
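/*
 * Probe for address-extension support by setting the AE bit, reading it back
 * and clearing it again; if the bit reads back as set, the engine implements
 * DmaExtendedAddrChanges.
 */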
static bool _dma64_addrext(struct dma64regs *dma64regs)
        OR_REG(&dma64regs->control, D64_XC_AE);
        w = R_REG(&dma64regs->control);
        AND_REG(&dma64regs->control, ~D64_XC_AE);
        return (w & D64_XC_AE) == D64_XC_AE;
 * return true if this dma engine supports DmaExtendedAddrChanges,
static bool _dma_isaddrext(struct dma_info *di)
        /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
        /* not all tx or rx channels are available */
        if (di->d64txregs != NULL) {
                if (!_dma64_addrext(di->d64txregs))
                        DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
                                   "AE set\n", di->name));
        } else if (di->d64rxregs != NULL) {
                if (!_dma64_addrext(di->d64rxregs))
                        DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
                                   "AE set\n", di->name));
static bool _dma_descriptor_align(struct dma_info *di)
        /* Check to see if the descriptors need to be aligned on 4K/8K or not */
        if (di->d64txregs != NULL) {
                W_REG(&di->d64txregs->addrlow, 0xff0);
                addrl = R_REG(&di->d64txregs->addrlow);
        } else if (di->d64rxregs != NULL) {
                W_REG(&di->d64rxregs->addrlow, 0xff0);
                addrl = R_REG(&di->d64rxregs->addrlow);
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
                                  u16 align_bits, uint *alloced,
                u16 align = (1 << align_bits);
                if (!IS_ALIGNED(PAGE_SIZE, align))
        return pci_alloc_consistent(pdev, size, pap);
u8 dma_align_sizetobits(uint size)
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page boundary,
 * it is freed and the allocation is redone at a location aligned to the
 * descriptor ring size, which guarantees that the ring cannot cross a page
 * boundary.
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
                           u16 *alignbits, uint *alloced,
        u32 alignbytes = 1 << *alignbits;
        va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
        desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
        if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
                *alignbits = dma_align_sizetobits(size);
                pci_free_consistent(di->pbus, size, va, *descpa);
                va = dma_alloc_consistent(di->pbus, size, *alignbits,
static bool dma64_alloc(struct dma_info *di, uint direction)
        ddlen = sizeof(struct dma64desc);
        size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
        align_bits = di->dmadesc_align;
        align = (1 << align_bits);
        if (direction == DMA_TX) {
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                                   &alloced, &di->txdpaorig);
                        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
                                   " failed\n", di->name));
                align = (1 << align_bits);
                di->txd64 = (struct dma64desc *)
                                roundup((unsigned long)va, align);
                di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
                di->txdpa = di->txdpaorig + di->txdalign;
                di->txdalloc = alloced;
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                                   &alloced, &di->rxdpaorig);
                        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
                                   " failed\n", di->name));
                align = (1 << align_bits);
                di->rxd64 = (struct dma64desc *)
                                roundup((unsigned long)va, align);
                di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
                di->rxdpa = di->rxdpaorig + di->rxdalign;
                di->rxdalloc = alloced;
static bool _dma_alloc(struct dma_info *di, uint direction)
        return dma64_alloc(di, direction);
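/*
 * A minimal sketch of how a caller might attach a DMA engine; names such as
 * NTXD, NRXD, RXBUFSZ, NRXBUFPOST and HWRXOFF are hypothetical tunables, not
 * defined in this file:
 *
 *      di = dma_attach("wl0", sih, txregs, rxregs, NTXD, NRXD,
 *                      RXBUFSZ, -1, NRXBUFPOST, HWRXOFF, &msg_level);
 *
 * Passing -1 for rxextheadroom selects the default BCMEXTRAHDROOM.
 */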
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
                           void *dmaregstx, void *dmaregsrx, uint ntxd,
                           uint nrxd, uint rxbufsize, int rxextheadroom,
                           uint nrxpost, uint rxoffset, uint *msg_level)
        /* allocate private info structure */
        di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
                printk(KERN_ERR "dma_attach: out of memory\n");
        di->msg_level = msg_level ? msg_level : &dma_msg_level;
        di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
        /* init dma reg pointers */
        di->d64txregs = (struct dma64regs *) dmaregstx;
        di->d64rxregs = (struct dma64regs *) dmaregsrx;
         * Default flags (which can be changed by the driver calling
         * dma_ctrlflags before enable): For backwards compatibility
         * both Rx Overflow Continue and Parity are DISABLED.
        _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
        DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
                   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
                   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
                   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
                   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
        /* make a private copy of our caller's name */
        strncpy(di->name, name, MAXNAMEL);
        di->name[MAXNAMEL - 1] = '\0';
        di->pbus = ((struct si_info *)sih)->pbus;
        di->ntxd = (u16) ntxd;
        di->nrxd = (u16) nrxd;
        /* the actual dma size doesn't include the extra headroom */
            (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
        if (rxbufsize > BCMEXTRAHDROOM)
                di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
                di->rxbufsize = (u16) rxbufsize;
        di->nrxpost = (u16) nrxpost;
        di->rxoffset = (u8) rxoffset;
         * figure out the DMA physical address offset for dd and data
         *     PCI/PCIe: they map the silicon backplane address to zero-based
         *     memory, so an offset is needed
         *     Other bus: use zero; SI_BUS BIGENDIAN kludge: use the sdram
         *     swapped region for data buffers, not descriptors
        di->dataoffsetlow = 0;
        /* add offset for pcie with DMA64 bus */
        di->ddoffsethigh = SI_PCIE_DMA_H32;
        di->dataoffsetlow = di->ddoffsetlow;
        di->dataoffsethigh = di->ddoffsethigh;
        /* WAR64450: DMACtl.Addr ext fields are not supported in SDIOD core. */
        if ((ai_coreid(sih) == SDIOD_CORE_ID)
            && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
        else if ((ai_coreid(sih) == I2S_CORE_ID) &&
                 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
        di->addrext = _dma_isaddrext(di);
        /* determine whether descriptors need to be aligned and, if so, to 4K or 8K */
        di->aligndesc_4k = _dma_descriptor_align(di);
        if (di->aligndesc_4k) {
                di->dmadesc_align = D64RINGALIGN_BITS;
                if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
                        /* for a smaller descriptor table, HW relaxes the alignment requirement */
                        di->dmadesc_align = D64RINGALIGN_BITS - 1;
                di->dmadesc_align = 4; /* 16 byte alignment */
        DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
                  di->aligndesc_4k, di->dmadesc_align));
        /* allocate tx packet pointer vector */
                size = ntxd * sizeof(void *);
                di->txp = kzalloc(size, GFP_ATOMIC);
                if (di->txp == NULL) {
                        DMA_ERROR(("%s: dma_attach: out of tx memory\n",
        /* allocate rx packet pointer vector */
                size = nrxd * sizeof(void *);
                di->rxp = kzalloc(size, GFP_ATOMIC);
                if (di->rxp == NULL) {
                        DMA_ERROR(("%s: dma_attach: out of rx memory\n",
         * allocate transmit descriptor ring, only need ntxd descriptors
         * but it must be aligned
        if (!_dma_alloc(di, DMA_TX))
         * allocate receive descriptor ring, only need nrxd descriptors
         * but it must be aligned
        if (!_dma_alloc(di, DMA_RX))
        if ((di->ddoffsetlow != 0) && !di->addrext) {
                if (di->txdpa > SI_PCI_DMA_SZ) {
                        DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
                                   "supported\n", di->name, (u32)di->txdpa));
                if (di->rxdpa > SI_PCI_DMA_SZ) {
                        DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
                                   "supported\n", di->name, (u32)di->rxdpa));
        DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
                   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
                   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
        return (struct dma_pub *) di;
        dma_detach((struct dma_pub *)di);
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
             dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
        u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
        /* PCI bus with a big (>1 GB) physical address: use address extension */
        if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
                ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
                ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
                ddring[outidx].ctrl1 = cpu_to_le32(*flags);
                ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
                /* address extension for 32-bit PCI */
                ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
                pa &= ~PCI32ADDR_HIGH;
                ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
                ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
                ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
                ddring[outidx].ctrl1 = cpu_to_le32(*flags);
                ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
        if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
                if (dma64_dd_parity(&ddring[outidx]))
                        ddring[outidx].ctrl2 =
                                cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_detach\n", di->name));
        /* free dma descriptor rings */
                pci_free_consistent(di->pbus, di->txdalloc,
                                    ((s8 *)di->txd64 - di->txdalign),
                pci_free_consistent(di->pbus, di->rxdalloc,
                                    ((s8 *)di->rxd64 - di->rxdalign),
        /* free packet pointer vectors */
        /* free our private info structure */
/* initialize descriptor table base address */
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
        if (!di->aligndesc_4k) {
                if (direction == DMA_TX)
        if ((di->ddoffsetlow == 0)
            || !(pa & PCI32ADDR_HIGH)) {
                if (direction == DMA_TX) {
                        W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
                        W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
                        W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
                        W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
                /* DMA64 32-bit address extension */
                /* shift the high bit(s) from pa to ae */
                ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
                pa &= ~PCI32ADDR_HIGH;
                if (direction == DMA_TX) {
                        W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
                        W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
                        SET_REG(&di->d64txregs->control,
                                D64_XC_AE, (ae << D64_XC_AE_SHIFT));
                        W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
                        W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
                        SET_REG(&di->d64rxregs->control,
                                D64_RC_AE, (ae << D64_RC_AE_SHIFT));
static void _dma_rxenable(struct dma_info *di)
        uint dmactrlflags = di->dma.dmactrlflags;
        DMA_TRACE(("%s: dma_rxenable\n", di->name));
                  (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_RC_PD;
        if (dmactrlflags & DMA_CTRL_ROC)
                control |= D64_RC_OC;
        W_REG(&di->d64rxregs->control,
              ((di->rxoffset << D64_RC_RO_SHIFT) | control));
void dma_rxinit(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_rxinit\n", di->name));
        di->rxin = di->rxout = 0;
        /* clear rx descriptor ring */
        memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));
        /* A DMA engine without an alignment requirement requires the table to
         * be initialized before enabling the engine
        if (!di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_RX, di->rxdpa);
        if (di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_RX, di->rxdpa);
static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
        /* return if no packets posted */
            B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
        /* ignore curr if forceall */
        if (!forceall && (i == curr))
        /* get the packet pointer that corresponds to the rx descriptor */
        pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
        /* clear this packet from the descriptor ring */
        pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
        di->rxd64[i].addrlow = 0xdeadbeef;
        di->rxd64[i].addrhigh = 0xdeadbeef;
        di->rxin = nextrxd(di, i);
static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
        return dma64_getnextrxp(di, forceall);
 * !! rx entry routine
 * Returns a pointer to the next frame received, or NULL if there are no more.
 * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is supported
 * via a packet chain; otherwise an oversized frame is treated as a giant
 * packet and will be tossed.
 * DMA scattering starts with a normal DMA header followed by the first
 * buffer's data; once the buffer size is reached, the data continues in the
 * next DMA descriptor's buffer WITHOUT a DMA header.
struct sk_buff *dma_rx(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        struct sk_buff *p, *head, *tail;
        head = _dma_getnextrxp(di, false);
        len = le16_to_cpu(*(u16 *) (head->data));
        DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
        dma_spin_for_len(len, head);
        /* set actual length */
        pkt_len = min((di->rxoffset + len), di->rxbufsize);
        __skb_trim(head, pkt_len);
        resid = len - (di->rxbufsize - di->rxoffset);
        /* check for single or multi-buffer rx */
                while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
                        pkt_len = min_t(uint, resid, di->rxbufsize);
                        __skb_trim(p, pkt_len);
                        resid -= di->rxbufsize;
                            B2I(((R_REG(&di->d64rxregs->status0) &
                                 di->rcvptrbase) & D64_RS0_CD_MASK,
                        DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
                                   di->rxin, di->rxout, cur));
                if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
                        DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
                        brcmu_pkt_buf_free_skb(head);
static bool dma64_rxidle(struct dma_info *di)
        DMA_TRACE(("%s: dma_rxidle\n", di->name));
        return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
                (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
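/*
 * The rx ring is idle when the hardware's current descriptor (the CD field of
 * status0) has caught up with the last descriptor software posted via the ptr
 * register, i.e. everything posted has already been consumed.
 */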
 * post receive buffers
 * Return false if the refill failed completely and the ring is empty; this
 * will stall the rx DMA, and the caller may want to call rxfill again asap.
 * That rarely happens on a memory-rich NIC, but often on a memory-constrained
 * dongle.
bool dma_rxfill(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        uint extra_offset = 0;
         * Determine how many receive buffers we're lacking
         * from the full complement, allocate, initialize,
         * and post them, then update the chip rx lastdscr.
        n = di->nrxpost - nrxdactive(di, rxin, rxout);
        DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
        if (di->rxbufsize > BCMEXTRAHDROOM)
                extra_offset = di->rxextrahdrroom;
        for (i = 0; i < n; i++) {
                 * di->rxbufsize doesn't include the extra headroom,
                 * so add it to the size to be allocated
                p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
                        DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
                        if (i == 0 && dma64_rxidle(di)) {
                                DMA_ERROR(("%s: rxfill64: ring is empty !\n",
                /* reserve the extra headroom, if applicable */
                        skb_pull(p, extra_offset);
                /* Do a cached write instead of an uncached write, since
                 * DMA_MAP will flush the cache.
                *(u32 *) (p->data) = 0;
                pa = pci_map_single(di->pbus, p->data,
                                    di->rxbufsize, PCI_DMA_FROMDEVICE);
                /* save the free packet pointer */
                /* reset flags for each descriptor */
                if (rxout == (di->nrxd - 1))
                        flags = D64_CTRL1_EOT;
                dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
                rxout = nextrxd(di, rxout);
        /* update the chip lastdscr pointer */
        W_REG(&di->d64rxregs->ptr,
              di->rcvptrbase + I2B(rxout, struct dma64desc));
void dma_rxreclaim(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
        while ((p = _dma_getnextrxp(di, true)))
                brcmu_pkt_buf_free_skb(p);
void dma_counterreset(struct dma_pub *pub)
        /* reset all software counters */
/* get the address of the variable so the caller can change it later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
        struct dma_info *di = (struct dma_info *)pub;
        if (!strcmp(name, "&txavail"))
                return (unsigned long)&(di->dma.txavail);
/* 64-bit DMA functions */
void dma_txinit(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        u32 control = D64_XC_XE;
        DMA_TRACE(("%s: dma_txinit\n", di->name));
        di->txin = di->txout = 0;
        di->dma.txavail = di->ntxd - 1;
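        /*
         * One descriptor is deliberately left unused (ntxd - 1) so a full
         * ring can be told apart from an empty one; dma_txfast() applies the
         * same "- 1" when it recomputes txavail and refuses to post a buffer
         * once nexttxd(di, txout) would collide with txin.
         */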
        /* clear tx descriptor ring */
        memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
        /* A DMA engine without an alignment requirement requires the table to
         * be initialized before enabling the engine
        if (!di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_TX, di->txdpa);
        if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_XC_PD;
        OR_REG(&di->d64txregs->control, control);
        /* A DMA engine with an alignment requirement requires the table to be
         * initialized before enabling the engine
        if (di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_TX, di->txdpa);
void dma_txsuspend(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_txsuspend\n", di->name));
        OR_REG(&di->d64txregs->control, D64_XC_SE);
void dma_txresume(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_txresume\n", di->name));
        AND_REG(&di->d64txregs->control, ~D64_XC_SE);
bool dma_txsuspended(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        return (di->ntxd == 0) ||
               ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
        struct dma_info *di = (struct dma_info *)pub;
        DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
                   (range == DMA_RANGE_ALL) ? "all" :
                    DMA_RANGE_TRANSMITTED) ? "transmitted" :
        if (di->txin == di->txout)
        while ((p = dma_getnexttxp(pub, range))) {
                /* For unframed data, we don't have any packets to free */
                if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
                        brcmu_pkt_buf_free_skb(p);
bool dma_txreset(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        /* suspend tx DMA first */
        W_REG(&di->d64txregs->control, D64_XC_SE);
                  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
                 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
                 && (status != D64_XS0_XS_STOPPED), 10000);
        W_REG(&di->d64txregs->control, 0);
                  (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
                 != D64_XS0_XS_DISABLED), 10000);
        /* wait for the last transaction to complete */
        return status == D64_XS0_XS_DISABLED;
bool dma_rxreset(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        W_REG(&di->d64rxregs->control, 0);
                  (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
                 != D64_RS0_RS_DISABLED), 10000);
        return status == D64_RS0_RS_DISABLED;
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * An error (tossed frames) could be fatal and cause many subsequent hard
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
        struct dma_info *di = (struct dma_info *)pub;
        struct sk_buff *p, *next;
        unsigned char *data;
        DMA_TRACE(("%s: dma_txfast\n", di->name));
         * Walk the chain of packet buffers,
         * allocating and initializing transmit descriptor entries.
        for (p = p0; p; p = next) {
                /* return nonzero if out of tx descriptors */
                if (nexttxd(di, txout) == di->txin)
                /* get physical address of buffer start */
                pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
                        flags |= D64_CTRL1_SOF;
                /* With a DMA segment list, the descriptor table is filled
                 * using the segment list instead of looping over
                 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
                 * is when the end of the segment list is reached.
                        flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
                if (txout == (di->ntxd - 1))
                        flags |= D64_CTRL1_EOT;
                dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
                txout = nexttxd(di, txout);
        /* if the last txd's EOF is not set, fix it */
        if (!(flags & D64_CTRL1_EOF))
                di->txd64[prevtxd(di, txout)].ctrl1 =
                        cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
        /* save the packet */
        di->txp[prevtxd(di, txout)] = p0;
        /* bump the tx descriptor index */
                W_REG(&di->d64txregs->ptr,
                      di->xmtptrbase + I2B(txout, struct dma64desc));
        /* tx flow control */
        di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
        DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
        brcmu_pkt_buf_free_skb(p0);
        di->dma.txavail = 0;
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packets regardless of the value of hardware pointers.
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
        struct dma_info *di = (struct dma_info *)pub;
        struct sk_buff *txp;
        DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
                   (range == DMA_RANGE_ALL) ? "all" :
                    DMA_RANGE_TRANSMITTED) ? "transmitted" :
        if (range == DMA_RANGE_ALL)
                struct dma64regs *dregs = di->d64txregs;
                end = (u16) (B2I(((R_REG(&dregs->status0) &
                                  di->xmtptrbase) & D64_XS0_CD_MASK,
                if (range == DMA_RANGE_TRANSFERED) {
                            (u16) (R_REG(&dregs->status1) &
                            (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
                        active_desc = B2I(active_desc, struct dma64desc);
                        if (end != active_desc)
                                end = prevtxd(di, active_desc);
        if ((start == 0) && (end > di->txout))
        for (i = start; i != end && !txp; i = nexttxd(di, i)) {
                pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;
                       (le32_to_cpu(di->txd64[i].ctrl2) &
                di->txd64[i].addrlow = 0xdeadbeef;
                di->txd64[i].addrhigh = 0xdeadbeef;
                pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
        /* tx flow control */
        di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
        DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
                  "force %d\n", start, end, di->txout, forceall));
 * mac80211-initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the
 * DMA engine. This function calls a caller-supplied function for each packet
 * in the caller-specified DMA chain.
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
                      (void *pkt, void *arg_a), void *arg_a)
        struct dma_info *di = (struct dma_info *) dmah;
        uint end = di->txout;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
                skb = (struct sk_buff *)di->txp[i];
                        tx_info = (struct ieee80211_tx_info *)skb->cb;
                        (callback_fnc)(tx_info, arg_a);