/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmendian.h>
#ifdef BCMDBG
#define DMA_ERROR(args) \
    do { \
        if (!(*di->msg_level & 1)) \
            ; \
        else \
            printf args; \
    } while (0)
#define DMA_TRACE(args) \
    do { \
        if (!(*di->msg_level & 2)) \
            ; \
        else \
            printf args; \
    } while (0)
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif				/* BCMDBG */

#define DMA_NONE(args)
#define d32txregs	dregs.d32_u.txregs_32
#define d32rxregs	dregs.d32_u.rxregs_32
#define txd32		dregs.d32_u.txd_32
#define rxd32		dregs.d32_u.rxd_32

#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	((dma_info_t *)dmah)
/* dma engine software state */
typedef struct dma_info {
    struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
                                 * which could be const
                                 */
    uint *msg_level;	/* message level pointer */
    char name[MAXNAMEL];	/* callers name for diag msgs */

    void *osh;		/* os handle */
    si_t *sih;		/* sb handle */

    bool dma64;		/* this dma engine is operating in 64-bit mode */
    bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

    union {
        struct {
            dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
            dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
            dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
            dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
        } d32_u;
        struct {
            dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
            dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
            dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
            dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
        } d64_u;
    } dregs;

    u16 dmadesc_align;	/* alignment requirement for dma descriptors */

    u16 ntxd;		/* # tx descriptors tunable */
    u16 txin;		/* index of next descriptor to reclaim */
    u16 txout;		/* index of next descriptor to post */
    void **txp;		/* pointer to parallel array of pointers to packets */
    osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
    hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
    dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
    dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
    u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
    u32 txdalloc;		/* #bytes allocated for the ring */
    u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
                         * is not just an index, it needs all 13 bits to be
                         * an offset from the addr register.
                         */

    u16 nrxd;		/* # rx descriptors tunable */
    u16 rxin;		/* index of next descriptor to reclaim */
    u16 rxout;		/* index of next descriptor to post */
    void **rxp;		/* pointer to parallel array of pointers to packets */
    osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
    hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
    dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
    dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
    u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
    u32 rxdalloc;		/* #bytes allocated for the ring */
    u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

    unsigned int rxbufsize;	/* rx buffer size in bytes,
                             * not including the extra headroom
                             */
    uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
                             * e.g. some rx pkt buffers will be bridged to tx side
                             * without byte copying. The extra headroom needs to be
                             * large enough to fit txheader needs.
                             * Some dongle drivers may not need it.
                             */
    uint nrxpost;		/* # rx buffers to keep posted */
    unsigned int rxoffset;	/* rxcontrol offset */
    uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
    uint ddoffsethigh;	/* high 32 bits */
    uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
    uint dataoffsethigh;	/* high 32 bits */
    bool aligndesc_4k;	/* descriptor base needs to be aligned or not */
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define DMA32_ENAB(di)	1
#define DMA64_ENAB(di)	1
#define DMA64_MODE(di)	((di)->dma64)
#else				/* !BCMDMA32 */
#define DMA32_ENAB(di)	0
#define DMA64_ENAB(di)	1
#define DMA64_MODE(di)	1
#endif				/* !BCMDMA32 */
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD((i) + 1)
#define PREVTXD(i)	TXD((i) - 1)
#define NEXTRXD(i)	RXD((i) + 1)
#define PREVRXD(i)	RXD((i) - 1)

#define NTXDACTIVE(h, t)	TXD((t) - (h))
#define NRXDACTIVE(h, t)	RXD((t) - (h))
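/*
 * Worked example of the bumping macros above (illustrative only, not driver
 * code): with di->ntxd = 64, XXD masks with 63, so NEXTTXD(63) == 0 (wraps)
 * and PREVTXD(0) == 63. NTXDACTIVE relies on the same mask for wraparound:
 * with txin = 60 and txout = 4, NTXDACTIVE(60, 4) == (4 - 60) & 63 == 8
 * descriptors in flight. This is why the ring sizes are asserted to be
 * powers of 2 in dma_attach().
 */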
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
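/*
 * Example of the byte/index conversions (illustrative): a dma64dd_t is
 * 16 bytes (two 32-bit control words plus a split 64-bit address), so a
 * CurrDescr byte offset of 0x40 read from status0 maps to descriptor index
 * B2I(0x40, dma64dd_t) == 4, and I2B(4, dma64dd_t) == 0x40 maps back.
 * The ptr/status registers traffic in byte offsets; the software state
 * (txin/txout/rxin/rxout) traffics in indexes.
 */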
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
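/*
 * Address-extension example (illustrative): for a 32-bit PCI buffer at
 * pa = 0xC0001000, the two bits masked by PCI32ADDR_HIGH are shifted down
 * by PCI32ADDR_HIGH_SHIFT to give ae = 0x3; ae is programmed into the
 * descriptor/control AE field and pa is written with those bits cleared
 * (0x00001000). dma32_dd_upd() and dma64_dd_upd() below follow exactly
 * this recipe.
 */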
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
                              u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
                           u16 *alignbits, uint *alloced,
                           dmaaddr_t *descpa, osldma_t **dmah);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
    (di_detach_t) _dma_detach,
    (di_txinit_t) dma64_txinit,
    (di_txreset_t) dma64_txreset,
    (di_txenabled_t) dma64_txenabled,
    (di_txsuspend_t) dma64_txsuspend,
    (di_txresume_t) dma64_txresume,
    (di_txsuspended_t) dma64_txsuspended,
    (di_txsuspendedidle_t) dma64_txsuspendedidle,
    (di_txfast_t) dma64_txfast,
    (di_txunframed_t) dma64_txunframed,
    (di_getpos_t) dma64_getpos,
    (di_txstopped_t) dma64_txstopped,
    (di_txreclaim_t) dma64_txreclaim,
    (di_getnexttxp_t) dma64_getnexttxp,
    (di_peeknexttxp_t) _dma_peeknexttxp,
    (di_txblock_t) _dma_txblock,
    (di_txunblock_t) _dma_txunblock,
    (di_txactive_t) _dma_txactive,
    (di_txrotate_t) dma64_txrotate,

    (di_rxinit_t) _dma_rxinit,
    (di_rxreset_t) dma64_rxreset,
    (di_rxidle_t) dma64_rxidle,
    (di_rxstopped_t) dma64_rxstopped,
    (di_rxenable_t) _dma_rxenable,
    (di_rxenabled_t) dma64_rxenabled,

    (di_rxfill_t) _dma_rxfill,
    (di_rxreclaim_t) _dma_rxreclaim,
    (di_getnextrxp_t) _dma_getnextrxp,
    (di_peeknextrxp_t) _dma_peeknextrxp,
    (di_rxparam_get_t) _dma_rx_param_get,

    (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
    (di_getvar_t) _dma_getvar,
    (di_counterreset_t) _dma_counterreset,
    (di_ctrlflags_t) _dma_ctrlflags,

    (di_rxactive_t) _dma_rxactive,
    (di_txpending_t) _dma_txpending,
    (di_txcommitted_t) _dma_txcommitted,
};
static const di_fcn_t dma32proc = {
    (di_detach_t) _dma_detach,
    (di_txinit_t) dma32_txinit,
    (di_txreset_t) dma32_txreset,
    (di_txenabled_t) dma32_txenabled,
    (di_txsuspend_t) dma32_txsuspend,
    (di_txresume_t) dma32_txresume,
    (di_txsuspended_t) dma32_txsuspended,
    (di_txsuspendedidle_t) dma32_txsuspendedidle,
    (di_txfast_t) dma32_txfast,

    (di_txstopped_t) dma32_txstopped,
    (di_txreclaim_t) dma32_txreclaim,
    (di_getnexttxp_t) dma32_getnexttxp,
    (di_peeknexttxp_t) _dma_peeknexttxp,
    (di_txblock_t) _dma_txblock,
    (di_txunblock_t) _dma_txunblock,
    (di_txactive_t) _dma_txactive,
    (di_txrotate_t) dma32_txrotate,

    (di_rxinit_t) _dma_rxinit,
    (di_rxreset_t) dma32_rxreset,
    (di_rxidle_t) dma32_rxidle,
    (di_rxstopped_t) dma32_rxstopped,
    (di_rxenable_t) _dma_rxenable,
    (di_rxenabled_t) dma32_rxenabled,

    (di_rxfill_t) _dma_rxfill,
    (di_rxreclaim_t) _dma_rxreclaim,
    (di_getnextrxp_t) _dma_getnextrxp,
    (di_peeknextrxp_t) _dma_peeknextrxp,
    (di_rxparam_get_t) _dma_rx_param_get,

    (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
    (di_getvar_t) _dma_getvar,
    (di_counterreset_t) _dma_counterreset,
    (di_ctrlflags_t) _dma_ctrlflags,

    (di_rxactive_t) _dma_rxactive,
    (di_txpending_t) _dma_txpending,
    (di_txcommitted_t) _dma_txcommitted,
};
hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
                     void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
                     int rxextheadroom, uint nrxpost, uint rxoffset,
                     uint *msg_level)
{
    dma_info_t *di;
    uint size;

    /* allocate private info structure */
    di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
    if (di == NULL) {
        printf("dma_attach: out of memory\n");
        return NULL;
    }

    di->msg_level = msg_level ? msg_level : &dma_msg_level;

    /* old chips w/o sb are no longer supported */
    di->dma64 =
        ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

    /* check arguments */
    ASSERT(ISPOWEROF2(ntxd));
    ASSERT(ISPOWEROF2(nrxd));

    if (nrxd == 0)
        ASSERT(dmaregsrx == NULL);
    if (ntxd == 0)
        ASSERT(dmaregstx == NULL);

    /* init dma reg pointer */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        ASSERT(ntxd <= D64MAXDD);
        ASSERT(nrxd <= D64MAXDD);
        di->d64txregs = (dma64regs_t *) dmaregstx;
        di->d64rxregs = (dma64regs_t *) dmaregsrx;
        di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
    } else if (DMA32_ENAB(di)) {
        ASSERT(ntxd <= D32MAXDD);
        ASSERT(nrxd <= D32MAXDD);
        di->d32txregs = (dma32regs_t *) dmaregstx;
        di->d32rxregs = (dma32regs_t *) dmaregsrx;
        di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
    } else {
        DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
        goto fail;
    }

    /* Default flags (which can be changed by the driver calling dma_ctrlflags
     * before enable): For backwards compatibility both Rx Overflow Continue
     * and Parity are DISABLED.
     */
    di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
                                0);

    DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
               "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
               "dmaregstx %p dmaregsrx %p\n", name,
               (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh,
               di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
               rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

    /* make a private copy of our callers name */
    strncpy(di->name, name, MAXNAMEL);
    di->name[MAXNAMEL - 1] = '\0';

    di->osh = osh;
    di->sih = sih;

    /* save tunables */
    di->ntxd = (u16) ntxd;
    di->nrxd = (u16) nrxd;

    /* the actual dma size doesn't include the extra headroom */
    di->rxextrahdrroom =
        (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
    if (rxbufsize > BCMEXTRAHDROOM)
        di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
    else
        di->rxbufsize = (u16) rxbufsize;

    di->nrxpost = (u16) nrxpost;
    di->rxoffset = (u8) rxoffset;
    /*
     * figure out the DMA physical address offset for dd and data
     *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
     *     Other bus: use zero
     *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
     */
    di->ddoffsetlow = 0;
    di->dataoffsetlow = 0;
    /* for pci bus, add offset */
    if (sih->bustype == PCI_BUS) {
        if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
            /* pcie with DMA64 */
            di->ddoffsetlow = 0;
            di->ddoffsethigh = SI_PCIE_DMA_H32;
        } else {
            /* pci(DMA32/DMA64) or pcie with DMA32 */
            di->ddoffsetlow = SI_PCI_DMA;
            di->ddoffsethigh = 0;
        }
        di->dataoffsetlow = di->ddoffsetlow;
        di->dataoffsethigh = di->ddoffsethigh;
    }
#if defined(__mips__) && defined(IL_BIGENDIAN)
    di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
    /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
    if ((si_coreid(sih) == SDIOD_CORE_ID)
        && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
        di->addrext = 0;
    else if ((si_coreid(sih) == I2S_CORE_ID) &&
             ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
        di->addrext = 0;
    else
        di->addrext = _dma_isaddrext(di);
    /* do the descriptors need to be aligned and if yes, on 4K/8K or not */
    di->aligndesc_4k = _dma_descriptor_align(di);
    if (di->aligndesc_4k) {
        if (DMA64_MODE(di)) {
            di->dmadesc_align = D64RINGALIGN_BITS;
            if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
                /* for smaller dd table, HW relaxes the alignment requirement */
                di->dmadesc_align = D64RINGALIGN_BITS - 1;
            }
        } else
            di->dmadesc_align = D32RINGALIGN_BITS;
    } else
        di->dmadesc_align = 4;	/* 16 byte alignment */

    DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
              di->aligndesc_4k, di->dmadesc_align));
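    /*
     * Example (illustrative; assumes D64RINGALIGN_BITS == 13, i.e. 8 KB,
     * and D32RINGALIGN_BITS == 12, i.e. 4 KB, as in the register header):
     * a full-size 64-bit ring must sit on an 8 KB boundary, a ring with
     * fewer than D64MAXDD / 2 tx and rx descriptors is relaxed to 4 KB
     * (dmadesc_align of 12 bits), and an engine that passes the
     * _dma_descriptor_align() probe gets by with 16-byte alignment
     * (dmadesc_align == 4).
     */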
    /* allocate tx packet pointer vector */
    if (ntxd) {
        size = ntxd * sizeof(void *);
        di->txp = kzalloc(size, GFP_ATOMIC);
        if (di->txp == NULL) {
            DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
            goto fail;
        }
    }

    /* allocate rx packet pointer vector */
    if (nrxd) {
        size = nrxd * sizeof(void *);
        di->rxp = kzalloc(size, GFP_ATOMIC);
        if (di->rxp == NULL) {
            DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
            goto fail;
        }
    }

    /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
    if (ntxd) {
        if (!_dma_alloc(di, DMA_TX))
            goto fail;
    }

    /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
    if (nrxd) {
        if (!_dma_alloc(di, DMA_RX))
            goto fail;
    }

    if ((di->ddoffsetlow != 0) && !di->addrext) {
        if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
            DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
            goto fail;
        }
        if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
            DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
            goto fail;
        }
    }

    DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
               "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
               di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
               di->addrext));

    /* allocate DMA mapping vectors */
    if (DMASGLIST_ENAB) {
        if (ntxd) {
            size = ntxd * sizeof(hnddma_seg_map_t);
            di->txp_dmah = kzalloc(size, GFP_ATOMIC);
            if (di->txp_dmah == NULL)
                goto fail;
        }

        if (nrxd) {
            size = nrxd * sizeof(hnddma_seg_map_t);
            di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
            if (di->rxp_dmah == NULL)
                goto fail;
        }
    }

    return (hnddma_t *) di;

 fail:
    _dma_detach(di);
    return NULL;
}
/* init the tx or rx descriptor */
static inline void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
             u32 *flags, u32 bufcount)
{
    /* dma32 uses 32-bit control to fit both flags and bufcounter */
    *flags = *flags | (bufcount & CTRL_BC_MASK);

    if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
        W_SM(&ddring[outidx].addr,
             BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    } else {
        /* address extension */
        u32 ae;
        ASSERT(di->addrext);

        ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

        *flags |= (ae << CTRL_AE_SHIFT);
        W_SM(&ddring[outidx].addr,
             BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    }
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
    /* fold the word onto itself; bit 0 ends up holding the parity */
    data ^= data >> 16;
    data ^= data >> 8;
    data ^= data >> 4;
    data ^= data >> 2;
    data ^= data >> 1;

    return data & 1;
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
             u32 *flags, u32 bufcount)
{
    u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

    /* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
    if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
        || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
    if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
        ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

        W_SM(&ddring[outidx].addrlow,
             BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].addrhigh,
             BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
        W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
        W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
    } else {
        /* address extension for 32-bit PCI */
        u32 ae;
        ASSERT(di->addrext);

        ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
        ASSERT(PHYSADDRHI(pa) == 0);

        ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
        W_SM(&ddring[outidx].addrlow,
             BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].addrhigh,
             BUS_SWAP32(0 + di->dataoffsethigh));
        W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
        W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
    }
    if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
        if (DMA64_DD_PARITY(&ddring[outidx])) {
            W_SM(&ddring[outidx].ctrl2,
                 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
        }
    }
}
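/*
 * Net effect of the parity pass above (illustrative summary): once
 * dma64_dd_upd() returns, the 128-bit descriptor carries an even number of
 * 1 bits. If the four words xor to odd parity, D64_CTRL2_PARITY is set to
 * even it out; a descriptor whose words xor to, say, 0x00000001 (odd) gets
 * the bit, while one that xors to 0x00000003 (even) does not. Hardware can
 * then flag any single-bit corruption of a posted descriptor.
 */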
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
    u32 w;

    OR_REG(osh, &dma32regs->control, XC_AE);
    w = R_REG(osh, &dma32regs->control);
    AND_REG(osh, &dma32regs->control, ~XC_AE);
    return (w & XC_AE) == XC_AE;
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        return dma64_alloc(di, direction);
    } else if (DMA32_ENAB(di)) {
        return dma32_alloc(di, direction);
    } else
        ASSERT(0);
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_detach\n", di->name));

    /* shouldn't be here if descriptors are unreclaimed */
    ASSERT(di->txin == di->txout);
    ASSERT(di->rxin == di->rxout);

    /* free dma descriptor rings */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        if (di->txd64)
            DMA_FREE_CONSISTENT(di->osh,
                                ((s8 *)di->txd64 -
                                 di->txdalign), di->txdalloc,
                                (di->txdpaorig), &di->tx_dmah);
        if (di->rxd64)
            DMA_FREE_CONSISTENT(di->osh,
                                ((s8 *)di->rxd64 -
                                 di->rxdalign), di->rxdalloc,
                                (di->rxdpaorig), &di->rx_dmah);
    } else if (DMA32_ENAB(di)) {
        if (di->txd32)
            DMA_FREE_CONSISTENT(di->osh,
                                ((s8 *)di->txd32 -
                                 di->txdalign), di->txdalloc,
                                (di->txdpaorig), &di->tx_dmah);
        if (di->rxd32)
            DMA_FREE_CONSISTENT(di->osh,
                                ((s8 *)di->rxd32 -
                                 di->rxdalign), di->rxdalloc,
                                (di->rxdpaorig), &di->rx_dmah);
    } else
        ASSERT(0);

    /* free packet pointer vectors */
    if (di->txp)
        kfree((void *)di->txp);
    if (di->rxp)
        kfree((void *)di->rxp);

    /* free tx packet DMA handles */
    if (di->txp_dmah)
        kfree(di->txp_dmah);

    /* free rx packet DMA handles */
    if (di->rxp_dmah)
        kfree(di->rxp_dmah);

    /* free our private info structure */
    kfree((void *)di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        u32 addrl;

        /* Check to see if the descriptors need to be aligned on 4K/8K or not */
        if (di->d64txregs != NULL) {
            W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
            addrl = R_REG(di->osh, &di->d64txregs->addrlow);
            if (addrl != 0)
                return false;
        } else if (di->d64rxregs != NULL) {
            W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
            addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
            if (addrl != 0)
                return false;
        }
    }
    return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        /* DMA64 supports full 32- or 64-bit operation. AE is always valid */

        /* not all tx or rx channels are available */
        if (di->d64txregs != NULL) {
            if (!_dma64_addrext(di->osh, di->d64txregs)) {
                DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
                ASSERT(0);
            }
            return true;
        } else if (di->d64rxregs != NULL) {
            if (!_dma64_addrext(di->osh, di->d64rxregs)) {
                DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
                ASSERT(0);
            }
            return true;
        }
        return false;
    } else if (DMA32_ENAB(di)) {
        if (di->d32txregs)
            return _dma32_addrext(di->osh, di->d32txregs);
        else if (di->d32rxregs)
            return _dma32_addrext(di->osh, di->d32rxregs);
    }
    return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        if (!di->aligndesc_4k) {
            if (direction == DMA_TX)
                di->xmtptrbase = PHYSADDRLO(pa);
            else
                di->rcvptrbase = PHYSADDRLO(pa);
        }

        if ((di->ddoffsetlow == 0)
            || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d64txregs->addrlow,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                W_REG(di->osh, &di->d64txregs->addrhigh,
                      (PHYSADDRHI(pa) + di->ddoffsethigh));
            } else {
                W_REG(di->osh, &di->d64rxregs->addrlow,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                W_REG(di->osh, &di->d64rxregs->addrhigh,
                      (PHYSADDRHI(pa) + di->ddoffsethigh));
            }
        } else {
            /* DMA64 32bits address extension */
            u32 ae;
            ASSERT(di->addrext);
            ASSERT(PHYSADDRHI(pa) == 0);

            /* shift the high bit(s) from pa to ae */
            ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
                PCI32ADDR_HIGH_SHIFT;
            PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d64txregs->addrlow,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                W_REG(di->osh, &di->d64txregs->addrhigh,
                      di->ddoffsethigh);
                SET_REG(di->osh, &di->d64txregs->control,
                        D64_XC_AE, (ae << D64_XC_AE_SHIFT));
            } else {
                W_REG(di->osh, &di->d64rxregs->addrlow,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                W_REG(di->osh, &di->d64rxregs->addrhigh,
                      di->ddoffsethigh);
                SET_REG(di->osh, &di->d64rxregs->control,
                        D64_RC_AE, (ae << D64_RC_AE_SHIFT));
            }
        }
    } else if (DMA32_ENAB(di)) {
        ASSERT(PHYSADDRHI(pa) == 0);
        if ((di->ddoffsetlow == 0)
            || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
            if (direction == DMA_TX)
                W_REG(di->osh, &di->d32txregs->addr,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
            else
                W_REG(di->osh, &di->d32rxregs->addr,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
        } else {
            /* dma32 address extension */
            u32 ae;
            ASSERT(di->addrext);

            /* shift the high bit(s) from pa to ae */
            ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
                PCI32ADDR_HIGH_SHIFT;
            PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d32txregs->addr,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                SET_REG(di->osh, &di->d32txregs->control, XC_AE,
                        ae << XC_AE_SHIFT);
            } else {
                W_REG(di->osh, &di->d32rxregs->addr,
                      (PHYSADDRLO(pa) + di->ddoffsetlow));
                SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
                        ae << RC_AE_SHIFT);
            }
        }
    } else
        ASSERT(0);
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

    if (DMA64_ENAB(di) && DMA64_MODE(di))
        OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
    else if (DMA32_ENAB(di))
        OR_REG(di->osh, &di->d32txregs->control, XC_LE);
    else
        ASSERT(0);
}
static void _dma_rxinit(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxinit\n", di->name));

    if (di->nrxd == 0)
        return;

    di->rxin = di->rxout = 0;

    /* clear rx descriptor ring */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        BZERO_SM((void *)di->rxd64,
                 (di->nrxd * sizeof(dma64dd_t)));

        /* DMA engine without alignment requirement requires table to be inited
         * before enabling the engine
         */
        if (!di->aligndesc_4k)
            _dma_ddtable_init(di, DMA_RX, di->rxdpa);

        _dma_rxenable(di);

        if (di->aligndesc_4k)
            _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    } else if (DMA32_ENAB(di)) {
        BZERO_SM((void *)di->rxd32,
                 (di->nrxd * sizeof(dma32dd_t)));
        _dma_rxenable(di);
        _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    } else
        ASSERT(0);
}
static void _dma_rxenable(dma_info_t *di)
{
    uint dmactrlflags = di->hnddma.dmactrlflags;

    DMA_TRACE(("%s: dma_rxenable\n", di->name));

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        u32 control =
            (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
            D64_RC_RE;

        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
            control |= D64_RC_PD;

        if (dmactrlflags & DMA_CTRL_ROC)
            control |= D64_RC_OC;

        W_REG(di->osh, &di->d64rxregs->control,
              ((di->rxoffset << D64_RC_RO_SHIFT) | control));
    } else if (DMA32_ENAB(di)) {
        u32 control =
            (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
            control |= RC_PD;

        if (dmactrlflags & DMA_CTRL_ROC)
            control |= RC_OC;

        W_REG(di->osh, &di->d32rxregs->control,
              ((di->rxoffset << RC_RO_SHIFT) | control));
    } else
        ASSERT(0);
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
    /* the normal values fit into 16 bits */
    *rxoffset = (u16) di->rxoffset;
    *rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported;
 * otherwise, a multi-buffer frame is treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
 * After it reaches the max size of a buffer, the data continues in the next DMA descriptor
 * buffer WITHOUT a DMA header.
 */
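/*
 * Sketch of a scattered receive (illustrative, example numbers only): with
 * rxbufsize = 2048 and a 5000-byte frame, the first descriptor's buffer
 * holds the DMA header (the length word read below) plus the first chunk of
 * data; the following descriptors hold raw continuation bytes with no
 * header. The loop below links the buffers with PKTSETNEXT() and trims the
 * final one with PKTSETLEN() so the chain's total length matches the
 * length word.
 */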
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
    void *p, *head, *tail;
    uint len;
    uint pkt_len;
    int resid = 0;

 next_frame:
    head = _dma_getnextrxp(di, false);
    if (head == NULL)
        return NULL;

    len = ltoh16(*(u16 *) (PKTDATA(head)));
    DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
    if (!len) {
        while (!(len = *(u16 *) OSL_UNCACHED(PKTDATA(head))))
            OSL_DELAY(1);

        *(u16 *) PKTDATA(head) = htol16((u16) len);
    }
#endif				/* defined(__mips__) */

    /* set actual length */
    pkt_len = min((di->rxoffset + len), di->rxbufsize);
    PKTSETLEN(head, pkt_len);
    resid = len - (di->rxbufsize - di->rxoffset);

    /* check for single or multi-buffer rx */
    if (resid > 0) {
        tail = head;
        while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
            PKTSETNEXT(tail, p);
            pkt_len = min(resid, (int)di->rxbufsize);
            PKTSETLEN(p, pkt_len);

            tail = p;
            resid -= di->rxbufsize;
        }

#ifdef BCMDBG
        if (resid > 0) {
            uint cur;
            cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
                B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
                      D64_RS0_CD_MASK) -
                     di->rcvptrbase) & D64_RS0_CD_MASK,
                    dma64dd_t) : B2I(R_REG(di->osh,
                                           &di->d32rxregs->
                                           status) & RS_CD_MASK,
                                     dma32dd_t);
            DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
                       di->rxin, di->rxout, cur));
        }
#endif				/* BCMDBG */

        /* release partial packet in the unexpected situation */
        if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
            DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
                       di->name, len));
            PKTFREE(di->osh, head, false);
            di->hnddma.rxgiants++;
            goto next_frame;
        }
    }

    return head;
}
/* post receive buffers
 * return false if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This is unlikely to happen on a memory-rich NIC, but often does on a
 * memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
    void *p;
    u16 rxin, rxout;
    u32 flags = 0;
    uint n;
    uint i;
    dmaaddr_t pa;
    uint extra_offset = 0;
    bool ring_empty;

    ring_empty = false;

    /*
     * Determine how many receive buffers we're lacking
     * from the full complement, allocate, initialize,
     * and post them, then update the chip rx lastdscr.
     */

    rxin = di->rxin;
    rxout = di->rxout;

    n = di->nrxpost - NRXDACTIVE(rxin, rxout);

    DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

    if (di->rxbufsize > BCMEXTRAHDROOM)
        extra_offset = di->rxextrahdrroom;

    for (i = 0; i < n; i++) {
        /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
           size to be allocated
         */

        p = osl_pktget(di->osh, di->rxbufsize + extra_offset);

        if (p == NULL) {
            DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
                       di->name));
            if (i == 0) {
                if (DMA64_ENAB(di) && DMA64_MODE(di)) {
                    if (dma64_rxidle(di)) {
                        DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
                        ring_empty = true;
                    }
                } else if (DMA32_ENAB(di)) {
                    if (dma32_rxidle(di)) {
                        DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
                        ring_empty = true;
                    }
                } else
                    ASSERT(0);
            }
            di->hnddma.rxnobuf++;
            break;
        }
        /* reserve an extra headroom, if applicable */
        if (extra_offset)
            PKTPULL(p, extra_offset);

        /* Do a cached write instead of uncached write since DMA_MAP
         * will flush the cache.
         */
        *(u32 *) (PKTDATA(p)) = 0;

        if (DMASGLIST_ENAB)
            bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

        pa = DMA_MAP(di->osh, PKTDATA(p),
                     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

        ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

        /* save the free packet pointer */
        ASSERT(di->rxp[rxout] == NULL);
        di->rxp[rxout] = p;

        /* reset flags for each descriptor */
        flags = 0;
        if (DMA64_ENAB(di) && DMA64_MODE(di)) {
            if (rxout == (di->nrxd - 1))
                flags = D64_CTRL1_EOT;

            dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
                         di->rxbufsize);
        } else if (DMA32_ENAB(di)) {
            if (rxout == (di->nrxd - 1))
                flags = CTRL_EOT;

            ASSERT(PHYSADDRHI(pa) == 0);
            dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
                         di->rxbufsize);
        } else
            ASSERT(0);

        rxout = NEXTRXD(rxout);
    }

    di->rxout = rxout;

    /* update the chip lastdscr pointer */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        W_REG(di->osh, &di->d64rxregs->ptr,
              di->rcvptrbase + I2B(rxout, dma64dd_t));
    } else if (DMA32_ENAB(di)) {
        W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
    } else
        ASSERT(0);

    return ring_empty;
}
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
    uint end, i;

    if (di->ntxd == 0)
        return NULL;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        end =
            B2I(((R_REG(di->osh, &di->d64txregs->status0) &
                  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
                dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        end =
            B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
                dma32dd_t);
    } else
        ASSERT(0);

    for (i = di->txin; i != end; i = NEXTTXD(i))
        if (di->txp[i])
            return di->txp[i];

    return NULL;
}
/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
    uint end, i;

    if (di->nrxd == 0)
        return NULL;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        end =
            B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
                  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
                dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        end =
            B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
                dma32dd_t);
    } else
        ASSERT(0);

    for (i = di->rxin; i != end; i = NEXTRXD(i))
        if (di->rxp[i])
            return di->rxp[i];

    return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
    void *p;

    /* "unused local" warning suppression for OSLs that
     * define PKTFREE() without using the di->osh arg
     */
    di = di;

    DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

    while ((p = _dma_getnextrxp(di, true)))
        PKTFREE(di->osh, p, false);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
    if (di->nrxd == 0)
        return NULL;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        return dma64_getnextrxp(di, forceall);
    } else if (DMA32_ENAB(di)) {
        return dma32_getnextrxp(di, forceall);
    } else
        ASSERT(0);
}
static void _dma_txblock(dma_info_t *di)
{
    di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
    return NTXDACTIVE(di->txin, di->txout);
}

static uint _dma_txpending(dma_info_t *di)
{
    uint curr;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        curr =
            B2I(((R_REG(di->osh, &di->d64txregs->status0) &
                  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
                dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        curr =
            B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
                dma32dd_t);
    } else
        ASSERT(0);

    return NTXDACTIVE(curr, di->txout);
}

static uint _dma_txcommitted(dma_info_t *di)
{
    uint ptr;
    uint txin = di->txin;

    if (txin == di->txout)
        return 0;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
    } else
        ASSERT(0);

    return NTXDACTIVE(di->txin, ptr);
}

static uint _dma_rxactive(dma_info_t *di)
{
    return NRXDACTIVE(di->rxin, di->rxout);
}

static void _dma_counterreset(dma_info_t *di)
{
    /* reset all software counters */
    di->hnddma.rxgiants = 0;
    di->hnddma.rxnobuf = 0;
    di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
    uint dmactrlflags = di->hnddma.dmactrlflags;

    if (di == NULL) {
        DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
        return 0;
    }

    ASSERT((flags & ~mask) == 0);

    dmactrlflags &= ~mask;
    dmactrlflags |= flags;

    /* If trying to enable parity, check if parity is actually supported */
    if (dmactrlflags & DMA_CTRL_PEN) {
        u32 control;

        if (DMA64_ENAB(di) && DMA64_MODE(di)) {
            control = R_REG(di->osh, &di->d64txregs->control);
            W_REG(di->osh, &di->d64txregs->control,
                  control | D64_XC_PD);
            if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
                /* We *can* disable it so it is supported,
                 * restore control register
                 */
                W_REG(di->osh, &di->d64txregs->control,
                      control);
            } else {
                /* Not supported, don't allow it to be enabled */
                dmactrlflags &= ~DMA_CTRL_PEN;
            }
        } else if (DMA32_ENAB(di)) {
            control = R_REG(di->osh, &di->d32txregs->control);
            W_REG(di->osh, &di->d32txregs->control,
                  control | XC_PD);
            if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
                W_REG(di->osh, &di->d32txregs->control,
                      control);
            } else {
                /* Not supported, don't allow it to be enabled */
                dmactrlflags &= ~DMA_CTRL_PEN;
            }
        } else
            ASSERT(0);
    }

    di->hnddma.dmactrlflags = dmactrlflags;

    return dmactrlflags;
}
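/*
 * The parity check above is a classic write/read-back capability probe:
 * set the "parity disable" bit, and if it sticks the engine implements
 * parity (so the register is restored); if it reads back clear, parity is
 * unimplemented and DMA_CTRL_PEN is quietly dropped. A minimal sketch of
 * the pattern (illustrative; PROBE_BIT and regs are generic placeholders):
 *
 *	old = R_REG(osh, &regs->control);
 *	W_REG(osh, &regs->control, old | PROBE_BIT);
 *	supported = (R_REG(osh, &regs->control) & PROBE_BIT) != 0;
 *	if (supported)
 *		W_REG(osh, &regs->control, old);	(restore)
 */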
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
    if (!strcmp(name, "&txavail"))
        return (unsigned long)&(di->hnddma.txavail);
    else
        ASSERT(0);
    return 0;
}
void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
    OR_REG(osh, &regs->control, XC_LE);
}
u8 dma_align_sizetobits(uint size)
{
    u8 bitpos = 0;

    ASSERT(size);
    ASSERT(!(size & (size - 1)));
    while (size >>= 1) {
        bitpos++;
    }
    return bitpos;
}
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
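/*
 * Example (illustrative, example addresses only): a 4096-byte ring
 * requested with only 16-byte alignment could land at 0x1F80 and straddle
 * the page ending at 0x1FFF. The check below catches this because
 * (start + size - 1) and start differ in the bits selected by "boundary";
 * the buffer is then freed and re-allocated with
 * alignbits = dma_align_sizetobits(4096) = 12, i.e. aligned to its own
 * size, which by construction cannot cross the boundary.
 */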
static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
                           u16 *alignbits, uint *alloced,
                           dmaaddr_t *descpa, osldma_t **dmah)
{
    void *va;
    u32 desc_strtaddr;
    u32 alignbytes = 1 << *alignbits;

    va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
                              dmah);
    if (NULL == va)
        return NULL;

    desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
    if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
                                                    & boundary)) {
        *alignbits = dma_align_sizetobits(size);
        DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
        va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
                                  descpa, dmah);
    }
    return va;
}
/* 32-bit DMA functions */

static void dma32_txinit(dma_info_t *di)
{
    u32 control = XC_XE;

    DMA_TRACE(("%s: dma_txinit\n", di->name));

    if (di->ntxd == 0)
        return;

    di->txin = di->txout = 0;
    di->hnddma.txavail = di->ntxd - 1;

    /* clear tx descriptor ring */
    BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));

    if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
        control |= XC_PD;
    W_REG(di->osh, &di->d32txregs->control, control);
    _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma32_txenabled(dma_info_t *di)
{
    u32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d32txregs->control);
    return (xc != 0xffffffff) && (xc & XC_XE);
}
static void dma32_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void dma32_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool dma32_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0)
        || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range ==
                 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
                "transferred")));

    if (di->txin == di->txout)
        return;

    while ((p = dma32_getnexttxp(di, range)))
        PKTFREE(di->osh, p, true);
}
static bool dma32_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
            XS_XS_STOPPED);
}

static bool dma32_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
            RS_RS_STOPPED);
}
static bool dma32_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced;
    u16 align;
    u16 align_bits;

    ddlen = sizeof(dma32dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

    alloced = 0;
    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
                           &alloced, &di->txdpaorig, &di->tx_dmah);
        if (va == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
            return false;
        }

        PHYSADDRHISET(di->txdpa, 0);
        ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
        di->txd32 = (dma32dd_t *) roundup((unsigned long)va, align);
        di->txdalign =
            (uint) ((s8 *)di->txd32 - (s8 *) va);

        PHYSADDRLOSET(di->txdpa,
                      PHYSADDRLO(di->txdpaorig) + di->txdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        di->txdalloc = alloced;
        ASSERT(IS_ALIGNED((unsigned long)di->txd32, align));
    } else {
        va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
                           &alloced, &di->rxdpaorig, &di->rx_dmah);
        if (va == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
            return false;
        }

        PHYSADDRHISET(di->rxdpa, 0);
        ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
        di->rxd32 = (dma32dd_t *) roundup((unsigned long)va, align);
        di->rxdalign =
            (uint) ((s8 *)di->rxd32 - (s8 *) va);

        PHYSADDRLOSET(di->rxdpa,
                      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        di->rxdalloc = alloced;
        ASSERT(IS_ALIGNED((unsigned long)di->rxd32, align));
    }

    return true;
}
static bool dma32_txreset(dma_info_t *di)
{
    u32 status;

    if (di->ntxd == 0)
        return true;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d32txregs->control, XC_SE);
    SPINWAIT(((status =
               (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
              != XS_XS_DISABLED) && (status != XS_XS_IDLE)
             && (status != XS_XS_STOPPED), (10000));

    W_REG(di->osh, &di->d32txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
                               &di->d32txregs->status) & XS_XS_MASK)) !=
              XS_XS_DISABLED), 10000);

    /* wait for the last transaction to complete */
    OSL_DELAY(300);

    return status == XS_XS_DISABLED;
}
static bool dma32_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return true;

    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
            R_REG(di->osh, &di->d32rxregs->ptr));
}

static bool dma32_rxreset(dma_info_t *di)
{
    u32 status;

    if (di->nrxd == 0)
        return true;

    W_REG(di->osh, &di->d32rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
                               &di->d32rxregs->status) & RS_RS_MASK)) !=
              RS_RS_DISABLED), 10000);

    return status == RS_RS_DISABLED;
}

static bool dma32_rxenabled(dma_info_t *di)
{
    u32 rc;

    rc = R_REG(di->osh, &di->d32rxregs->control);
    return (rc != 0xffffffff) && (rc & RC_RE);
}
static bool dma32_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return true;

    if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
        return 0;

    OSL_DELAY(2);
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
            XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 * the error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems
 */
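/*
 * Typical caller pattern (illustrative sketch, not driver code; callers
 * normally go through the di_fn table rather than these statics): on a
 * nonzero return the packet chain has already been freed by the error
 * path and txnobuf bumped, so the caller must not touch p again.
 *
 *	if (di->hnddma.di_fn->txfast(&di->hnddma, p, true) != 0) {
 *		back off and retry later; do not free or resubmit p
 *	}
 */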
static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    unsigned char *data;
    uint len;
    u16 txout;
    u32 flags = 0;
    dmaaddr_t pa;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    txout = di->txout;

    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        uint nsegs, j;
        hnddma_seg_map_t *map;

        data = PKTDATA(p);
        len = PKTLEN(p);
#ifdef BCM_DMAPAD
        len += PKTDMAPAD(di->osh, p);
#endif
        next = PKTNEXT(p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        if (len == 0)
            continue;

        if (DMASGLIST_ENAB)
            bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

        /* get physical address of buffer start */
        pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
                     &di->txp_dmah[txout]);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[txout];

            /* See if all the segments can be accounted for */
            if (map->nsegs >
                (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
                        1))
                goto outoftxd;

            nsegs = map->nsegs;
        } else
            nsegs = 1;

        for (j = 1; j <= nsegs; j++) {
            flags = 0;
            if (p == p0 && j == 1)
                flags |= CTRL_SOF;

            /* With a DMA segment list, Descriptor table is filled
             * using the segment list instead of looping over
             * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
             * end of segment list is reached.
             */
            if ((!DMASGLIST_ENAB && next == NULL) ||
                (DMASGLIST_ENAB && j == nsegs))
                flags |= (CTRL_IOC | CTRL_EOF);
            if (txout == (di->ntxd - 1))
                flags |= CTRL_EOT;

            if (DMASGLIST_ENAB) {
                len = map->segs[j - 1].length;
                pa = map->segs[j - 1].addr;
            }
            ASSERT(PHYSADDRHI(pa) == 0);

            dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
            ASSERT(di->txp[txout] == NULL);

            txout = NEXTTXD(txout);
        }

        /* See above. No need to loop over individual buffers */
        if (DMASGLIST_ENAB)
            break;
    }

    /* if last txd eof not set, fix it */
    if (!(flags & CTRL_EOF))
        W_SM(&di->txd32[PREVTXD(txout)].ctrl,
             BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return 0;

 outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
    PKTFREE(di->osh, p0, true);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
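/*
 * Example of the three ranges (illustrative, example indexes only):
 * suppose txin == 2, txout == 7, the hardware CurrDescr sits at index 3
 * and ActiveDescr at index 6. HNDDMA_RANGE_TRANSMITTED makes descriptors
 * 2 only reclaimable (end = 3), HNDDMA_RANGE_TRANSFERED makes 2..4
 * reclaimable (end = PREVTXD(6) = 5), and HNDDMA_RANGE_ALL makes 2..6
 * reclaimable (end = txout) regardless of what the hardware reports.
 * Each call reclaims and returns one packet within that window.
 */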
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
    u16 start, end, i;
    u16 active_desc;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range ==
                 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
                "transferred")));

    if (di->ntxd == 0)
        return NULL;

    txp = NULL;

    start = di->txin;
    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        dma32regs_t *dregs = di->d32txregs;

        end =
            (u16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
                      dma32dd_t);

        if (range == HNDDMA_RANGE_TRANSFERED) {
            active_desc =
                (u16) ((R_REG(di->osh, &dregs->status) &
                        XS_AD_MASK) >> XS_AD_SHIFT);
            active_desc = (u16) B2I(active_desc, dma32dd_t);
            if (end != active_desc)
                end = PREVTXD(active_desc);
        }
    }

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        dmaaddr_t pa;
        hnddma_seg_map_t *map = NULL;
        uint size, j, nsegs;

        PHYSADDRLOSET(pa,
                      (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
                       di->dataoffsetlow));
        PHYSADDRHISET(pa, 0);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[i];
            size = map->origsize;
            nsegs = map->nsegs;
        } else {
            size =
                (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
                 CTRL_BC_MASK);
            nsegs = 1;
        }

        for (j = nsegs; j > 0; j--) {
            W_SM(&di->txd32[i].addr, 0xdeadbeef);

            txp = di->txp[i];
            di->txp[i] = NULL;
            if (j > 1)
                i = NEXTTXD(i);
        }

        DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return txp;

 bogus:
    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
    return NULL;
}
static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
{
    uint i, curr;
    void *rxp;
    dmaaddr_t pa;

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma32_rxenabled(di));

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return NULL;

    curr =
        B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

    /* ignore curr if forceall */
    if (!forceall && (i == curr))
        return NULL;

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

    PHYSADDRLOSET(pa,
                  (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
                   di->dataoffsetlow));
    PHYSADDRHISET(pa, 0);

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

    W_SM(&di->rxd32[i].addr, 0xdeadbeef);

    di->rxin = NEXTRXD(i);

    return rxp;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
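/*
 * Rotation example (illustrative, example indexes only): with ntxd = 8,
 * txin = 2, txout = 5 and the hardware ActiveDescr at index 6,
 * rot = TXD(6 - 2) = 4, so the three posted descriptors (and their txp[]
 * entries and segment maps) are copied from slots 2..4 to slots 6, 7 and
 * 0 (mod 8), EOT is re-fixed on whichever copy lands in slot ntxd - 1,
 * the vacated addr fields are poisoned with 0xdeadbeef, and txin/txout
 * advance to 6 and 1.
 */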
static void dma32_txrotate(dma_info_t *di)
{
    u16 ad;
    uint nactive;
    uint rot;
    u16 old, new;
    u32 w;
    u16 first, last;

    ASSERT(dma32_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = (u16) (B2I
                (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
                  >> XS_AD_SHIFT), dma32dd_t));
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
        if (new == (di->ntxd - 1))
            w |= CTRL_EOT;
        W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
        W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];

        /* Move the segment map as well */
        if (DMASGLIST_ENAB) {
            bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
                  sizeof(hnddma_seg_map_t));
            bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
        }

        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
    u32 control = D64_XC_XE;

    DMA_TRACE(("%s: dma_txinit\n", di->name));

    if (di->ntxd == 0)
        return;

    di->txin = di->txout = 0;
    di->hnddma.txavail = di->ntxd - 1;

    /* clear tx descriptor ring */
    BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

    /* DMA engine without alignment requirement requires table to be inited
     * before enabling the engine
     */
    if (!di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);

    if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
        control |= D64_XC_PD;
    OR_REG(di->osh, &di->d64txregs->control, control);

    /* DMA engine with alignment requirement requires table to be inited
     * before enabling the engine
     */
    if (di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
    u32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d64txregs->control);
    return (xc != 0xffffffff) && (xc & D64_XC_XE);
}

static void dma64_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool dma64_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0) ||
        ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
         D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range ==
                 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
                "transferred")));

    if (di->txin == di->txout)
        return;

    while ((p = dma64_getnexttxp(di, range))) {
        /* For unframed data, we don't have any packets to free */
        if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
            PKTFREE(di->osh, p, true);
    }
}
static bool dma64_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
            D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
            D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced = 0;
    u16 align;
    u16 align_bits;

    ddlen = sizeof(dma64dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
                           &alloced, &di->txdpaorig, &di->tx_dmah);
        if (va == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
            return false;
        }
        align = (1 << align_bits);
        di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
        di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
        PHYSADDRLOSET(di->txdpa,
                      PHYSADDRLO(di->txdpaorig) + di->txdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
        di->txdalloc = alloced;
        ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
    } else {
        va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
                           &alloced, &di->rxdpaorig, &di->rx_dmah);
        if (va == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
            return false;
        }
        align = (1 << align_bits);
        di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
        di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
        PHYSADDRLOSET(di->rxdpa,
                      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
        di->rxdalloc = alloced;
        ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
    }

    return true;
}
static bool dma64_txreset(dma_info_t *di)
{
    u32 status;

    if (di->ntxd == 0)
        return true;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
    SPINWAIT(((status =
               (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
              != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
             && (status != D64_XS0_XS_STOPPED), 10000);

    W_REG(di->osh, &di->d64txregs->control, 0);
    SPINWAIT(((status =
               (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
              != D64_XS0_XS_DISABLED), 10000);

    /* wait for the last transaction to complete */
    OSL_DELAY(300);

    return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return true;

    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
            (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}

static bool dma64_rxreset(dma_info_t *di)
{
    u32 status;

    if (di->nrxd == 0)
        return true;

    W_REG(di->osh, &di->d64rxregs->control, 0);
    SPINWAIT(((status =
               (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
              != D64_RS0_RS_DISABLED), 10000);

    return status == D64_RS0_RS_DISABLED;
}

static bool dma64_rxenabled(dma_info_t *di)
{
    u32 rc;

    rc = R_REG(di->osh, &di->d64rxregs->control);
    return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return true;

    if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
        D64_XS0_XS_IDLE)
        return 1;

    return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
    void *va;
    bool idle;
    u32 cd_offset;

    if (direction == DMA_TX) {
        cd_offset =
            R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
        idle = !NTXDACTIVE(di->txin, di->txout);
        va = di->txp[B2I(cd_offset, dma64dd_t)];
    } else {
        cd_offset =
            R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
        idle = !NRXDACTIVE(di->rxin, di->rxout);
        va = di->rxp[B2I(cd_offset, dma64dd_t)];
    }

    /* If DMA is IDLE, return NULL */
    if (idle) {
        DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
        va = NULL;
    }

    return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
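/*
 * Usage sketch (illustrative, not driver code): unframed TX pairs with
 * dma64_getpos() to stream a flat buffer and poll the engine's progress.
 *
 *	if (dma64_txunframed(di, buf, len, true) == 0) {
 *		void *pos = dma64_getpos(di, DMA_TX);
 *		pos is the data pointer of the descriptor the engine is
 *		working on, or NULL once the ring has drained
 *	}
 */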
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
    u16 txout;
    u32 flags = 0;
    dmaaddr_t pa;		/* phys addr */

    txout = di->txout;

    /* return nonzero if out of tx descriptors */
    if (NEXTTXD(txout) == di->txin)
        goto outoftxd;

    if (len == 0)
        return 0;

    pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

    flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

    if (txout == (di->ntxd - 1))
        flags |= D64_CTRL1_EOT;

    dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
    ASSERT(di->txp[txout] == NULL);

    /* save the buffer pointer - used by dma_getpos */
    di->txp[txout] = buf;

    txout = NEXTTXD(txout);
    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit) {
        W_REG(di->osh, &di->d64txregs->ptr,
              di->xmtptrbase + I2B(txout, dma64dd_t));
    }

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return 0;

 outoftxd:
    DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return -1;
}
/* !! tx entry routine
 * WARNING: the caller must check the return value for error.
 * the error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems
 */
2303 static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
2306 unsigned char *data;
2312 DMA_TRACE(("%s: dma_txfast\n", di->name));
2317 * Walk the chain of packet buffers
2318 * allocating and initializing transmit descriptor entries.
2320 for (p = p0; p; p = next) {
2322 hnddma_seg_map_t *map;
2327 len += PKTDMAPAD(di->osh, p);
2328 #endif /* BCM_DMAPAD */
2331 /* return nonzero if out of tx descriptors */
2332 if (NEXTTXD(txout) == di->txin)
2338 /* get physical address of buffer start */
2340 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
2342 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
2343 &di->txp_dmah[txout]);
2345 if (DMASGLIST_ENAB) {
2346 map = &di->txp_dmah[txout];
2348 /* See if all the segments can be accounted for */
2350 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
2358 for (j = 1; j <= nsegs; j++) {
2360 if (p == p0 && j == 1)
2361 flags |= D64_CTRL1_SOF;
2363 /* With a DMA segment list, Descriptor table is filled
2364 * using the segment list instead of looping over
2365 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
2366 * end of segment list is reached.
2368 if ((!DMASGLIST_ENAB && next == NULL) ||
2369 (DMASGLIST_ENAB && j == nsegs))
2370 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2371 if (txout == (di->ntxd - 1))
2372 flags |= D64_CTRL1_EOT;
2374 if (DMASGLIST_ENAB) {
2375 len = map->segs[j - 1].length;
2376 pa = map->segs[j - 1].addr;
2378 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2379 ASSERT(di->txp[txout] == NULL);
2381 txout = NEXTTXD(txout);
2384 /* See above. No need to loop over individual buffers */
2389 /* if last txd eof not set, fix it */
2390 if (!(flags & D64_CTRL1_EOF))
2391 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
2392 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2394 /* save the packet */
2395 di->txp[PREVTXD(txout)] = p0;
2397 /* bump the tx descriptor index */
2402 W_REG(di->osh, &di->d64txregs->ptr,
2403 di->xmtptrbase + I2B(txout, dma64dd_t));
2405 /* tx flow control */
2406 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2411 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
2412 PKTFREE(di->osh, p0, true);
2413 di->hnddma.txavail = 0;
2414 di->hnddma.txnobuf++;
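/*
 * Illustrative sketch only (not part of the driver): the WARNING above in
 * practice. On failure dma64_txfast() has already freed the packet chain,
 * so the caller must not touch p again and should only account for the
 * drop. The dispatch macro and the drop counter are assumptions; compiled
 * out.
 */
#if 0
static bool example_tx(hnddma_t *dmah, void *p, uint *drops)
{
	if (dma_txfast(dmah, p, true) != 0) {
		/* p was freed by the DMA layer; just record the loss */
		(*drops)++;
		return false;
	}
	return true;
}
#endif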
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of the hardware
 * pointers.
 */
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;
	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end = (u16) B2I((((R_REG(di->osh, &dregs->status0) &
				   D64_XS0_CD_MASK) -
				  di->xmtptrbase) & D64_XS0_CD_MASK,
				 dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(di->osh, &dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
		  "range %d\n", start, end, di->txout, range));
	return NULL;
}
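/*
 * Illustrative sketch only (not part of the driver): a typical TX-complete
 * reclaim loop. HNDDMA_RANGE_TRANSMITTED frees only packets the hardware
 * reports as sent; a reset path would pass HNDDMA_RANGE_ALL instead. The
 * hnddma.h dispatch macro is assumed; compiled out.
 */
#if 0
static void example_reclaim_tx(dma_info_t *di)
{
	void *p;

	while ((p = dma_getnexttxp(&di->hnddma, HNDDMA_RANGE_TRANSMITTED))
	       != NULL)
		PKTFREE(di->osh, p, true);
}
#endif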
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
	u32 w;

	/* probe the AE bit: set it, read it back, then restore the register */
	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) B2I((((R_REG(di->osh, &di->d64txregs->status1) &
			  D64_XS1_AD_MASK)
			 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* move the map */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
			      sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
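/*
 * Worked example of the rotation above (illustrative numbers, not from the
 * source): with ntxd = 64, txin = 60 and a hardware ActiveDescriptor index
 * ad = 2, rot = TXD(ad - txin) = (2 - 60) & 63 = 6, so every live
 * descriptor and its txp[] entry is copied forward six slots (modulo the
 * ring size) and txin/txout both advance by six.
 */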
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	dma32regs_t *dma32regs;
	osl_t *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit
	 * (> 32-bit) capability.
	 */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((BUSTYPE(sih->bustype) == SI_BUS) ||
			    ((BUSTYPE(sih->bustype) == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;

		/* DMA64 is always 32-bit capable, AE is always true */
		ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));

		return DMADDRWIDTH_32;
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *) dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sih->bustype) == SI_BUS) ||
	    ((BUSTYPE(sih->bustype) == PCI_BUS)
	     && sih->buscoretype == PCIE_CORE_ID)
	    || (_dma32_addrext(osh, dma32regs)))
		return DMADDRWIDTH_32;

	/* Fallthru */
	return DMADDRWIDTH_30;
}
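/*
 * Illustrative sketch only (not part of the driver): an attach path acting
 * on the reported width. The helper name is hypothetical; compiled out.
 */
#if 0
static bool example_wants_64bit_dma(si_t *sih, void *dmaregs)
{
	/* DMADDRWIDTH_64 means both the engine and the backplane/bus are
	 * 64-bit capable; anything less requires 32-bit (or 30-bit)
	 * addressing constraints on buffer allocation.
	 */
	return dma_addrwidth(sih, dmaregs) == DMADDRWIDTH_64;
}
#endif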