1 //------------------------------------------------------------------------------
2 // <copyright file="ar6k.c" company="Atheros">
3 // Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
6 // Permission to use, copy, modify, and/or distribute this software for any
7 // purpose with or without fee is hereby granted, provided that the above
8 // copyright notice and this permission notice appear in all copies.
10 // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 //------------------------------------------------------------------------------
20 //==============================================================================
21 // AR6K device layer that handles register level I/O
23 // Author(s): ="Atheros"
24 //==============================================================================
28 #include "hw/mbox_host_reg.h"
30 #include "../htc_debug.h"
32 #include "htc_packet.h"
35 #define MAILBOX_FOR_BLOCK_SIZE 1
37 int DevEnableInterrupts(struct ar6k_device *pDev);
38 int DevDisableInterrupts(struct ar6k_device *pDev);
40 static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev);
/* Return a register I/O packet to the device's free list so it can be
 * reused by a later async register operation.
 * NOTE(review): this excerpt elides some original lines; locking around
 * the enqueue (pDev->Lock) is not visible here -- confirm in full source. */
void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket)
    HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
/* Pull a free register I/O packet from the device's free list.
 * NOTE(review): lines elided in this excerpt; presumably returns NULL
 * when the free list is empty -- confirm in full source. */
struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev)
    struct htc_packet *pPacket;
    pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
/* Tear down the device layer: GMBOX state, the HIF attachment,
 * virtual-scatter resources, and finally the device mutex. */
void DevCleanup(struct ar6k_device *pDev)
    DevCleanupGMbox(pDev);
    if (pDev->HifAttached) {
        /* detach our HTC callbacks from the HIF layer */
        HIFDetachHTC(pDev->HIFDevice);
        pDev->HifAttached = false;
    DevCleanupVirtualScatterSupport(pDev);
    /* only delete the mutex if DevSetup actually created it */
    if (A_IS_MUTEX_VALID(&pDev->Lock)) {
        A_MUTEX_DELETE(&pDev->Lock);
/*
 * DevSetup - one-time initialization of the AR6K device layer.
 *
 * Attaches HTC callbacks to the HIF layer, queries mailbox addresses and
 * block sizes, carves up the pool of async register I/O packets, probes
 * optional HIF capabilities (pending-events function, IRQ processing
 * mode, recv mask/unmask override), disables target interrupts and sets
 * up the GMBOX. On failure the HIF attachment is undone.
 *
 * NOTE(review): this excerpt elides several original lines (status
 * checks, braces, returns); comments below describe only visible code.
 */
int DevSetup(struct ar6k_device *pDev)
    u32 blocksizes[AR6K_MAILBOXES];
    HTC_CALLBACKS htcCallbacks;
    DL_LIST_INIT(&pDev->ScatterReqHead);
    /* initialize our free list of IO packets */
    INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
    A_MUTEX_INIT(&pDev->Lock);
    A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
    /* the device layer handles these */
    htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
    htcCallbacks.dsrHandler = DevDsrHandler;
    htcCallbacks.context = pDev;
    status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);
    pDev->HifAttached = true;
    /* get the addresses for all 4 mailboxes */
    status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
                                &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));
    /* carve up register I/O packets (these are for ASYNC register I/O ) */
    for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
        struct htc_packet *pIOPacket;
        pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
        SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
                                      pDev->RegIOBuffers[i].Buffer,
                                      AR6K_REG_IO_BUFFER_SIZE,
        /* add each packet to the free list */
        AR6KFreeIOPacket(pDev,pIOPacket);
    /* get the block sizes */
    status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
                                blocksizes, sizeof(blocksizes));
    /* note: we actually get the block size of a mailbox other than 0, for SDIO the block
     * size on mailbox 0 is artificially set to 1. So we use the block size that is set
     * for the other 3 mailboxes */
    pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
    /* must be a power of 2 */
    A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);
    /* assemble mask, used for padding to a block */
    pDev->BlockMask = pDev->BlockSize - 1;
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
                    pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));
    pDev->GetPendingEventsFunc = NULL;
    /* see if the HIF layer implements the get pending events function */
    HIFConfigureDevice(pDev->HIFDevice,
                       HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
                       &pDev->GetPendingEventsFunc,
                       sizeof(pDev->GetPendingEventsFunc));
    /* assume we can process HIF interrupt events asynchronously */
    pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;
    /* see if the HIF layer overrides this assumption */
    HIFConfigureDevice(pDev->HIFDevice,
                       HIF_DEVICE_GET_IRQ_PROC_MODE,
                       &pDev->HifIRQProcessingMode,
                       sizeof(pDev->HifIRQProcessingMode));
    switch (pDev->HifIRQProcessingMode) {
        case HIF_DEVICE_IRQ_SYNC_ONLY:
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
            /* see if HIF layer wants HTC to yield */
            HIFConfigureDevice(pDev->HIFDevice,
                               HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
                               &pDev->HifIRQYieldParams,
                               sizeof(pDev->HifIRQYieldParams));
            if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
                AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
                    ("HIF requests that DSR yield per %d RECV packets \n",
                     pDev->HifIRQYieldParams.RecvPacketYieldCount));
                pDev->DSRCanYield = true;
        case HIF_DEVICE_IRQ_ASYNC_SYNC:
            AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
    pDev->HifMaskUmaskRecvEvent = NULL;
    /* see if the HIF layer implements the mask/unmask recv events function */
    HIFConfigureDevice(pDev->HIFDevice,
                       HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
                       &pDev->HifMaskUmaskRecvEvent,
                       sizeof(pDev->HifMaskUmaskRecvEvent));
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
        (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));
    status = DevDisableInterrupts(pDev);
    status = DevSetupGMbox(pDev);
    /* failure path: undo the HIF attachment */
    if (pDev->HifAttached) {
        HIFDetachHTC(pDev->HIFDevice);
        pDev->HifAttached = false;
218 int DevEnableInterrupts(struct ar6k_device *pDev)
221 struct ar6k_irq_enable_registers regs;
225 /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
226 pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
227 INT_STATUS_ENABLE_CPU_SET(0x01) |
228 INT_STATUS_ENABLE_COUNTER_SET(0x01);
230 if (NULL == pDev->GetPendingEventsFunc) {
231 pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
233 /* The HIF layer provided us with a pending events function which means that
234 * the detection of pending mbox messages is handled in the HIF layer.
235 * This is the case for the SPI2 interface.
236 * In the normal case we enable MBOX interrupts, for the case
237 * with HIFs that offer this mechanism, we keep these interrupts
239 pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
243 /* Set up the CPU Interrupt Status Register */
244 pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);
246 /* Set up the Error Interrupt Status Register */
247 pDev->IrqEnableRegisters.error_status_enable =
248 ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
249 ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);
251 /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
252 pDev->IrqEnableRegisters.counter_int_status_enable =
253 COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);
255 /* copy into our temp area */
256 memcpy(®s,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
260 /* always synchronous */
261 status = HIFReadWrite(pDev->HIFDevice,
262 INT_STATUS_ENABLE_ADDRESS,
263 ®s.int_status_enable,
264 AR6K_IRQ_ENABLE_REGS_SIZE,
265 HIF_WR_SYNC_BYTE_INC,
269 /* Can't write it for some reason */
270 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
271 ("Failed to update interrupt control registers err: %d\n", status));
278 int DevDisableInterrupts(struct ar6k_device *pDev)
280 struct ar6k_irq_enable_registers regs;
283 /* Disable all interrupts */
284 pDev->IrqEnableRegisters.int_status_enable = 0;
285 pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
286 pDev->IrqEnableRegisters.error_status_enable = 0;
287 pDev->IrqEnableRegisters.counter_int_status_enable = 0;
288 /* copy into our temp area */
289 memcpy(®s,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
293 /* always synchronous */
294 return HIFReadWrite(pDev->HIFDevice,
295 INT_STATUS_ENABLE_ADDRESS,
296 ®s.int_status_enable,
297 AR6K_IRQ_ENABLE_REGS_SIZE,
298 HIF_WR_SYNC_BYTE_INC,
/* enable device interrupts */
int DevUnmaskInterrupts(struct ar6k_device *pDev)
    /* for good measure, make sure interrupt are disabled before unmasking at the HIF
     * The rationale here is that between device insertion (where we clear the interrupts the first time)
     * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
     * The AR6K interrupt enables reset back to an "enabled" state when this happens.
    DevDisableInterrupts(pDev);
    /* NOTE(review): the two enable/unmask orderings below are almost
     * certainly alternate branches of an elided preprocessor conditional
     * (e.g. a platform-specific #ifdef) -- confirm in full source before
     * treating this as duplicated code. */
    IntStatus = DevEnableInterrupts(pDev);
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
    IntStatus = DevEnableInterrupts(pDev);
/* disable all device interrupts */
int DevMaskInterrupts(struct ar6k_device *pDev)
    /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
     * we zero out our shadow registers in DevDisableInterrupts()*/
    HIFMaskInterrupt(pDev->HIFDevice);
    /* then clear the target-side enables; its status is our return value */
    return DevDisableInterrupts(pDev);
/* callback when our fetch to enable/disable completes */
static void DevDoEnableDisableRecvAsyncHandler(void *Context, struct htc_packet *pPacket)
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
    /* a non-zero packet status means the async register write failed */
    if (pPacket->Status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            (" Failed to disable receiver, status:%d \n", pPacket->Status));
    /* free this IO packet */
    AR6KFreeIOPacket(pDev,pPacket);
    AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
/* disable packet reception (used in case the host runs out of buffers)
 * this is the "override" method when the HIF reports another methods to
 * disable recv events */
/* NOTE(review): several lines (branching between the async and sync
 * call sites, returns) are elided from this excerpt. */
static int DevDoEnableDisableRecvOverride(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
    struct htc_packet *pIOPacket = NULL;
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
        EnableRecv,AsyncMode));
    /* async path needs an I/O packet to carry completion context */
    pIOPacket = AR6KAllocIOPacket(pDev);
    if (NULL == pIOPacket) {
        status = A_NO_MEMORY;
    /* stick in our completion routine when the I/O operation completes */
    pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
    pIOPacket->pContext = pDev;
    /* call the HIF layer override and do this asynchronously */
    status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
                                         EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
    /* if we get here we are doing it synchronously */
    status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
                                         EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
    /* on failure, return the unused I/O packet to the free list */
    if (status && (pIOPacket != NULL)) {
        AR6KFreeIOPacket(pDev,pIOPacket);
402 /* disable packet reception (used in case the host runs out of buffers)
403 * this is the "normal" method using the interrupt enable registers through
405 static int DevDoEnableDisableRecvNormal(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
408 struct htc_packet *pIOPacket = NULL;
409 struct ar6k_irq_enable_registers regs;
411 /* take the lock to protect interrupt enable shadows */
415 pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
417 pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
420 /* copy into our temp area */
421 memcpy(®s,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
428 pIOPacket = AR6KAllocIOPacket(pDev);
430 if (NULL == pIOPacket) {
431 status = A_NO_MEMORY;
436 /* copy values to write to our async I/O buffer */
437 memcpy(pIOPacket->pBuffer,®s,AR6K_IRQ_ENABLE_REGS_SIZE);
439 /* stick in our completion routine when the I/O operation completes */
440 pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
441 pIOPacket->pContext = pDev;
443 /* write it out asynchronously */
444 HIFReadWrite(pDev->HIFDevice,
445 INT_STATUS_ENABLE_ADDRESS,
447 AR6K_IRQ_ENABLE_REGS_SIZE,
448 HIF_WR_ASYNC_BYTE_INC,
453 /* if we get here we are doing it synchronously */
455 status = HIFReadWrite(pDev->HIFDevice,
456 INT_STATUS_ENABLE_ADDRESS,
457 ®s.int_status_enable,
458 AR6K_IRQ_ENABLE_REGS_SIZE,
459 HIF_WR_SYNC_BYTE_INC,
464 if (status && (pIOPacket != NULL)) {
465 AR6KFreeIOPacket(pDev,pIOPacket);
/* Stop packet reception: dispatch to the register-based method or the
 * HIF-provided mask/unmask override, whichever is available. */
int DevStopRecv(struct ar6k_device *pDev, bool AsyncMode)
    if (NULL == pDev->HifMaskUmaskRecvEvent) {
        return DevDoEnableDisableRecvNormal(pDev,false,AsyncMode);
    return DevDoEnableDisableRecvOverride(pDev,false,AsyncMode);
/* Re-enable packet reception: dispatch to the register-based method or
 * the HIF-provided mask/unmask override, whichever is available. */
int DevEnableRecv(struct ar6k_device *pDev, bool AsyncMode)
    if (NULL == pDev->HifMaskUmaskRecvEvent) {
        return DevDoEnableDisableRecvNormal(pDev,true,AsyncMode);
    return DevDoEnableDisableRecvOverride(pDev,true,AsyncMode);
/* Poll the target's host interrupt status register until either no recv
 * is pending or the timeout (granularity ~100ms per visible counter
 * math) expires; *pbIsRecvPending reports the final state.
 * NOTE(review): the polling loop structure is partly elided here. */
int DevWaitForPendingRecv(struct ar6k_device *pDev,u32 TimeoutInMs,bool *pbIsRecvPending)
    u8 host_int_status = 0x0;
    if(TimeoutInMs < 100)
    counter = TimeoutInMs / 100;
    //Read the Host Interrupt Status Register
    status = HIFReadWrite(pDev->HIFDevice,
                          HOST_INT_STATUS_ADDRESS,
                          HIF_RD_SYNC_BYTE_INC,
    AR_DEBUG_PRINTF(ATH_LOG_ERR,("DevWaitForPendingRecv:Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n",status));
    /* bit 0 of host_int_status indicates pending recv; forced to 0 on read error */
    host_int_status = !status ? (host_int_status & (1 << 0)):0;
    *pbIsRecvPending = false;
    *pbIsRecvPending = true;
/* Debug helper: dump the IRQ processing registers (and GMBOX state when
 * the target exposes a GMBOX address) plus the IRQ enable registers.
 * Either pointer may be NULL to skip that table. */
void DevDumpRegisters(struct ar6k_device *pDev,
                      struct ar6k_irq_proc_registers *pIrqProcRegs,
                      struct ar6k_irq_enable_registers *pIrqEnableRegs)
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));
    if (pIrqProcRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Host Int Status: 0x%x\n",pIrqProcRegs->host_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("CPU Int Status: 0x%x\n",pIrqProcRegs->cpu_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Error Int Status: 0x%x\n",pIrqProcRegs->error_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status: 0x%x\n",pIrqProcRegs->counter_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Mbox Frame: 0x%x\n",pIrqProcRegs->mbox_frame));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead Valid: 0x%x\n",pIrqProcRegs->rx_lookahead_valid));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 0: 0x%x\n",pIrqProcRegs->rx_lookahead[0]));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 1: 0x%x\n",pIrqProcRegs->rx_lookahead[1]));
        if (pDev->MailBoxInfo.GMboxAddress != 0) {
            /* if the target supports GMBOX hardware, dump some additional state */
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX Host Int Status 2: 0x%x\n",pIrqProcRegs->host_int_status2));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX RX Avail: 0x%x\n",pIrqProcRegs->gmbox_rx_avail));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 0: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 1: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
    if (pIrqEnableRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Int Status Enable: 0x%x\n",pIrqEnableRegs->int_status_enable));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
587 #define DEV_GET_VIRT_DMA_INFO(p) ((struct dev_scatter_dma_virtual_info *)((p)->HIFPrivate[0]))
/* Allocate a scatter request from the device's free list (used when the
 * device layer emulates scatter support). Context is actually the
 * ar6k_device pointer, matching DevSetupVirtualScatterSupport's wiring.
 * NOTE(review): empty-list handling is elided from this excerpt. */
static struct hif_scatter_req *DevAllocScatterReq(struct hif_device *Context)
    struct dl_list *pItem;
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
    /* recover the enclosing request from its embedded list link */
    return A_CONTAINING_STRUCT(pItem, struct hif_scatter_req, ListLink);
/* Return a scatter request to the device's free list; counterpart of
 * DevAllocScatterReq. */
static void DevFreeScatterReq(struct hif_device *Context, struct hif_scatter_req *pReq)
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
/* Copy between the scatter-list entries and the contiguous bounce
 * buffer. FromDMA=true copies bounce buffer -> scatter entries (after a
 * read); false copies scatter entries -> bounce buffer (before a write).
 * Flags an error if any entry is longer than the bytes remaining in
 * TotalLength (malformed scatter list). */
int DevCopyScatterListToFromDMABuffer(struct hif_scatter_req *pReq, bool FromDMA)
    u8 *pDMABuffer = NULL;
    pDMABuffer = pReq->pScatterBounceBuffer;
    if (pDMABuffer == NULL) {
    remaining = (int)pReq->TotalLength;
    for (i = 0; i < pReq->ValidScatterEntries; i++) {
        length = min((int)pReq->ScatterList[i].Length, remaining);
        if (length != (int)pReq->ScatterList[i].Length) {
            /* there is a problem with the scatter list */
        /* from DMA buffer */
        memcpy(pReq->ScatterList[i].pBuffer, pDMABuffer , length);
        memcpy(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
        /* advance through the contiguous bounce buffer */
        pDMABuffer += length;
/* Completion callback for async virtual-scatter transfers: propagate the
 * I/O status into the scatter request, recycle the carrier I/O packet,
 * then invoke the caller's completion routine. */
static void DevReadWriteScatterAsyncHandler(void *Context, struct htc_packet *pPacket)
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    struct hif_scatter_req *pReq = (struct hif_scatter_req *)pPacket->pPktContext;
    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
    pReq->CompletionStatus = pPacket->Status;
    AR6KFreeIOPacket(pDev,pPacket);
    pReq->CompletionRoutine(pReq);
    AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
/* Emulated ("virtual DMA") scatter transfer: validates the request,
 * optionally wraps it in an I/O packet for async completion, adjusts the
 * address for extended-address writes so the last byte lands on the
 * mailbox EOM, and issues a single legacy HIFReadWrite over the bounce
 * buffer. On a non-pending async failure the completion routine is
 * invoked directly with the error.
 * NOTE(review): several lines (length-zero return, sync status handling)
 * are elided from this excerpt. */
static int DevReadWriteScatter(struct hif_device *Context, struct hif_scatter_req *pReq)
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    struct htc_packet *pIOPacket = NULL;
    u32 request = pReq->Request;
    if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("Invalid length: %d \n", pReq->TotalLength));
    if (pReq->TotalLength == 0) {
    if (request & HIF_ASYNCHRONOUS) {
        /* use an I/O packet to carry this request */
        pIOPacket = AR6KAllocIOPacket(pDev);
        if (NULL == pIOPacket) {
            status = A_NO_MEMORY;
        /* save the request */
        pIOPacket->pPktContext = pReq;
        /* stick in our completion routine when the I/O operation completes */
        pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
        pIOPacket->pContext = pDev;
    if (request & HIF_WRITE) {
        /* in virtual DMA, we are issuing the requests through the legacy HIFReadWrite API
         * this API will adjust the address automatically for the last byte to fall on the mailbox
        /* if the address is an extended address, we can adjust the address here since the extended
         * address will bypass the normal checks in legacy HIF layers */
        if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
            pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
    /* use legacy readwrite */
    status = HIFReadWrite(pDev->HIFDevice,
                          DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
                          (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);
    /* async failure (other than pending): complete the request with the error */
    if ((status != A_PENDING) && status && (request & HIF_ASYNCHRONOUS)) {
        if (pIOPacket != NULL) {
            AR6KFreeIOPacket(pDev,pIOPacket);
        pReq->CompletionStatus = status;
        pReq->CompletionRoutine(pReq);
/* Free all virtual-scatter requests by draining the free list.
 * NOTE(review): the loop and A_FREE of each request are elided from this
 * excerpt; only the alloc call is visible. */
static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev)
    struct hif_scatter_req *pReq;
    pReq = DevAllocScatterReq((struct hif_device *)pDev);
/* function to set up virtual scatter support if HIF layer has not implemented the interface */
/* Allocates AR6K_SCATTER_REQS requests, each with an inline
 * dev_scatter_dma_virtual_info and a cache-line-aligned bounce buffer,
 * pushes them onto the free list, and wires the device-layer alloc/free/
 * readwrite functions into HifScatterInfo with SPI-appropriate limits. */
static int DevSetupVirtualScatterSupport(struct ar6k_device *pDev)
    int bufferSize, sgreqSize;
    struct dev_scatter_dma_virtual_info *pVirtualInfo;
    struct hif_scatter_req *pReq;
    /* room for the info struct, alignment slack, and the bounce area */
    bufferSize = sizeof(struct dev_scatter_dma_virtual_info) +
                 2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
    /* hif_scatter_req already contains one hif_scatter_item, hence -1 */
    sgreqSize = sizeof(struct hif_scatter_req) +
                (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(struct hif_scatter_item));
    for (i = 0; i < AR6K_SCATTER_REQS; i++) {
        /* allocate the scatter request, buffer info and the actual virtual buffer itself */
        pReq = (struct hif_scatter_req *)A_MALLOC(sgreqSize + bufferSize);
        status = A_NO_MEMORY;
        A_MEMZERO(pReq, sgreqSize);
        /* the virtual DMA starts after the scatter request struct */
        pVirtualInfo = (struct dev_scatter_dma_virtual_info *)((u8 *)pReq + sgreqSize);
        A_MEMZERO(pVirtualInfo, sizeof(struct dev_scatter_dma_virtual_info));
        pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
        /* align buffer to cache line in case host controller can actually DMA this */
        pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
        /* store the structure in the private area */
        pReq->HIFPrivate[0] = pVirtualInfo;
        /* we emulate a DMA bounce interface */
        pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
        pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
        /* free request to the list */
        DevFreeScatterReq((struct hif_device *)pDev,pReq);
    /* failure path: release anything already allocated */
    DevCleanupVirtualScatterSupport(pDev);
    pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
    pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
    pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
    if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
        pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
        pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
    pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
    pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
    /* mark that scatter is emulated, not native HIF scatter */
    pDev->ScatterIsVirtual = true;
/* Undo message-bundling setup by releasing any virtual scatter state. */
int DevCleanupMsgBundling(struct ar6k_device *pDev)
    DevCleanupVirtualScatterSupport(pDev);
/* Configure HTC message bundling: query native HIF scatter support,
 * fall back to the virtual (bounce-buffer) implementation when the HIF
 * lacks it, then derive the maximum recv/send bundle sizes from the
 * scatter limits and the extended mailbox address range.
 * NOTE(review): some status checks and returns are elided from this
 * excerpt. */
int DevSetupMsgBundling(struct ar6k_device *pDev, int MaxMsgsPerTransfer)
    if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
                                &pDev->HifScatterInfo,
                                sizeof(pDev->HifScatterInfo));
    AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
        ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));
    /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
    status = DevSetupVirtualScatterSupport(pDev);
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
        ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
         DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
        ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
         DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    /* for the recv path, the maximum number of bytes per recv bundle is just limited
     * by the maximum transfer size at the HIF layer */
    pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
    if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K : SPI bus requires TX bundling disabled\n"));
        pDev->MaxSendBundleSize = 0;
    /* for the send path, the max transfer size is limited by the existence and size of
     * the extended mailbox address range */
    if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
        pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
    pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
    if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
        /* limit send bundle size to what the HIF can support for scatter requests */
        pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
        ("AR6K: max recv: %d max send: %d \n",
         DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));
/* Submit a scatter request for a mailbox read or write. Reads target the
 * normal mailbox address; writes larger than the legacy limit use the
 * extended address range. For native (non-virtual) scatter, the address
 * is advanced so the last byte lands on the mailbox end-of-message.
 * After DEV_PREPARE_SCATTER_OPERATION, the request is dispatched either
 * to the device layer (virtual) or the HIF layer (native); on sync
 * completion the request's status is filled in and finish processing
 * runs here. A_PENDING indicates async completion later.
 * NOTE(review): several lines (Read branch structure, returns) are
 * elided from this excerpt. */
int DevSubmitScatterRequest(struct ar6k_device *pDev, struct hif_scatter_req *pScatterReq, bool Read, bool Async)
    pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
    pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
    A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
    /* write operation */
    pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
    A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
    if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
        /* for large writes use the extended address */
        pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
        mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
    pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
    mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
    if (!pDev->ScatterIsVirtual) {
        /* we are passing this scatter list down to the HIF layer' scatter request handler, fixup the address
         * so that the last byte falls on the EOM, we do this for those HIFs that support the
        pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
    AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
        ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
         pScatterReq->ValidScatterEntries,
         pScatterReq->TotalLength,
         pScatterReq->Address,
         Async ? "ASYNC" : "SYNC",
         (Read) ? "RD" : "WR"));
    status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);
    /* preparation failed: complete the request with the error */
    pScatterReq->CompletionStatus = status;
    pScatterReq->CompletionRoutine(pScatterReq);
    status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
    /* in sync mode, we can touch the scatter request */
    pScatterReq->CompletionStatus = status;
    DEV_FINISH_SCATTER_OPERATION(pScatterReq);
    if (status == A_PENDING) {
955 #ifdef MBOXHW_UNIT_TEST
958 /* This is a mailbox hardware unit test that must be called in a schedulable context
959 * This test is very simple, it will send a list of buffers with a counting pattern
960 * and the target will invert the data and send the message back
962 * the unit test has the following constraints:
964 * The target has at least 8 buffers of 256 bytes each. The host will send
965 * the following pattern of buffers in rapid succession :
967 * 1 buffer - 128 bytes
968 * 1 buffer - 256 bytes
969 * 1 buffer - 512 bytes
970 * 1 buffer - 1024 bytes
972 * The host will send the buffers to one mailbox and wait for buffers to be reflected
973 * back from the same mailbox. The target sends the buffers FIFO order.
974 * Once the final buffer has been received for a mailbox, the next mailbox is tested.
 * Note: To simplify the test, we assume that the chosen buffer sizes
 * will fall on a nice block pad
980 * It is expected that higher-order tests will be written to stress the mailboxes using
 * a message-based protocol (with some performance timing) that can create more
 * randomness in the packets sent over mailboxes.
986 #define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
988 #define BUFFER_BLOCK_PAD 128
1004 #define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
1005 A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
1006 A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
1007 A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )
1009 #define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4)
1011 #define TEST_CREDITS_RECV_TIMEOUT 100
1013 static u8 g_Buffer[TOTAL_BYTES];
1014 static u32 g_MailboxAddrs[AR6K_MAILBOXES];
1015 static u32 g_BlockSizes[AR6K_MAILBOXES];
1017 #define BUFFER_PROC_LIST_DEPTH 4
1019 struct buffer_proc_list {
1025 #define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \
1027 (pList)->pBuffer = (pCurrpos); \
1028 (pList)->length = (len); \
1029 (pCurrpos) += (len); \
/* a simple and crude way to send different "message" sizes */
/* Populate pList (depth BUFFER_PROC_LIST_DEPTH) with four sub-buffers of
 * g_Buffer at sizes BUFFER1..BUFFER4; PUSH_BUFF_PROC_ENTRY advances the
 * cursor through the global buffer as it fills each entry. */
static void AssembleBufferList(struct buffer_proc_list *pList)
    u8 *pBuffer = g_Buffer;
#if BUFFER_PROC_LIST_DEPTH < 4
#error "Buffer processing list depth is not deep enough!!"
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
    PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
1049 #define FILL_ZERO true
1050 #define FILL_COUNTING false
/* Fill g_Buffer either with zeros (Zero=true) or with a 16-bit counting
 * pattern (each halfword equals its index). */
static void InitBuffers(bool Zero)
    u16 *pBuffer16 = (u16 *)g_Buffer;
    /* fill buffer with 16 bit counting pattern or zeros */
    for (i = 0; i < (TOTAL_BYTES / 2) ; i++) {
        pBuffer16[i] = (u16)i;
/* Verify one reflected buffer: the target inverts the counting pattern,
 * so every halfword must equal the bitwise NOT of the running counter
 * seeded from the (inverted) first halfword. Returns true on match. */
static bool CheckOneBuffer(u16 *pBuffer16, int Length)
    bool success = true;
    /* get the starting count */
    startCount = pBuffer16[0];
    /* invert it, this is the expected value */
    startCount = ~startCount;
    /* scan the buffer and verify */
    for (i = 0; i < (Length / 2) ; i++,startCount++) {
        /* target will invert all the data */
        if ((u16)pBuffer16[i] != (u16)~startCount) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
                pBuffer16[i], ((u16)~startCount), i, Length));
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
                pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer16[i+3]));
/* Verify all reflected buffers by rebuilding the buffer list and running
 * CheckOneBuffer over each entry; logs the first failing buffer. */
static bool CheckBuffers(void)
    bool success = true;
    struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
    /* assemble the list */
    AssembleBufferList(checkList);
    /* scan the buffers and verify */
    for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) {
        success = CheckOneBuffer((u16 *)checkList[i].pBuffer, checkList[i].length);
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
            (u32)checkList[i].pBuffer, checkList[i].length));
/* find the end marker for the last buffer we will be sending */
/* Re-initializes the counting pattern, rebuilds the buffer list, and
 * returns the little-endian 16-bit value occupying the final two bytes
 * of the last buffer -- the value that marks end of transmission. */
static u16 GetEndMarker(void)
    struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
    /* fill up buffers with the normal counting pattern */
    InitBuffers(FILL_COUNTING);
    /* assemble the list we will be sending down */
    AssembleBufferList(checkList);
    /* point to the last 2 bytes of the last buffer */
    pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);
    /* the last count in the last buffer is the marker */
    return (u16)pBuffer[0] | ((u16)pBuffer[1] << 8);
1133 #define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR
/* send the ordered buffers to the target */
/* Fills the counting pattern, then writes each buffer synchronously to
 * the mailbox, padding every transfer up to the mailbox block size.
 * NOTE(review): status checking inside the loop is elided from this
 * excerpt. */
static int SendBuffers(struct ar6k_device *pDev, int mbox)
    u32 request = HIF_WR_SYNC_BLOCK_INC;
    struct buffer_proc_list sendList[BUFFER_PROC_LIST_DEPTH];
    int totalwPadding = 0;
    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));
    /* fill buffer with counting pattern */
    InitBuffers(FILL_COUNTING);
    /* assemble the order in which we send */
    AssembleBufferList(sendList);
    for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {
        /* we are doing block transfers, so we need to pad everything to a block size */
        paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
                       (~(g_BlockSizes[mbox] - 1));
        /* send each buffer synchronously */
        status = HIFReadWrite(pDev->HIFDevice,
                              g_MailboxAddrs[mbox],
                              sendList[i].pBuffer,
        totalBytes += sendList[i].length;
        totalwPadding += paddedLength;
    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));
/* poll the mailbox credit counter until we get a credit or timeout */
/* Reads the auto-decrementing COUNT_DEC register for this mailbox
 * (registers for mailboxes start at index AR6K_MAILBOXES, 4 bytes each)
 * until a credit appears or TEST_CREDITS_RECV_TIMEOUT polls elapse.
 * NOTE(review): the polling-loop braces and returns are elided from this
 * excerpt. */
static int GetCredits(struct ar6k_device *pDev, int mbox, int *pCredits)
    int timeout = TEST_CREDITS_RECV_TIMEOUT;
    /* Read the counter register to get credits, this auto-decrements */
    address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
    status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
                          HIF_RD_SYNC_BYTE_FIX, NULL);
    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
        ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
        (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
    /* delay a little, target may not be ready */
    *pCredits = credits;
1226 /* wait for the buffers to come back */
/* Receive the echoed test buffers from the target mailbox, credit-paced:
 * block on GetCredits(), then drain that many buffers with synchronous
 * block-incrementing reads, until all BUFFER_PROC_LIST_DEPTH buffers are
 * back. Verifies only the byte count here; pattern checking is done by
 * the caller. Error-path lines are partially elided in this view. */
1227 static int RecvBuffers(struct ar6k_device *pDev, int mbox)
1230 u32 request = HIF_RD_SYNC_BLOCK_INC;
1231 struct buffer_proc_list recvList[BUFFER_PROC_LIST_DEPTH];
1237 int totalwPadding = 0;
1239 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));
1241 /* zero the buffers */
1242 InitBuffers(FILL_ZERO);
1244 /* assemble the order in which we should receive */
1245 AssembleBufferList(recvList);
1249 while (curBuffer < BUFFER_PROC_LIST_DEPTH) {
1251 /* get number of buffers that have been completed, this blocks
1252 * until we get at least 1 credit or it times out */
1253 status = GetCredits(pDev, mbox, &credits);
1259 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));
1261 /* get all the buffers that are sitting on the queue */
1262 for (i = 0; i < credits; i++) {
/* credits should never exceed the number of outstanding buffers */
1263 A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
1264 /* recv the current buffer synchronously, the buffers should come back in
1265 * order... with padding applied by the target */
/* round up to block size — same power-of-two mask trick as SendBuffers */
1266 paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
1267 (~(g_BlockSizes[mbox] - 1));
/* NOTE(review): the remaining HIFReadWrite arguments and the status
 * check were elided from this view — confirm against the original. */
1269 status = HIFReadWrite(pDev->HIFDevice,
1270 g_MailboxAddrs[mbox],
1271 recvList[curBuffer].pBuffer,
1276 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
1277 recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
1281 totalwPadding += paddedLength;
1282 totalBytes += recvList[curBuffer].length;
1289 /* go back and get some more */
/* sanity check: every unpadded payload byte came back */
1293 if (totalBytes != TEST_BYTES) {
1296 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
1297 mbox, totalBytes, totalwPadding));
/* Run one full send/receive/verify loopback cycle on a single mailbox:
 * push the pattern buffers, block until the target echoes them back, then
 * verify the returned data patterns. Returns 0 on success — presumably;
 * the early-return/brace lines are elided in this view. */
1305 static int DoOneMboxHWTest(struct ar6k_device *pDev, int mbox)
1310 /* send out buffers */
1311 status = SendBuffers(pDev,mbox);
1314 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
1318 /* go get them, this will block */
1319 status = RecvBuffers(pDev, mbox);
1322 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
1326 /* check the returned data patterns */
1327 if (!CheckBuffers()) {
1328 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
1333 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));
1340 /* here is where the test starts */
1341 int DoMboxHWTest(struct ar6k_device *pDev)
1352 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n"));
1355 /* get the addresses for all 4 mailboxes */
1356 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
1357 g_MailboxAddrs, sizeof(g_MailboxAddrs));
1364 /* get the block sizes */
1365 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
1366 g_BlockSizes, sizeof(g_BlockSizes));
1373 /* note, the HIF layer usually reports mbox 0 to have a block size of
1374 * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
1376 g_BlockSizes[0] = g_BlockSizes[1];
1377 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));
1379 if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
1380 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
1381 g_BlockSizes[1], BUFFER_BLOCK_PAD));
1385 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));
1387 /* the target lets us know it is ready by giving us 1 credit on
1389 status = GetCredits(pDev, 0, &credits);
1392 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
1396 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));
1398 /* read the first 4 scratch registers */
1399 status = HIFReadWrite(pDev->HIFDevice,
1403 HIF_RD_SYNC_BYTE_INC,
1407 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait get parameters \n"));
1411 numBufs = params[0];
1412 bufferSize = (int)(((u16)params[2] << 8) | (u16)params[1]);
1414 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
1415 ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
1416 numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));
1418 if ((numBufs * bufferSize) < TOTAL_BYTES) {
1419 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
1420 TOTAL_BYTES, (numBufs*bufferSize)));
1425 temp = GetEndMarker();
1427 status = HIFReadWrite(pDev->HIFDevice,
1428 SCRATCH_ADDRESS + 4,
1431 HIF_WR_SYNC_BYTE_INC,
1435 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
1439 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));
1441 temp = (u16)g_BlockSizes[1];
1442 /* convert to a mask */
1444 status = HIFReadWrite(pDev->HIFDevice,
1445 SCRATCH_ADDRESS + 6,
1448 HIF_WR_SYNC_BYTE_INC,
1452 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
1456 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));
1458 /* execute the test on each mailbox */
1459 for (i = 0; i < AR6K_MAILBOXES; i++) {
1460 status = DoOneMboxHWTest(pDev, i);
1469 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \n"));
1471 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n"));
1473 /* don't let HTC_Start continue, the target is actually not running any HTC code */