f8607bc08929112805afffb9665fbe82cce68207
[pandora-kernel.git] / drivers / staging / ath6kl / htc2 / AR6000 / ar6k.c
1 //------------------------------------------------------------------------------
2 // <copyright file="ar6k.c" company="Atheros">
3 //    Copyright (c) 2007-2010 Atheros Corporation.  All rights reserved.
4 // 
5 //
6 // Permission to use, copy, modify, and/or distribute this software for any
7 // purpose with or without fee is hereby granted, provided that the above
8 // copyright notice and this permission notice appear in all copies.
9 //
10 // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 //
18 //
19 //------------------------------------------------------------------------------
20 //==============================================================================
21 // AR6K device layer that handles register level I/O
22 //
23 // Author(s): ="Atheros"
24 //==============================================================================
25
26 #include "a_config.h"
27 #include "athdefs.h"
28 #include "hw/mbox_host_reg.h"
29 #include "a_osapi.h"
30 #include "../htc_debug.h"
31 #include "hif.h"
32 #include "htc_packet.h"
33 #include "ar6k.h"
34
35 #define MAILBOX_FOR_BLOCK_SIZE          1
36
37 int DevEnableInterrupts(struct ar6k_device *pDev);
38 int DevDisableInterrupts(struct ar6k_device *pDev);
39
40 static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev);
41
/* Return a register I/O packet to the device's free list.
 * Thread-safe: the list is protected by the device lock. */
void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket)
{
    LOCK_AR6K(pDev);
    HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
    UNLOCK_AR6K(pDev);
}
48
49 struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev)
50 {
51     struct htc_packet *pPacket;
52
53     LOCK_AR6K(pDev);
54     pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
55     UNLOCK_AR6K(pDev);
56
57     return pPacket;
58 }
59
/* Tear down the device layer: undo GMBOX setup, detach from the HIF
 * layer (only if the attach in DevSetup() succeeded), release the
 * virtual scatter request pool and finally destroy the device lock. */
void DevCleanup(struct ar6k_device *pDev)
{
    DevCleanupGMbox(pDev);

    if (pDev->HifAttached) {
        HIFDetachHTC(pDev->HIFDevice);
        pDev->HifAttached = false;
    }

    DevCleanupVirtualScatterSupport(pDev);

        /* the mutex may not have been initialized if setup failed early,
         * so validate it before deleting */
    if (A_IS_MUTEX_VALID(&pDev->Lock)) {
        A_MUTEX_DELETE(&pDev->Lock);
    }
}
75
/*
 * DevSetup - one-time initialization of the AR6K device layer.
 *
 * Registers the device-layer callbacks with the HIF layer, discovers
 * mailbox addresses and block sizes, seeds the free list of async
 * register I/O packets and queries optional HIF capabilities
 * (pending-events function, IRQ processing mode, recv mask/unmask
 * override).  Finishes by disabling target interrupts and setting up
 * the GMBOX.
 *
 * Returns 0 on success.  On failure only the HIF attach is rolled back
 * here; the caller is still expected to invoke DevCleanup().
 */
int DevSetup(struct ar6k_device *pDev)
{
    u32 blocksizes[AR6K_MAILBOXES];
    int status = 0;
    int      i;
    HTC_CALLBACKS htcCallbacks;

    do {

        DL_LIST_INIT(&pDev->ScatterReqHead);
           /* initialize our free list of IO packets */
        INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
        A_MUTEX_INIT(&pDev->Lock);

        A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
            /* the device layer handles these */
        htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
        htcCallbacks.dsrHandler = DevDsrHandler;
        htcCallbacks.context = pDev;

        status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);

        if (status) {
            break;
        }

        pDev->HifAttached = true;

            /* get the addresses for all 4 mailboxes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
                                    &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));

        if (status) {
            A_ASSERT(false);
            break;
        }

            /* carve up register I/O packets (these are for ASYNC register I/O ) */
        for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
            struct htc_packet *pIOPacket;
            pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
            SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
                                          pDev,
                                          pDev->RegIOBuffers[i].Buffer,
                                          AR6K_REG_IO_BUFFER_SIZE,
                                          0); /* don't care */
            AR6KFreeIOPacket(pDev,pIOPacket);
        }

            /* get the block sizes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
                                    blocksizes, sizeof(blocksizes));

        if (status) {
            A_ASSERT(false);
            break;
        }

            /* note: we actually get the block size of a mailbox other than 0, for SDIO the block
             * size on mailbox 0 is artificially set to 1.  So we use the block size that is set
             * for the other 3 mailboxes */
        pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
            /* must be a power of 2 */
        A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);

            /* assemble mask, used for padding to a block */
        pDev->BlockMask = pDev->BlockSize - 1;

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
                    pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));

        pDev->GetPendingEventsFunc = NULL;
            /* see if the HIF layer implements the get pending events function  */
            /* note: failures here are benign; the default (NULL) means the
             * normal interrupt-status path is used instead */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
                           &pDev->GetPendingEventsFunc,
                           sizeof(pDev->GetPendingEventsFunc));

            /* assume we can process HIF interrupt events asynchronously */
        pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

            /* see if the HIF layer overrides this assumption */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_IRQ_PROC_MODE,
                           &pDev->HifIRQProcessingMode,
                           sizeof(pDev->HifIRQProcessingMode));

        switch (pDev->HifIRQProcessingMode) {
            case HIF_DEVICE_IRQ_SYNC_ONLY:
                AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
                    /* see if HIF layer wants HTC to yield */
                HIFConfigureDevice(pDev->HIFDevice,
                                   HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
                                   &pDev->HifIRQYieldParams,
                                   sizeof(pDev->HifIRQYieldParams));

                if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
                        ("HIF requests that DSR yield per %d RECV packets \n",
                        pDev->HifIRQYieldParams.RecvPacketYieldCount));
                    pDev->DSRCanYield = true;
                }
                break;
            case HIF_DEVICE_IRQ_ASYNC_SYNC:
                AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
                break;
            default:
                A_ASSERT(false);
        }

        pDev->HifMaskUmaskRecvEvent = NULL;

            /* see if the HIF layer implements the mask/unmask recv events function  */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
                           &pDev->HifMaskUmaskRecvEvent,
                           sizeof(pDev->HifMaskUmaskRecvEvent));

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
                 (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));

            /* start with target interrupts off; DevUnmaskInterrupts()
             * enables them once HTC is ready */
        status = DevDisableInterrupts(pDev);

        if (status) {
            break;
        }

        status = DevSetupGMbox(pDev);

    } while (false);

    if (status) {
        if (pDev->HifAttached) {
            HIFDetachHTC(pDev->HIFDevice);
            pDev->HifAttached = false;
        }
    }

    return status;

}
217
/*
 * DevEnableInterrupts - program the target's interrupt-enable register
 * group from the shadow copy kept in pDev->IrqEnableRegisters.
 *
 * The shadow is updated under the device lock, copied to a local, and
 * then written to the target with a single synchronous byte-incrementing
 * write (the enable registers are laid out contiguously starting at
 * INT_STATUS_ENABLE_ADDRESS, AR6K_IRQ_ENABLE_REGS_SIZE bytes total).
 *
 * Returns 0 on success or the HIFReadWrite() error code.
 */
int DevEnableInterrupts(struct ar6k_device *pDev)
{
    int                  status;
    struct ar6k_irq_enable_registers regs;

    LOCK_AR6K(pDev);

        /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
    pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
                                      INT_STATUS_ENABLE_CPU_SET(0x01) |
                                      INT_STATUS_ENABLE_COUNTER_SET(0x01);

    if (NULL == pDev->GetPendingEventsFunc) {
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        /* The HIF layer provided us with a pending events function which means that
         * the detection of pending mbox messages is handled in the HIF layer.
         * This is the case for the SPI2 interface.
         * In the normal case we enable MBOX interrupts, for the case
         * with HIFs that offer this mechanism, we keep these interrupts
         * masked */
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }


    /* Set up the CPU Interrupt Status Register
     * (all individual CPU-sourced interrupts left masked here) */
    pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);

    /* Set up the Error Interrupt Status Register */
    pDev->IrqEnableRegisters.error_status_enable =
                                  ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
                                  ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);

    /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
    pDev->IrqEnableRegisters.counter_int_status_enable =
        COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);

        /* copy into our temp area so the bus write can happen outside the lock */
    memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

        /* always synchronous */
    status = HIFReadWrite(pDev->HIFDevice,
                          INT_STATUS_ENABLE_ADDRESS,
                          &regs.int_status_enable,
                          AR6K_IRQ_ENABLE_REGS_SIZE,
                          HIF_WR_SYNC_BYTE_INC,
                          NULL);

    if (status) {
        /* Can't write it for some reason */
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("Failed to update interrupt control registers err: %d\n", status));

    }

    return status;
}
277
/*
 * DevDisableInterrupts - zero every target interrupt-enable shadow
 * register and push the cleared group to the device in one synchronous
 * byte-incrementing write.
 *
 * Returns 0 on success or the HIFReadWrite() error code.
 */
int DevDisableInterrupts(struct ar6k_device *pDev)
{
    struct ar6k_irq_enable_registers regs;

    LOCK_AR6K(pDev);
        /* Disable all interrupts */
    pDev->IrqEnableRegisters.int_status_enable = 0;
    pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
    pDev->IrqEnableRegisters.error_status_enable = 0;
    pDev->IrqEnableRegisters.counter_int_status_enable = 0;
        /* copy into our temp area so the bus write can happen outside the lock */
    memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

        /* always synchronous */
    return HIFReadWrite(pDev->HIFDevice,
                        INT_STATUS_ENABLE_ADDRESS,
                        &regs.int_status_enable,
                        AR6K_IRQ_ENABLE_REGS_SIZE,
                        HIF_WR_SYNC_BYTE_INC,
                        NULL);
}
301
/* enable device interrupts */
int DevUnmaskInterrupts(struct ar6k_device *pDev)
{
    /* for good measure, make sure interrupt are disabled before unmasking at the HIF
     * layer.
     * The rationale here is that between device insertion (where we clear the interrupts the first time)
     * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
     * The AR6K interrupt enables reset back to an "enabled" state when this happens.
     *  */
    int IntStatus = 0;
    DevDisableInterrupts(pDev);

#ifdef THREAD_X
    // Tobe verified...
    /* NOTE(review): under THREAD_X the target enables are written before
     * the host controller is unmasked - the reverse of the default path
     * below; the original author flagged this ordering as unverified */
    IntStatus = DevEnableInterrupts(pDev);
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
#else
    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);
    IntStatus = DevEnableInterrupts(pDev);
#endif

    return IntStatus;
}
327
/* disable all device interrupts
 * Returns the DevDisableInterrupts() status (0 on success). */
int DevMaskInterrupts(struct ar6k_device *pDev)
{
        /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
         * we zero out our shadow registers in DevDisableInterrupts()*/
    HIFMaskInterrupt(pDev->HIFDevice);

    return DevDisableInterrupts(pDev);
}
337
338 /* callback when our fetch to enable/disable completes */
339 static void DevDoEnableDisableRecvAsyncHandler(void *Context, struct htc_packet *pPacket)
340 {
341     struct ar6k_device *pDev = (struct ar6k_device *)Context;
342
343     AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
344
345     if (pPacket->Status) {
346         AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
347                 (" Failed to disable receiver, status:%d \n", pPacket->Status));
348     }
349         /* free this IO packet */
350     AR6KFreeIOPacket(pDev,pPacket);
351     AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
352 }
353
354 /* disable packet reception (used in case the host runs out of buffers)
355  * this is the "override" method when the HIF reports another methods to
356  * disable recv events */
357 static int DevDoEnableDisableRecvOverride(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
358 {
359     int                  status = 0;
360     struct htc_packet                *pIOPacket = NULL;
361
362     AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
363             EnableRecv,AsyncMode));
364
365     do {
366
367         if (AsyncMode) {
368
369             pIOPacket = AR6KAllocIOPacket(pDev);
370
371             if (NULL == pIOPacket) {
372                 status = A_NO_MEMORY;
373                 A_ASSERT(false);
374                 break;
375             }
376
377                 /* stick in our completion routine when the I/O operation completes */
378             pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
379             pIOPacket->pContext = pDev;
380
381                 /* call the HIF layer override and do this asynchronously */
382             status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
383                                                  EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
384                                                  pIOPacket);
385             break;
386         }
387
388             /* if we get here we are doing it synchronously */
389         status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
390                                              EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
391                                              NULL);
392
393     } while (false);
394
395     if (status && (pIOPacket != NULL)) {
396         AR6KFreeIOPacket(pDev,pIOPacket);
397     }
398
399     return status;
400 }
401
/* disable packet reception (used in case the host runs out of buffers)
 * this is the "normal" method using the interrupt enable registers through
 * the host I/F */
static int DevDoEnableDisableRecvNormal(struct ar6k_device *pDev, bool EnableRecv, bool AsyncMode)
{
    int                  status = 0;
    struct htc_packet                *pIOPacket = NULL;
    struct ar6k_irq_enable_registers regs;

        /* take the lock to protect interrupt enable shadows */
    LOCK_AR6K(pDev);

        /* toggle only the mailbox-data bit in the shadow register */
    if (EnableRecv) {
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }

        /* copy into our temp area so the bus write happens outside the lock */
    memcpy(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
    UNLOCK_AR6K(pDev);

    do {

        if (AsyncMode) {

            pIOPacket = AR6KAllocIOPacket(pDev);

            if (NULL == pIOPacket) {
                status = A_NO_MEMORY;
                A_ASSERT(false);
                break;
            }

                /* copy values to write to our async I/O buffer */
            memcpy(pIOPacket->pBuffer,&regs,AR6K_IRQ_ENABLE_REGS_SIZE);

                /* stick in our completion routine when the I/O operation completes */
            pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
            pIOPacket->pContext = pDev;

                /* write it out asynchronously */
                /* NOTE(review): the immediate return code is not checked
                 * here; errors are presumably delivered through the packet
                 * completion callback - confirm against the HIF layer */
            HIFReadWrite(pDev->HIFDevice,
                         INT_STATUS_ENABLE_ADDRESS,
                         pIOPacket->pBuffer,
                         AR6K_IRQ_ENABLE_REGS_SIZE,
                         HIF_WR_ASYNC_BYTE_INC,
                         pIOPacket);
            break;
        }

        /* if we get here we are doing it synchronously */

        status = HIFReadWrite(pDev->HIFDevice,
                              INT_STATUS_ENABLE_ADDRESS,
                              &regs.int_status_enable,
                              AR6K_IRQ_ENABLE_REGS_SIZE,
                              HIF_WR_SYNC_BYTE_INC,
                              NULL);

    } while (false);

        /* packet allocation failed in async mode (pIOPacket is NULL there),
         * or defensive cleanup for future error paths */
    if (status && (pIOPacket != NULL)) {
        AR6KFreeIOPacket(pDev,pIOPacket);
    }

    return status;
}
470
471
472 int DevStopRecv(struct ar6k_device *pDev, bool AsyncMode)
473 {
474     if (NULL == pDev->HifMaskUmaskRecvEvent) {
475         return DevDoEnableDisableRecvNormal(pDev,false,AsyncMode);
476     } else {
477         return DevDoEnableDisableRecvOverride(pDev,false,AsyncMode);
478     }
479 }
480
481 int DevEnableRecv(struct ar6k_device *pDev, bool AsyncMode)
482 {
483     if (NULL == pDev->HifMaskUmaskRecvEvent) {
484         return DevDoEnableDisableRecvNormal(pDev,true,AsyncMode);
485     } else {
486         return DevDoEnableDisableRecvOverride(pDev,true,AsyncMode);
487     }
488 }
489
490 int DevWaitForPendingRecv(struct ar6k_device *pDev,u32 TimeoutInMs,bool *pbIsRecvPending)
491 {
492     int    status          = 0;
493     u8     host_int_status = 0x0;
494     u32 counter         = 0x0;
495
496     if(TimeoutInMs < 100)
497     {
498         TimeoutInMs = 100;
499     }
500
501     counter = TimeoutInMs / 100;
502
503     do
504     {
505         //Read the Host Interrupt Status Register
506         status = HIFReadWrite(pDev->HIFDevice,
507                               HOST_INT_STATUS_ADDRESS,
508                              &host_int_status,
509                               sizeof(u8),
510                               HIF_RD_SYNC_BYTE_INC,
511                               NULL);
512         if (status)
513         {
514             AR_DEBUG_PRINTF(ATH_LOG_ERR,("DevWaitForPendingRecv:Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n",status));
515             break;
516         }
517
518         host_int_status = !status ? (host_int_status & (1 << 0)):0;
519         if(!host_int_status)
520         {
521             status          = 0;
522            *pbIsRecvPending = false;
523             break;
524         }
525         else
526         {
527             *pbIsRecvPending = true;
528         }
529
530         A_MDELAY(100);
531
532         counter--;
533
534     }while(counter);
535     return status;
536 }
537
/*
 * DevDumpRegisters - debug dump of the interrupt processing and
 * interrupt enable register snapshots.  Either pointer may be NULL to
 * skip that section; the GMBOX registers are printed only when the
 * target actually exposes a GMBOX address.
 */
void DevDumpRegisters(struct ar6k_device               *pDev,
                      struct ar6k_irq_proc_registers   *pIrqProcRegs,
                      struct ar6k_irq_enable_registers *pIrqEnableRegs)
{

    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));

    if (pIrqProcRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Host Int Status:           0x%x\n",pIrqProcRegs->host_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("CPU Int Status:            0x%x\n",pIrqProcRegs->cpu_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Error Int Status:          0x%x\n",pIrqProcRegs->error_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status:        0x%x\n",pIrqProcRegs->counter_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Mbox Frame:                0x%x\n",pIrqProcRegs->mbox_frame));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead Valid:        0x%x\n",pIrqProcRegs->rx_lookahead_valid));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 0:            0x%x\n",pIrqProcRegs->rx_lookahead[0]));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 1:            0x%x\n",pIrqProcRegs->rx_lookahead[1]));

        if (pDev->MailBoxInfo.GMboxAddress != 0) {
                /* if the target supports GMBOX hardware, dump some additional state */
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX Host Int Status 2:   0x%x\n",pIrqProcRegs->host_int_status2));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX RX Avail:            0x%x\n",pIrqProcRegs->gmbox_rx_avail));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 0:   0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 1:   0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
        }

    }

    if (pIrqEnableRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Int Status Enable:         0x%x\n",pIrqEnableRegs->int_status_enable));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
    }
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
}
585
586
587 #define DEV_GET_VIRT_DMA_INFO(p)  ((struct dev_scatter_dma_virtual_info *)((p)->HIFPrivate[0]))
588
589 static struct hif_scatter_req *DevAllocScatterReq(struct hif_device *Context)
590 {
591     struct dl_list *pItem;
592     struct ar6k_device *pDev = (struct ar6k_device *)Context;
593     LOCK_AR6K(pDev);
594     pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
595     UNLOCK_AR6K(pDev);
596     if (pItem != NULL) {
597         return A_CONTAINING_STRUCT(pItem, struct hif_scatter_req, ListLink);
598     }
599     return NULL;
600 }
601
/* Return a scatter request to the device's free list (thread-safe).
 * Matches the HIF scatter free-function signature. */
static void DevFreeScatterReq(struct hif_device *Context, struct hif_scatter_req *pReq)
{
    struct ar6k_device *pDev = (struct ar6k_device *)Context;
    LOCK_AR6K(pDev);
    DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
    UNLOCK_AR6K(pDev);
}
609
610 int DevCopyScatterListToFromDMABuffer(struct hif_scatter_req *pReq, bool FromDMA)
611 {
612     u8 *pDMABuffer = NULL;
613     int             i, remaining;
614     u32 length;
615
616     pDMABuffer = pReq->pScatterBounceBuffer;
617
618     if (pDMABuffer == NULL) {
619         A_ASSERT(false);
620         return A_EINVAL;
621     }
622
623     remaining = (int)pReq->TotalLength;
624
625     for (i = 0; i < pReq->ValidScatterEntries; i++) {
626
627         length = min((int)pReq->ScatterList[i].Length, remaining);
628
629         if (length != (int)pReq->ScatterList[i].Length) {
630             A_ASSERT(false);
631                 /* there is a problem with the scatter list */
632             return A_EINVAL;
633         }
634
635         if (FromDMA) {
636                 /* from DMA buffer */
637             memcpy(pReq->ScatterList[i].pBuffer, pDMABuffer , length);
638         } else {
639                 /* to DMA buffer */
640             memcpy(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
641         }
642
643         pDMABuffer += length;
644         remaining -= length;
645     }
646
647     return 0;
648 }
649
650 static void DevReadWriteScatterAsyncHandler(void *Context, struct htc_packet *pPacket)
651 {
652     struct ar6k_device     *pDev = (struct ar6k_device *)Context;
653     struct hif_scatter_req *pReq = (struct hif_scatter_req *)pPacket->pPktContext;
654     
655     AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
656     
657     pReq->CompletionStatus = pPacket->Status;
658
659     AR6KFreeIOPacket(pDev,pPacket);
660
661     pReq->CompletionRoutine(pReq);
662
663     AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
664 }
665
666 static int DevReadWriteScatter(struct hif_device *Context, struct hif_scatter_req *pReq)
667 {
668     struct ar6k_device     *pDev = (struct ar6k_device *)Context;
669     int        status = 0;
670     struct htc_packet      *pIOPacket = NULL;
671     u32 request = pReq->Request;
672
673     do {
674
675         if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
676             AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
677                             ("Invalid length: %d \n", pReq->TotalLength));
678             break;
679         }
680
681         if (pReq->TotalLength == 0) {
682             A_ASSERT(false);
683             break;
684         }
685
686         if (request & HIF_ASYNCHRONOUS) {
687                 /* use an I/O packet to carry this request */
688             pIOPacket = AR6KAllocIOPacket(pDev);
689             if (NULL == pIOPacket) {
690                 status = A_NO_MEMORY;
691                 break;
692             }
693
694                 /* save the request */
695             pIOPacket->pPktContext = pReq;
696                 /* stick in our completion routine when the I/O operation completes */
697             pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
698             pIOPacket->pContext = pDev;
699         }
700
701         if (request & HIF_WRITE) {
702             /* in virtual DMA, we are issuing the requests through the legacy HIFReadWrite API
703              * this API will adjust the address automatically for the last byte to fall on the mailbox
704              * EOM. */
705
706             /* if the address is an extended address, we can adjust the address here since the extended
707              * address will bypass the normal checks in legacy HIF layers */
708             if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
709                 pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
710             }
711         }
712
713             /* use legacy readwrite */
714         status = HIFReadWrite(pDev->HIFDevice,
715                               pReq->Address,
716                               DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
717                               pReq->TotalLength,
718                               request,
719                               (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);
720
721     } while (false);
722
723     if ((status != A_PENDING) && status && (request & HIF_ASYNCHRONOUS)) {
724         if (pIOPacket != NULL) {
725             AR6KFreeIOPacket(pDev,pIOPacket);
726         }
727         pReq->CompletionStatus = status;
728         pReq->CompletionRoutine(pReq);
729         status = 0;
730     }
731
732     return status;
733 }
734
735
736 static void DevCleanupVirtualScatterSupport(struct ar6k_device *pDev)
737 {
738     struct hif_scatter_req *pReq;
739
740     while (1) {
741         pReq = DevAllocScatterReq((struct hif_device *)pDev);
742         if (NULL == pReq) {
743             break;
744         }
745         kfree(pReq);
746     }
747
748 }
749
    /* function to set up virtual scatter support if HIF layer has not implemented the interface */
static int DevSetupVirtualScatterSupport(struct ar6k_device *pDev)
{
    int                     status = 0;
    int                          bufferSize, sgreqSize;
    int                          i;
    struct dev_scatter_dma_virtual_info *pVirtualInfo;
    struct hif_scatter_req              *pReq;

        /* one allocation per request holds: the request struct (with its
         * variable-length scatter item array), the virtual-DMA info struct,
         * cache-line alignment slack, and the bounce buffer itself */
    bufferSize = sizeof(struct dev_scatter_dma_virtual_info) +
                2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;

    sgreqSize = sizeof(struct hif_scatter_req) +
                    (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(struct hif_scatter_item));

    for (i = 0; i < AR6K_SCATTER_REQS; i++) {
            /* allocate the scatter request, buffer info and the actual virtual buffer itself */
        pReq = (struct hif_scatter_req *)A_MALLOC(sgreqSize + bufferSize);

        if (NULL == pReq) {
            status = A_NO_MEMORY;
            break;
        }

        A_MEMZERO(pReq, sgreqSize);

            /* the virtual DMA starts after the scatter request struct */
        pVirtualInfo = (struct dev_scatter_dma_virtual_info *)((u8 *)pReq + sgreqSize);
        A_MEMZERO(pVirtualInfo, sizeof(struct dev_scatter_dma_virtual_info));

        pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
            /* align buffer to cache line in case host controller can actually DMA this */
        pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
            /* store the structure in the private area */
        pReq->HIFPrivate[0] = pVirtualInfo;
            /* we emulate a DMA bounce interface */
        pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
        pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
            /* free request to the list */
        DevFreeScatterReq((struct hif_device *)pDev,pReq);
    }

    if (status) {
            /* partial pool: release whatever was queued before the failure */
        DevCleanupVirtualScatterSupport(pDev);
    } else {
            /* publish our emulation as the device's scatter interface */
        pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
        pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
        pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
        if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
            pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
            pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
        } else {
            pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
            pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
        }
        pDev->ScatterIsVirtual = true;
    }

    return status;
}
811
812 int DevCleanupMsgBundling(struct ar6k_device *pDev)
813 {
814     if(NULL != pDev)
815     {
816         DevCleanupVirtualScatterSupport(pDev);
817     }
818
819     return 0;
820 }
821
/* Configure message bundling (scatter/gather) limits for send and receive.
 *
 * Queries the HIF layer for native scatter-request support; if that fails,
 * falls back to a virtual (bounce-buffer) scatter emulation built on legacy
 * HIFReadWrite(). On success, fills in pDev->MaxRecvBundleSize and
 * pDev->MaxSendBundleSize. Returns 0 on success or a non-zero error code.
 *
 * NOTE(review): MaxMsgsPerTransfer is not referenced anywhere in this
 * function -- the limits come entirely from the HIF scatter query; confirm
 * whether callers expect it to be honored. */
int DevSetupMsgBundling(struct ar6k_device *pDev, int MaxMsgsPerTransfer)
{
    int status;

        /* some HIF implementations explicitly forbid bundling */
    if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
        return A_ENOTSUP;
    }

        /* ask the HIF layer whether it supports native scatter requests;
         * on success it fills in pDev->HifScatterInfo (callbacks and limits) */
    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
                                &pDev->HifScatterInfo,
                                sizeof(pDev->HifScatterInfo));

    if (status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
            ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));

            /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
        status = DevSetupVirtualScatterSupport(pDev);

        if (!status) {
             AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
                    DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
        }

    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
                    DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    }

    if (!status) {
            /* for the recv path, the maximum number of bytes per recv bundle is just limited
             * by the maximum transfer size at the HIF layer */
        pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;

        if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
            AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K : SPI bus requires TX bundling disabled\n"));
            pDev->MaxSendBundleSize = 0;
        } else {
                /* for the send path, the max transfer size is limited by the existence and size of
                 * the extended mailbox address range */
            if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
                pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
            } else {
                    /* legacy */
                pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
            }

            if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
                    /* limit send bundle size to what the HIF can support for scatter requests */
                pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
            }
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: max recv: %d max send: %d \n",
                    DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));

    }
    return status;
}
886
/* Submit a scatter request to the HIF layer (or the virtual-scatter emulation).
 *
 * pDev        - AR6K device context
 * pScatterReq - request with ValidScatterEntries and TotalLength already set
 * Read        - true for a mailbox read (recv), false for a write (send)
 * Async       - true to complete via pScatterReq->CompletionRoutine
 *
 * Returns 0 on success or pending-async; for async requests a preparation
 * failure is delivered through the completion routine and 0 is returned. */
int DevSubmitScatterRequest(struct ar6k_device *pDev, struct hif_scatter_req *pScatterReq, bool Read, bool Async)
{
    int status;

    if (Read) {
            /* read operation */
        pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
        pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
        A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
    } else {
        u32 mailboxWidth;

            /* write operation */
        pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
        A_ASSERT(pScatterReq->TotalLength <= (u32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
        if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
                /* for large writes use the extended address */
            pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
            mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
        } else {
            pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
            mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
        }

        if (!pDev->ScatterIsVirtual) {
            /* we are passing this scatter list down to the HIF layer's scatter request
             * handler; fix up the address so that the last byte falls on the EOM (end of
             * mailbox), we do this for those HIFs that support the scatter API */
            pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
        }

    }

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
                ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
                pScatterReq->ValidScatterEntries,
                pScatterReq->TotalLength,
                pScatterReq->Address,
                Async ? "ASYNC" : "SYNC",
                (Read) ? "RD" : "WR"));

    status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);

    if (status) {
        if (Async) {
                /* async callers always get their completion through the callback,
                 * even for a preparation failure, hence the 0 return here */
            pScatterReq->CompletionStatus = status;
            pScatterReq->CompletionRoutine(pScatterReq);
            return 0;
        }
        return status;
    }

        /* the virtual scatter path is implemented above the HIF layer, so it
         * takes the device context instead of the raw HIF handle */
    status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
                                                        pScatterReq);
    if (!Async) {
            /* in sync mode, we can touch the scatter request */
        pScatterReq->CompletionStatus = status;
        DEV_FINISH_SCATTER_OPERATION(pScatterReq);
    } else {
            /* async: A_PENDING means the request was accepted, not an error */
        if (status == A_PENDING) {
            status = 0;
        }
    }

    return status;
}
953
954
955 #ifdef MBOXHW_UNIT_TEST
956
957
958 /* This is a mailbox hardware unit test that must be called in a schedulable context
959  * This test is very simple, it will send a list of buffers with a counting pattern
960  * and the target will invert the data and send the message back
961  *
962  * the unit test has the following constraints:
963  *
964  * The target has at least 8 buffers of 256 bytes each. The host will send
965  * the following pattern of buffers in rapid succession :
966  *
967  * 1 buffer - 128 bytes
968  * 1 buffer - 256 bytes
969  * 1 buffer - 512 bytes
970  * 1 buffer - 1024 bytes
971  *
972  * The host will send the buffers to one mailbox and wait for buffers to be reflected
973  * back from the same mailbox. The target sends the buffers FIFO order.
974  * Once the final buffer has been received for a mailbox, the next mailbox is tested.
975  *
976  *
 * Note:  To simplify the test, we assume that the chosen buffer sizes
978  *        will fall on a nice block pad
979  *
980  * It is expected that higher-order tests will be written to stress the mailboxes using
 * a message-based protocol (with some performance timing) that can create more
982  * randomness in the packets sent over mailboxes.
983  *
984  * */
985
/* round x up to the next multiple of align (align must be a power of two) */
#define A_ROUND_UP_PWR2(x, align)    (((int) (x) + ((align)-1)) & ~((align)-1))

/* worst-case block padding reserved per test buffer in the staging area */
#define BUFFER_BLOCK_PAD 128

/* block-aligned buffer sizes (currently disabled) */
#if 0
#define BUFFER1 128
#define BUFFER2 256
#define BUFFER3 512
#define BUFFER4 1024
#endif

/* deliberately "odd" buffer sizes to exercise the block-padding paths */
#if 1
#define BUFFER1 80
#define BUFFER2 200
#define BUFFER3 444
#define BUFFER4 800
#endif

/* total staging space with every buffer rounded up to the block pad */
#define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
                     A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )

/* number of payload bytes actually verified (padding excluded) */
#define TEST_BYTES (BUFFER1 +  BUFFER2 + BUFFER3 + BUFFER4)

/* polling attempts in GetCredits() before declaring a timeout */
#define TEST_CREDITS_RECV_TIMEOUT 100

static u8 g_Buffer[TOTAL_BYTES];             /* shared staging area for all test buffers */
static u32 g_MailboxAddrs[AR6K_MAILBOXES];   /* per-mailbox I/O addresses, queried from the HIF */
static u32 g_BlockSizes[AR6K_MAILBOXES];     /* per-mailbox block sizes, queried from the HIF */

/* number of distinct buffers sent per mailbox in one test pass */
#define BUFFER_PROC_LIST_DEPTH 4

/* one (pointer, length) entry describing a sub-buffer inside g_Buffer */
struct buffer_proc_list {
    u8 *pBuffer;   /* start of this sub-buffer within g_Buffer */
    u32 length;    /* payload length in bytes (without block padding) */
};
1023
1024
/* Append one (buffer, length) entry to a buffer_proc_list array and advance
 * both the list cursor and the current position in the backing buffer.
 *
 * pList    - pointer into an array of struct buffer_proc_list (advanced by one)
 * len      - length in bytes of this entry
 * pCurrpos - current position in the backing buffer (advanced by len)
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one statement;
 * the original bare-brace form produced "{ ... };" which breaks when used in
 * an un-braced if/else body (CERT PRE10-C). */
#define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos)    \
do {                                                \
    (pList)->pBuffer = (pCurrpos);                  \
    (pList)->length = (len);                        \
    (pCurrpos) += (len);                            \
    (pList)++;                                      \
} while (0)
1032
1033 /* a simple and crude way to send different "message" sizes */
1034 static void AssembleBufferList(struct buffer_proc_list *pList)
1035 {
1036     u8 *pBuffer = g_Buffer;
1037
1038 #if BUFFER_PROC_LIST_DEPTH < 4
1039 #error "Buffer processing list depth is not deep enough!!"
1040 #endif
1041
1042     PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
1043     PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
1044     PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
1045     PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
1046
1047 }
1048
1049 #define FILL_ZERO     true
1050 #define FILL_COUNTING false
1051 static void InitBuffers(bool Zero)
1052 {
1053     u16 *pBuffer16 = (u16 *)g_Buffer;
1054     int      i;
1055
1056         /* fill buffer with 16 bit counting pattern or zeros */
1057     for (i = 0; i <  (TOTAL_BYTES / 2) ; i++) {
1058         if (!Zero) {
1059             pBuffer16[i] = (u16)i;
1060         } else {
1061             pBuffer16[i] = 0;
1062         }
1063     }
1064 }
1065
1066
1067 static bool CheckOneBuffer(u16 *pBuffer16, int Length)
1068 {
1069     int      i;
1070     u16 startCount;
1071     bool   success = true;
1072
1073         /* get the starting count */
1074     startCount = pBuffer16[0];
1075         /* invert it, this is the expected value */
1076     startCount = ~startCount;
1077         /* scan the buffer and verify */
1078     for (i = 0; i < (Length / 2) ; i++,startCount++) {
1079             /* target will invert all the data */
1080         if ((u16)pBuffer16[i] != (u16)~startCount) {
1081             success = false;
1082             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
1083                         pBuffer16[i], ((u16)~startCount), i, Length));
1084              AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
1085                         pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer16[i+3]));
1086             break;
1087         }
1088     }
1089
1090     return success;
1091 }
1092
1093 static bool CheckBuffers(void)
1094 {
1095     int      i;
1096     bool   success = true;
1097     struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
1098
1099         /* assemble the list */
1100     AssembleBufferList(checkList);
1101
1102         /* scan the buffers and verify */
1103     for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) {
1104         success = CheckOneBuffer((u16 *)checkList[i].pBuffer, checkList[i].length);
1105         if (!success) {
1106             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
1107                         (u32)checkList[i].pBuffer, checkList[i].length));
1108             break;
1109         }
1110     }
1111
1112     return success;
1113 }
1114
1115     /* find the end marker for the last buffer we will be sending */
1116 static u16 GetEndMarker(void)
1117 {
1118     u8 *pBuffer;
1119     struct buffer_proc_list checkList[BUFFER_PROC_LIST_DEPTH];
1120
1121         /* fill up buffers with the normal counting pattern */
1122     InitBuffers(FILL_COUNTING);
1123
1124         /* assemble the list we will be sending down */
1125     AssembleBufferList(checkList);
1126         /* point to the last 2 bytes of the last buffer */
1127     pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);
1128
1129         /* the last count in the last buffer is the marker */
1130     return (u16)pBuffer[0] | ((u16)pBuffer[1] << 8);
1131 }
1132
/* debug zone used for the test's informational output */
#define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR

/* send the ordered buffers to the target */
/* Synchronously write the four test buffers to the given mailbox, padding
 * each one up to the mailbox block size first. The mask arithmetic below
 * assumes the block size is a power of two (true for the HIFs this test
 * targets -- TODO confirm). Returns 0 or the first HIFReadWrite() error. */
static int SendBuffers(struct ar6k_device *pDev, int mbox)
{
    int         status = 0;
    u32 request = HIF_WR_SYNC_BLOCK_INC;
    struct buffer_proc_list sendList[BUFFER_PROC_LIST_DEPTH];
    int              i;
    int              totalBytes = 0;       /* payload bytes sent */
    int              paddedLength;
    int              totalwPadding = 0;    /* bytes sent including block padding */

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));

        /* fill buffer with counting pattern */
    InitBuffers(FILL_COUNTING);

        /* assemble the order in which we send */
    AssembleBufferList(sendList);

    for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {

            /* we are doing block transfers, so we need to pad everything to a block size */
        paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
                       (~(g_BlockSizes[mbox] - 1));

            /* send each buffer synchronously */
        status = HIFReadWrite(pDev->HIFDevice,
                              g_MailboxAddrs[mbox],
                              sendList[i].pBuffer,
                              paddedLength,
                              request,
                              NULL);
        if (status) {
            break;
        }
        totalBytes += sendList[i].length;
        totalwPadding += paddedLength;
    }

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));

    return status;
}
1178
/* poll the mailbox credit counter until we get a credit or timeout */
/* Poll the auto-decrementing credit counter register for a mailbox.
 * Retries up to TEST_CREDITS_RECV_TIMEOUT times with A_MDELAY(1000) between
 * attempts (presumably ~1 s each -- confirm A_MDELAY units). On success,
 * *pCredits receives the credit count read; returns A_ERROR on I/O failure
 * or timeout (in which case *pCredits is left untouched). */
static int GetCredits(struct ar6k_device *pDev, int mbox, int *pCredits)
{
    int status = 0;
    int      timeout = TEST_CREDITS_RECV_TIMEOUT;
    u8 credits = 0;
    u32 address;

    while (true) {

            /* Read the counter register to get credits, this auto-decrements  */
        address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
        status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
                              HIF_RD_SYNC_BYTE_FIX, NULL);
        if (status) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
            status = A_ERROR;
            break;
        }

            /* a non-zero read means the target posted at least one credit */
        if (credits) {
            break;
        }

        timeout--;

        if (timeout <= 0) {
              AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
            status = A_ERROR;
            break;
        }

         /* delay a little, target may not be ready */
         A_MDELAY(1000);

    }

    if (status == 0) {
        *pCredits = credits;
    }

    return status;
}
1224
1225
/* wait for the buffers to come back */
/* Receive the reflected test buffers from a mailbox.
 * Blocks in GetCredits() for each batch; buffers are expected back in FIFO
 * order, block-padded by the target (padding math assumes power-of-two block
 * sizes, as in SendBuffers). Asserts that exactly TEST_BYTES payload bytes
 * were received. Returns 0 on success or the first error encountered. */
static int RecvBuffers(struct ar6k_device *pDev, int mbox)
{
    int         status = 0;
    u32 request = HIF_RD_SYNC_BLOCK_INC;
    struct buffer_proc_list recvList[BUFFER_PROC_LIST_DEPTH];
    int              curBuffer;           /* index of the next buffer we expect back */
    int              credits;
    int              i;
    int              totalBytes = 0;      /* payload bytes received */
    int              paddedLength;
    int              totalwPadding = 0;   /* bytes received including block padding */

    AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));

        /* zero the buffers */
    InitBuffers(FILL_ZERO);

        /* assemble the order in which we should receive */
    AssembleBufferList(recvList);

    curBuffer = 0;

    while (curBuffer < BUFFER_PROC_LIST_DEPTH) {

            /* get number of buffers that have been completed, this blocks
             * until we get at least 1 credit or it times out */
        status = GetCredits(pDev, mbox, &credits);

        if (status) {
            break;
        }

        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));

            /* get all the buffers that are sitting on the queue */
        for (i = 0; i < credits; i++) {
            A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
                /* recv the current buffer synchronously, the buffers should come back in
                 * order... with padding applied by the target */
            paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
                       (~(g_BlockSizes[mbox] - 1));

            status = HIFReadWrite(pDev->HIFDevice,
                                  g_MailboxAddrs[mbox],
                                  recvList[curBuffer].pBuffer,
                                  paddedLength,
                                  request,
                                  NULL);
            if (status) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
                        recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
                break;
            }

            totalwPadding += paddedLength;
            totalBytes += recvList[curBuffer].length;
            curBuffer++;
        }

        if (status) {
            break;
        }
            /* go back and get some more */
        credits = 0;
    }

        /* every payload byte must have come back for the test to be valid */
    if (totalBytes != TEST_BYTES) {
        A_ASSERT(false);
    }  else {
        AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
            mbox, totalBytes, totalwPadding));
    }

    return status;


}
1304
1305 static int DoOneMboxHWTest(struct ar6k_device *pDev, int mbox)
1306 {
1307     int status;
1308
1309     do {
1310             /* send out buffers */
1311         status = SendBuffers(pDev,mbox);
1312
1313         if (status) {
1314             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
1315             break;
1316         }
1317
1318             /* go get them, this will block */
1319         status =  RecvBuffers(pDev, mbox);
1320
1321         if (status) {
1322             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
1323             break;
1324         }
1325
1326             /* check the returned data patterns */
1327         if (!CheckBuffers()) {
1328             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
1329             status = A_ERROR;
1330             break;
1331         }
1332
1333         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));
1334
1335     }  while (false);
1336
1337     return status;
1338 }
1339
1340 /* here is where the test starts */
1341 int DoMboxHWTest(struct ar6k_device *pDev)
1342 {
1343     int      i;
1344     int status;
1345     int      credits = 0;
1346     u8 params[4];
1347     int      numBufs;
1348     int      bufferSize;
1349     u16 temp;
1350
1351
1352     AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START -  \n"));
1353
1354     do {
1355             /* get the addresses for all 4 mailboxes */
1356         status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
1357                                     g_MailboxAddrs, sizeof(g_MailboxAddrs));
1358
1359         if (status) {
1360             A_ASSERT(false);
1361             break;
1362         }
1363
1364             /* get the block sizes */
1365         status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
1366                                     g_BlockSizes, sizeof(g_BlockSizes));
1367
1368         if (status) {
1369             A_ASSERT(false);
1370             break;
1371         }
1372
1373             /* note, the HIF layer usually reports mbox 0 to have a block size of
1374              * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
1375              * the same. */
1376         g_BlockSizes[0] = g_BlockSizes[1];
1377         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));
1378
1379         if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
1380             AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
1381                 g_BlockSizes[1], BUFFER_BLOCK_PAD));
1382             break;
1383         }
1384
1385         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));
1386
1387             /* the target lets us know it is ready by giving us 1 credit on
1388              * mailbox 0 */
1389         status = GetCredits(pDev, 0, &credits);
1390
1391         if (status) {
1392             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
1393             break;
1394         }
1395
1396         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));
1397
1398             /* read the first 4 scratch registers */
1399         status = HIFReadWrite(pDev->HIFDevice,
1400                               SCRATCH_ADDRESS,
1401                               params,
1402                               4,
1403                               HIF_RD_SYNC_BYTE_INC,
1404                               NULL);
1405
1406         if (status) {
1407             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait get parameters \n"));
1408             break;
1409         }
1410
1411         numBufs = params[0];
1412         bufferSize = (int)(((u16)params[2] << 8) | (u16)params[1]);
1413
1414         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
1415             ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
1416             numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));
1417
1418         if ((numBufs * bufferSize) < TOTAL_BYTES) {
1419             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
1420                 TOTAL_BYTES, (numBufs*bufferSize)));
1421             status = A_ERROR;
1422             break;
1423         }
1424
1425         temp = GetEndMarker();
1426
1427         status = HIFReadWrite(pDev->HIFDevice,
1428                               SCRATCH_ADDRESS + 4,
1429                               (u8 *)&temp,
1430                               2,
1431                               HIF_WR_SYNC_BYTE_INC,
1432                               NULL);
1433
1434         if (status) {
1435             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
1436             break;
1437         }
1438
1439         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));
1440
1441         temp = (u16)g_BlockSizes[1];
1442             /* convert to a mask */
1443         temp = temp - 1;
1444         status = HIFReadWrite(pDev->HIFDevice,
1445                               SCRATCH_ADDRESS + 6,
1446                               (u8 *)&temp,
1447                               2,
1448                               HIF_WR_SYNC_BYTE_INC,
1449                               NULL);
1450
1451         if (status) {
1452             AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
1453             break;
1454         }
1455
1456         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));
1457
1458             /* execute the test on each mailbox */
1459         for (i = 0; i < AR6K_MAILBOXES; i++) {
1460             status = DoOneMboxHWTest(pDev, i);
1461             if (status) {
1462                 break;
1463             }
1464         }
1465
1466     } while (false);
1467
1468     if (status == 0) {
1469         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! -  \n"));
1470     } else {
1471         AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! -  \n"));
1472     }
1473         /* don't let HTC_Start continue, the target is actually not running any HTC code */
1474     return A_ERROR;
1475 }
1476 #endif
1477
1478
1479