pandora-kernel.git: drivers/staging/tidspbridge/core/io_sm.c
1 /*
2  * io_sm.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * IO dispatcher for a shared memory channel driver.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 /*
20  * Channel Invariant:
21  * There is an important invariant condition which must be maintained per
22  * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
23  * which may cause timeouts and/or failure of the sync_wait_on_event
24  * function.
25  */
26 #include <linux/types.h>
27 #include <linux/list.h>
28
29 /* Host OS */
30 #include <dspbridge/host_os.h>
31 #include <linux/workqueue.h>
32
33 /*  ----------------------------------- DSP/BIOS Bridge */
34 #include <dspbridge/dbdefs.h>
35
36 /* Trace & Debug */
37 #include <dspbridge/dbc.h>
38
39 /* Services Layer */
40 #include <dspbridge/ntfy.h>
41 #include <dspbridge/sync.h>
42
43 /* Hardware Abstraction Layer */
44 #include <hw_defs.h>
45 #include <hw_mmu.h>
46
47 /* Bridge Driver */
48 #include <dspbridge/dspdeh.h>
49 #include <dspbridge/dspio.h>
50 #include <dspbridge/dspioctl.h>
51 #include <dspbridge/wdt.h>
52 #include <_tiomap.h>
53 #include <tiomap_io.h>
54 #include <_tiomap_pwr.h>
55
56 /* Platform Manager */
57 #include <dspbridge/cod.h>
58 #include <dspbridge/node.h>
59 #include <dspbridge/dev.h>
60
61 /* Others */
62 #include <dspbridge/rms_sh.h>
63 #include <dspbridge/mgr.h>
64 #include <dspbridge/drv.h>
65 #include "_cmm.h"
66 #include "module_list.h"
67
68 /* This */
69 #include <dspbridge/io_sm.h>
70 #include "_msg_sm.h"
71
72 /* Defines, Data Structures, Typedefs */
73 #define OUTPUTNOTREADY  0xffff
74 #define NOTENABLED      0xffff  /* Channel(s) not enabled */
75
76 #define EXTEND      "_EXT_END"
77
78 #define SWAP_WORD(x)     (x)
79 #define UL_PAGE_ALIGN_SIZE 0x10000      /* Page Align Size */
80
81 #define MAX_PM_REQS 32
82
83 #define MMU_FAULT_HEAD1 0xa5a5a5a5
84 #define MMU_FAULT_HEAD2 0x96969696
85 #define POLL_MAX 1000
86 #define MAX_MMU_DBGBUFF 10240
87
88 /* IO Manager: only one created per board */
89 struct io_mgr {
90         /* These four fields must be the first fields in an io_mgr struct */
91         /* Bridge device context */
92         struct bridge_dev_context *bridge_context;
93         /* Function interface to Bridge driver */
94         struct bridge_drv_interface *intf_fxns;
95         struct dev_object *dev_obj;     /* Device this board represents */
96
97         /* These fields initialized in bridge_io_create() */
98         struct chnl_mgr *chnl_mgr;
99         struct shm *shared_mem; /* Shared Memory control */
100         u8 *input;              /* Address of input channel */
101         u8 *output;             /* Address of output channel */
102         struct msg_mgr *msg_mgr;        /* Message manager */
103         /* Msg control for messages from the DSP */
104         struct msg_ctrl *msg_input_ctrl;
105         /* Msg control for messages to the DSP */
106         struct msg_ctrl *msg_output_ctrl;
107         u8 *msg_input;          /* Address of input messages */
108         u8 *msg_output;         /* Address of output messages */
109         u32 sm_buf_size;        /* Size of a shared memory I/O channel */
110         bool shared_irq;        /* Is this IRQ shared? */
111         u32 word_size;          /* Size in bytes of DSP word */
112         u16 intr_val;           /* Interrupt value */
113         /* Private extended proc info; MMU setup */
114         struct mgr_processorextinfo ext_proc_info;
115         struct cmm_object *cmm_mgr;     /* Shared Mem Mngr */
116         struct work_struct io_workq;    /* workqueue */
117 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
118         u32 trace_buffer_begin; /* Trace message start address */
119         u32 trace_buffer_end;   /* Trace message end address */
120         u32 trace_buffer_current;       /* Trace message current address */
121         u32 gpp_read_pointer;           /* GPP Read pointer to Trace buffer */
122         u8 *msg;
123         u32 gpp_va;
124         u32 dsp_va;
125 #endif
126         /* IO Dpc */
127         u32 dpc_req;            /* Number of requested DPCs. */
128         u32 dpc_sched;          /* Number of executed DPCs. */
129         struct tasklet_struct dpc_tasklet;
130         spinlock_t dpc_lock;
131
132 };
133
134 /* Function Prototypes */
135 static void io_dispatch_pm(struct io_mgr *pio_mgr);
136 static void notify_chnl_complete(struct chnl_object *pchnl,
137                                  struct chnl_irp *chnl_packet_obj);
138 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
139                         u8 io_mode);
140 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
141                         u8 io_mode);
142 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
143 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
144 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
145                              struct chnl_object *pchnl, u32 mask);
146
147 /* Bus Addr (cached kernel) */
148 static int register_shm_segs(struct io_mgr *hio_mgr,
149                                     struct cod_manager *cod_man,
150                                     u32 dw_gpp_base_pa);
151
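/*
 * Helpers for the shm host_free_mask bits: a set bit tells the DSP that
 * the host has a buffer queued on that channel, a cleared bit that no
 * more host buffers are available.
 */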
152 static inline void set_chnl_free(struct shm *sm, u32 chnl)
153 {
154         sm->host_free_mask &= ~(1 << chnl);
155 }
156
157 static inline void set_chnl_busy(struct shm *sm, u32 chnl)
158 {
159         sm->host_free_mask |= 1 << chnl;
160 }
161
162
163 /*
164  *  ======== bridge_io_create ========
165  *      Create an IO manager object.
166  */
167 int bridge_io_create(struct io_mgr **io_man,
168                             struct dev_object *hdev_obj,
169                             const struct io_attrs *mgr_attrts)
170 {
171         struct io_mgr *pio_mgr = NULL;
172         struct bridge_dev_context *hbridge_context = NULL;
173         struct cfg_devnode *dev_node_obj;
174         struct chnl_mgr *hchnl_mgr;
175         u8 dev_type;
176
177         /* Check requirements */
178         if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
179                 return -EFAULT;
180
181         *io_man = NULL;
182
183         dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
184         if (!hchnl_mgr || hchnl_mgr->iomgr)
185                 return -EFAULT;
186
187         /*
188          * Message manager will be created when a file is loaded, since
189          * size of message buffer in shared memory is configurable in
190          * the base image.
191          */
192         dev_get_bridge_context(hdev_obj, &hbridge_context);
193         if (!hbridge_context)
194                 return -EFAULT;
195
196         dev_get_dev_type(hdev_obj, &dev_type);
197
198         /* Allocate IO manager object */
199         pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
200         if (!pio_mgr)
201                 return -ENOMEM;
202
203         /* Initialize chnl_mgr object */
204         pio_mgr->chnl_mgr = hchnl_mgr;
205         pio_mgr->word_size = mgr_attrts->word_size;
206
207         if (dev_type == DSP_UNIT) {
208                 /* Create an IO DPC */
209                 tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (unsigned long) pio_mgr);
210
211                 /* Initialize DPC counters */
212                 pio_mgr->dpc_req = 0;
213                 pio_mgr->dpc_sched = 0;
214
215                 spin_lock_init(&pio_mgr->dpc_lock);
216
217                 if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
218                         bridge_io_destroy(pio_mgr);
219                         return -EIO;
220                 }
221         }
222
223         pio_mgr->bridge_context = hbridge_context;
224         pio_mgr->shared_irq = mgr_attrts->irq_shared;
225         if (dsp_wdt_init()) {
226                 bridge_io_destroy(pio_mgr);
227                 return -EPERM;
228         }
229
230         /* Return IO manager object to caller... */
231         hchnl_mgr->iomgr = pio_mgr;
232         *io_man = pio_mgr;
233
234         return 0;
235 }
236
237 /*
238  *  ======== bridge_io_destroy ========
239  *  Purpose:
240  *      Disable interrupts, destroy the IO manager.
241  */
242 int bridge_io_destroy(struct io_mgr *hio_mgr)
243 {
244         int status = 0;
245         if (hio_mgr) {
246                 /* Free IO DPC object */
247                 tasklet_kill(&hio_mgr->dpc_tasklet);
248
249 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
250                 kfree(hio_mgr->msg);
251 #endif
252                 dsp_wdt_exit();
253                 /* Free this IO manager object */
254                 kfree(hio_mgr);
255         } else {
256                 status = -EFAULT;
257         }
258
259         return status;
260 }
261
262 /*
263  *  ======== bridge_io_on_loaded ========
264  *  Purpose:
265  *      Called when a new program is loaded to get shared memory buffer
266  *      parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
267  *      are in DSP address units.
268  */
269 int bridge_io_on_loaded(struct io_mgr *hio_mgr)
270 {
271         struct cod_manager *cod_man;
272         struct chnl_mgr *hchnl_mgr;
273         struct msg_mgr *hmsg_mgr;
274         u32 ul_shm_base;
275         u32 ul_shm_base_offset;
276         u32 ul_shm_limit;
277         u32 ul_shm_length = -1;
278         u32 ul_mem_length = -1;
279         u32 ul_msg_base;
280         u32 ul_msg_limit;
281         u32 ul_msg_length = -1;
282         u32 ul_ext_end;
283         u32 ul_gpp_pa = 0;
284         u32 ul_gpp_va = 0;
285         u32 ul_dsp_va = 0;
286         u32 ul_seg_size = 0;
287         u32 ul_pad_size = 0;
288         u32 i;
289         int status = 0;
290         u8 num_procs = 0;
291         s32 ndx = 0;
292         /* DSP MMU setup table */
293         struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
294         struct cfg_hostres *host_res;
295         struct bridge_dev_context *pbridge_context;
296         u32 map_attrs;
297         u32 shm0_end;
298         u32 ul_dyn_ext_base;
299         u32 ul_seg1_size = 0;
300         u32 pa_curr = 0;
301         u32 va_curr = 0;
302         u32 gpp_va_curr = 0;
303         u32 num_bytes = 0;
304         u32 all_bits = 0;
305         u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
306                 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
307         };
308
309         status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
310         if (!pbridge_context) {
311                 status = -EFAULT;
312                 goto func_end;
313         }
314
315         host_res = pbridge_context->resources;
316         if (!host_res) {
317                 status = -EFAULT;
318                 goto func_end;
319         }
320         status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
321         if (!cod_man) {
322                 status = -EFAULT;
323                 goto func_end;
324         }
325         hchnl_mgr = hio_mgr->chnl_mgr;
326         /* The message manager is destroyed when the board is stopped. */
327         dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
328         hmsg_mgr = hio_mgr->msg_mgr;
329         if (!hchnl_mgr || !hmsg_mgr) {
330                 status = -EFAULT;
331                 goto func_end;
332         }
333         if (hio_mgr->shared_mem)
334                 hio_mgr->shared_mem = NULL;
335
336         /* Get start and length of channel part of shared memory */
337         status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
338                                    &ul_shm_base);
339         if (status) {
340                 status = -EFAULT;
341                 goto func_end;
342         }
343         status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
344                                    &ul_shm_limit);
345         if (status) {
346                 status = -EFAULT;
347                 goto func_end;
348         }
349         if (ul_shm_limit <= ul_shm_base) {
350                 status = -EINVAL;
351                 goto func_end;
352         }
353         /* Get total length in bytes */
354         ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
355         /* Calculate size of a PROCCOPY shared memory region */
356         dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
357                 __func__, (ul_shm_length - sizeof(struct shm)));
358
359         /* Get start and length of message part of shared memory */
360         status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
361                                            &ul_msg_base);
362         if (!status) {
363                 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
364                                            &ul_msg_limit);
365                 if (!status) {
366                         if (ul_msg_limit <= ul_msg_base) {
367                                 status = -EINVAL;
368                         } else {
369                                 /*
370                                  * Length (bytes) of messaging part of shared
371                                  * memory.
372                                  */
373                                 ul_msg_length =
374                                     (ul_msg_limit - ul_msg_base +
375                                      1) * hio_mgr->word_size;
376                                 /*
377                                  * Total length (bytes) of shared memory:
378                                  * chnl + msg.
379                                  */
380                                 ul_mem_length = ul_shm_length + ul_msg_length;
381                         }
382                 } else {
383                         status = -EFAULT;
384                 }
385         } else {
386                 status = -EFAULT;
387         }
388         if (!status) {
389 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
390                 status =
391                     cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
392 #else
393                 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
394                                            &shm0_end);
395 #endif
396                 if (status)
397                         status = -EFAULT;
398         }
399         if (!status) {
400                 status =
401                     cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
402                 if (status)
403                         status = -EFAULT;
404         }
405         if (!status) {
406                 status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
407                 if (status)
408                         status = -EFAULT;
409         }
410         if (!status) {
411                 /* Get memory reserved in host resources */
412                 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
413                                               &hio_mgr->ext_proc_info,
414                                               sizeof(struct
415                                                      mgr_processorextinfo),
416                                               &num_procs);
417
418                 /* The first MMU TLB entry (TLB_0) in DCD is ShmBase. */
419                 ndx = 0;
420                 ul_gpp_pa = host_res->mem_phys[1];
421                 ul_gpp_va = host_res->mem_base[1];
422                 /* This is the virtual uncached ioremapped address!!! */
423                 /* Why can't we directly take the DSPVA from the symbols? */
424                 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
425                 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
426                 ul_seg1_size =
427                     (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
428                 /* 4K align */
429                 ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
430                 /* 64K align */
431                 ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
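                /*
                 * Pad so that the cacheable segment that follows the dynamic
                 * segment starts on a 64 KB boundary in GPP physical space.
                 */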
432                 ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
433                                                     UL_PAGE_ALIGN_SIZE);
434                 if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
435                         ul_pad_size = 0x0;
436
437                 dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
438                         "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
439                         "ul_seg_size %x ul_seg1_size %x \n", __func__,
440                         ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
441                         ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
442
443                 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
444                     host_res->mem_length[1]) {
445                         pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
446                                __func__, host_res->mem_length[1],
447                                ul_seg_size + ul_seg1_size + ul_pad_size);
448                         status = -ENOMEM;
449                 }
450         }
451         if (status)
452                 goto func_end;
453
454         pa_curr = ul_gpp_pa;
455         va_curr = ul_dyn_ext_base * hio_mgr->word_size;
456         gpp_va_curr = ul_gpp_va;
457         num_bytes = ul_seg1_size;
458
459         /*
460          * Try to fit the mappings into TLB entries. If that is not
461          * possible, push them to the page tables. If the sections do not
462          * lie on a larger page boundary, several small pages may result;
463          * push those onto the page tables as well.
464          */
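        /*
         * Map attributes for the dynamically mapped regions: the memory is
         * already physically allocated, little endian, accessed as 32-bit
         * elements, and the entries are not locked into the DSP TLB.
         */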
465         map_attrs = 0x00000000;
466         map_attrs = DSP_MAPLITTLEENDIAN;
467         map_attrs |= DSP_MAPPHYSICALADDR;
468         map_attrs |= DSP_MAPELEMSIZE32;
469         map_attrs |= DSP_MAPDONOTLOCK;
470
471         while (num_bytes) {
472                 /*
473                  * Find the largest page size with which both PA and VA
474                  * are aligned.
475                  */
476                 all_bits = pa_curr | va_curr;
477                 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
478                         "num_bytes %x\n", all_bits, pa_curr, va_curr,
479                         num_bytes);
480                 for (i = 0; i < 4; i++) {
481                         if ((num_bytes >= page_size[i]) && ((all_bits &
482                                                              (page_size[i] -
483                                                               1)) == 0)) {
484                                 status =
485                                     hio_mgr->intf_fxns->
486                                     brd_mem_map(hio_mgr->bridge_context,
487                                                     pa_curr, va_curr,
488                                                     page_size[i], map_attrs,
489                                                     NULL);
490                                 if (status)
491                                         goto func_end;
492                                 pa_curr += page_size[i];
493                                 va_curr += page_size[i];
494                                 gpp_va_curr += page_size[i];
495                                 num_bytes -= page_size[i];
496                                 /*
497                                  * Don't try smaller sizes. Hopefully we have
498                                  * reached an address aligned to a bigger page
499                                  * size.
500                                  */
501                                 break;
502                         }
503                 }
504         }
505         pa_curr += ul_pad_size;
506         va_curr += ul_pad_size;
507         gpp_va_curr += ul_pad_size;
508
509         /* Configure the TLB entries for the next cacheable segment */
510         num_bytes = ul_seg_size;
511         va_curr = ul_dsp_va * hio_mgr->word_size;
512         while (num_bytes) {
513                 /*
514                  * Find the largest page size with which both PA and VA
515                  * are aligned.
516                  */
517                 all_bits = pa_curr | va_curr;
518                 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
519                         "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
520                         va_curr, num_bytes);
521                 for (i = 0; i < 4; i++) {
522                         if (!(num_bytes >= page_size[i]) ||
523                             !((all_bits & (page_size[i] - 1)) == 0))
524                                 continue;
525                         if (ndx < MAX_LOCK_TLB_ENTRIES) {
526                                 /*
527                                  * This is the physical address written to
528                                  * DSP MMU.
529                                  */
530                                 ae_proc[ndx].gpp_pa = pa_curr;
531                                 /*
532                                  * This is the virtual uncached ioremapped
533                                  * address!!!
534                                  */
535                                 ae_proc[ndx].gpp_va = gpp_va_curr;
536                                 ae_proc[ndx].dsp_va =
537                                     va_curr / hio_mgr->word_size;
538                                 ae_proc[ndx].size = page_size[i];
539                                 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
540                                 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
541                                 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
542                                 dev_dbg(bridge, "shm MMU TLB entry PA %x"
543                                         " VA %x DSP_VA %x Size %x\n",
544                                         ae_proc[ndx].gpp_pa,
545                                         ae_proc[ndx].gpp_va,
546                                         ae_proc[ndx].dsp_va *
547                                         hio_mgr->word_size, page_size[i]);
548                                 ndx++;
549                         } else {
550                                 status =
551                                     hio_mgr->intf_fxns->
552                                     brd_mem_map(hio_mgr->bridge_context,
553                                                     pa_curr, va_curr,
554                                                     page_size[i], map_attrs,
555                                                     NULL);
556                                 dev_dbg(bridge,
557                                         "shm MMU PTE entry PA %x"
558                                         " VA %x DSP_VA %x Size %x\n",
559                                         pa_curr,
560                                         gpp_va_curr,
561                                         va_curr,
562                                         page_size[i]);
563                                 if (status)
564                                         goto func_end;
565                         }
566                         pa_curr += page_size[i];
567                         va_curr += page_size[i];
568                         gpp_va_curr += page_size[i];
569                         num_bytes -= page_size[i];
570                         /*
571                          * Don't try smaller sizes. Hopefully we have reached
572                          * an address aligned to a bigger page size.
573                          */
574                         break;
575                 }
576         }
577
578         /*
579          * Copy remaining entries from CDB. All entries are 1 MB and
580          * should not conflict with shm entries on MPU or DSP side.
581          */
582         for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
583                 if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
584                         continue;
585
586                 if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
587                      ul_gpp_pa - 0x100000
588                      && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
589                      ul_gpp_pa + ul_seg_size)
590                     || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
591                         ul_dsp_va - 0x100000 / hio_mgr->word_size
592                         && hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
593                         ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
594                         dev_dbg(bridge,
595                                 "CDB MMU entry %d conflicts with "
596                                 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
597                                 "GppPa %x, DspVa %x, Bytes %x.\n", i,
598                                 hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
599                                 hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
600                                 ul_gpp_pa, ul_dsp_va, ul_seg_size);
601                         status = -EPERM;
602                 } else {
603                         if (ndx < MAX_LOCK_TLB_ENTRIES) {
604                                 ae_proc[ndx].dsp_va =
605                                     hio_mgr->ext_proc_info.ty_tlb[i].
606                                     dsp_virt;
607                                 ae_proc[ndx].gpp_pa =
608                                     hio_mgr->ext_proc_info.ty_tlb[i].
609                                     gpp_phys;
610                                 ae_proc[ndx].gpp_va = 0;
611                                 /* 1 MB */
612                                 ae_proc[ndx].size = 0x100000;
613                                 dev_dbg(bridge, "shm MMU entry PA %x "
614                                         "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
615                                         ae_proc[ndx].dsp_va);
616                                 ndx++;
617                         } else {
618                                 status = hio_mgr->intf_fxns->brd_mem_map
619                                     (hio_mgr->bridge_context,
620                                      hio_mgr->ext_proc_info.ty_tlb[i].
621                                      gpp_phys,
622                                      hio_mgr->ext_proc_info.ty_tlb[i].
623                                      dsp_virt, 0x100000, map_attrs,
624                                      NULL);
625                         }
626                 }
627                 if (status)
628                         goto func_end;
629         }
630
631         map_attrs = 0x00000000;
632         map_attrs = DSP_MAPLITTLEENDIAN;
633         map_attrs |= DSP_MAPPHYSICALADDR;
634         map_attrs |= DSP_MAPELEMSIZE32;
635         map_attrs |= DSP_MAPDONOTLOCK;
636
637         /* Map the L4 peripherals */
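        /*
         * The table is terminated by a zero phys_addr entry; each
         * peripheral is mapped as a single 4 KB page.
         */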
638         i = 0;
639         while (l4_peripheral_table[i].phys_addr) {
640                 status = hio_mgr->intf_fxns->brd_mem_map
641                     (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
642                      l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
643                      map_attrs, NULL);
644                 if (status)
645                         goto func_end;
646                 i++;
647         }
648
649         for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
650                 ae_proc[i].dsp_va = 0;
651                 ae_proc[i].gpp_pa = 0;
652                 ae_proc[i].gpp_va = 0;
653                 ae_proc[i].size = 0;
654         }
655         /*
656          * Set the shm physical address entry (grayed out in CDB file)
657          * to the virtual uncached ioremapped address of shm reserved
658          * on MPU.
659          */
660         hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
661             (ul_gpp_va + ul_seg1_size + ul_pad_size);
662
663         /*
664          * Need shm Phys addr. IO supports only one DSP for now:
665          * num_procs = 1.
666          */
667         if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
668                 status = -EFAULT;
669                 goto func_end;
670         } else {
671                 if (ae_proc[0].dsp_va > ul_shm_base) {
672                         status = -EPERM;
673                         goto func_end;
674                 }
675                 /* ul_shm_base may not be at ul_dsp_va address */
676                 ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
677                     hio_mgr->word_size;
678                 /*
679                  * bridge_dev_ctrl() will set dev context dsp-mmu info. In
680                  * bridge_brd_start() the MMU will be reprogrammed with MMU
681                  * DSPVa-GPPPa pair info while DSP is in a known
682                  * (reset) state.
683                  */
684
685                 status =
686                     hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
687                                                       BRDIOCTL_SETMMUCONFIG,
688                                                       ae_proc);
689                 if (status)
690                         goto func_end;
691                 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
692                 ul_shm_base += ul_shm_base_offset;
693                 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
694                                                        ul_mem_length);
695                 if (ul_shm_base == 0) {
696                         status = -EFAULT;
697                         goto func_end;
698                 }
699                 /* Register SM */
700                 status =
701                     register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
702         }
703
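        /*
         * Carve up the channel part of shared memory: the shm control
         * struct sits at the base, followed by input and output data areas
         * that split the remaining channel space evenly.
         */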
704         hio_mgr->shared_mem = (struct shm *)ul_shm_base;
705         hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
706         hio_mgr->output = hio_mgr->input + (ul_shm_length -
707                                             sizeof(struct shm)) / 2;
708         hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
709
710         /*  Set up Shared memory addresses for messaging. */
711         hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
712                                                       + ul_shm_length);
713         hio_mgr->msg_input =
714             (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
715         hio_mgr->msg_output_ctrl =
716             (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
717                                 ul_msg_length / 2);
718         hio_mgr->msg_output =
719             (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
720         hmsg_mgr->max_msgs =
721             ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
722             / sizeof(struct msg_dspmsg);
723         dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
724                 "output %p, msg_input_ctrl %p, msg_input %p, "
725                 "msg_output_ctrl %p, msg_output %p\n",
726                 (u8 *) hio_mgr->shared_mem, hio_mgr->input,
727                 hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
728                 hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
729                 hio_mgr->msg_output);
730         dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
731                 hmsg_mgr->max_msgs);
732         memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
733
734 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
735         /* Get the start address of trace buffer */
736         status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
737                                    &hio_mgr->trace_buffer_begin);
738         if (status) {
739                 status = -EFAULT;
740                 goto func_end;
741         }
742
743         hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
744             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
745             (hio_mgr->trace_buffer_begin - ul_dsp_va);
746         /* Get the end address of trace buffer */
747         status = cod_get_sym_value(cod_man, SYS_PUTCEND,
748                                    &hio_mgr->trace_buffer_end);
749         if (status) {
750                 status = -EFAULT;
751                 goto func_end;
752         }
753         hio_mgr->trace_buffer_end =
754             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
755             (hio_mgr->trace_buffer_end - ul_dsp_va);
756         /* Get the current address of DSP write pointer */
757         status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
758                                    &hio_mgr->trace_buffer_current);
759         if (status) {
760                 status = -EFAULT;
761                 goto func_end;
762         }
763         hio_mgr->trace_buffer_current =
764             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
765             (hio_mgr->trace_buffer_current - ul_dsp_va);
766         /* Calculate the size of trace buffer */
767         kfree(hio_mgr->msg);
768         hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
769                                 hio_mgr->trace_buffer_begin) *
770                                 hio_mgr->word_size) + 2, GFP_KERNEL);
771         if (!hio_mgr->msg)
772                 status = -ENOMEM;
773
774         hio_mgr->dsp_va = ul_dsp_va;
775         hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
776
777 #endif
778 func_end:
779         return status;
780 }
781
782 /*
783  *  ======== io_buf_size ========
784  *      Size of shared memory I/O channel.
785  */
786 u32 io_buf_size(struct io_mgr *hio_mgr)
787 {
788         if (hio_mgr)
789                 return hio_mgr->sm_buf_size;
790         else
791                 return 0;
792 }
793
794 /*
795  *  ======== io_cancel_chnl ========
796  *      Cancel IO on a given PCPY channel.
797  */
798 void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
799 {
800         struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
801         struct shm *sm;
802
803         if (!hio_mgr)
804                 goto func_end;
805         sm = hio_mgr->shared_mem;
806
807         /* Inform DSP that we have no more buffers on this channel */
808         set_chnl_free(sm, chnl);
809
810         sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
811 func_end:
812         return;
813 }
814
815
816 /*
817  *  ======== io_dispatch_pm ========
818  *      Performs I/O dispatch on PM-related messages from the DSP.
819  */
820 static void io_dispatch_pm(struct io_mgr *pio_mgr)
821 {
822         int status;
823         u32 parg[2];
824
825         /* Perform Power message processing here */
826         parg[0] = pio_mgr->intr_val;
827
828         /* Send the command to the Bridge clk/pwr manager to handle */
829         if (parg[0] == MBX_PM_HIBERNATE_EN) {
830                 dev_dbg(bridge, "PM: Hibernate command\n");
831                 status = pio_mgr->intf_fxns->
832                                 dev_cntrl(pio_mgr->bridge_context,
833                                               BRDIOCTL_PWR_HIBERNATE, parg);
834                 if (status)
835                         pr_err("%s: hibernate cmd failed 0x%x\n",
836                                        __func__, status);
837         } else if (parg[0] == MBX_PM_OPP_REQ) {
838                 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
839                 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
840                 status = pio_mgr->intf_fxns->
841                                 dev_cntrl(pio_mgr->bridge_context,
842                                         BRDIOCTL_CONSTRAINT_REQUEST, parg);
843                 if (status)
844                         dev_dbg(bridge, "PM: Failed to set constraint "
845                                 "= 0x%x\n", parg[1]);
846         } else {
847                 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
848                         parg[0]);
849                 status = pio_mgr->intf_fxns->
850                                 dev_cntrl(pio_mgr->bridge_context,
851                                               BRDIOCTL_CLK_CTRL, parg);
852                 if (status)
853                         dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
854                                 "= 0x%x\n", *parg);
855         }
856 }
857
858 /*
859  *  ======== io_dpc ========
860  *      Deferred procedure call for the shared memory channel driver ISR.
861  *      Carries out the dispatch of I/O as a non-preemptible event. It can
862  *      only be pre-empted by an ISR.
863  */
864 void io_dpc(unsigned long ref_data)
865 {
866         struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
867         struct chnl_mgr *chnl_mgr_obj;
868         struct msg_mgr *msg_mgr_obj;
869         struct deh_mgr *hdeh_mgr;
870         u32 requested;
871         u32 serviced;
872
873         if (!pio_mgr)
874                 goto func_end;
875         chnl_mgr_obj = pio_mgr->chnl_mgr;
876         dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
877         dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
878         if (!chnl_mgr_obj)
879                 goto func_end;
880
881         requested = pio_mgr->dpc_req;
882         serviced = pio_mgr->dpc_sched;
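        /*
         * dpc_req is advanced under dpc_lock by io_mbox_msg() and
         * iosm_schedule(); this tasklet drains the difference and records
         * it in dpc_sched.
         */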
883
884         if (serviced == requested)
885                 goto func_end;
886
887         /* Process pending DPC's */
888         do {
889                 /* Check value of interrupt reg to ensure it's a valid error */
890                 if ((pio_mgr->intr_val > DEH_BASE) &&
891                     (pio_mgr->intr_val < DEH_LIMIT)) {
892                         /* Notify DSP/BIOS exception */
893                         if (hdeh_mgr) {
894 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
895                                 print_dsp_debug_trace(pio_mgr);
896 #endif
897                                 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
898                                                   pio_mgr->intr_val);
899                         }
900                 }
901                 /* Proc-copy channel dispatch */
902                 input_chnl(pio_mgr, NULL, IO_SERVICE);
903                 output_chnl(pio_mgr, NULL, IO_SERVICE);
904
905 #ifdef CHNL_MESSAGES
906                 if (msg_mgr_obj) {
907                         /* Perform I/O dispatch on message queues */
908                         input_msg(pio_mgr, msg_mgr_obj);
909                         output_msg(pio_mgr, msg_mgr_obj);
910                 }
911
912 #endif
913 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
914                 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
915                         /* Notify DSP Trace message */
916                         print_dsp_debug_trace(pio_mgr);
917                 }
918 #endif
919                 serviced++;
920         } while (serviced != requested);
921         pio_mgr->dpc_sched = requested;
922 func_end:
923         return;
924 }
925
926 /*
927  *  ======== io_mbox_msg ========
928  *      Main interrupt handler for the shared memory IO manager.
929  *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
930  *      schedules a DPC to dispatch I/O.
931  */
932 int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
933 {
934         struct io_mgr *pio_mgr;
935         struct dev_object *dev_obj;
936         unsigned long flags;
937
938         dev_obj = dev_get_first();
939         dev_get_io_mgr(dev_obj, &pio_mgr);
940
941         if (!pio_mgr)
942                 return NOTIFY_BAD;
943
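        /*
         * PM-class values are handled inline first; then anything other
         * than MBX_DEH_RESET is deferred to the DPC tasklet.
         */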
944         pio_mgr->intr_val = (u16)((u32)msg);
945         if (pio_mgr->intr_val & MBX_PM_CLASS)
946                 io_dispatch_pm(pio_mgr);
947
948         if (pio_mgr->intr_val == MBX_DEH_RESET) {
949                 pio_mgr->intr_val = 0;
950         } else {
951                 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
952                 pio_mgr->dpc_req++;
953                 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
954                 tasklet_schedule(&pio_mgr->dpc_tasklet);
955         }
956         return NOTIFY_OK;
957 }
958
959 /*
960  *  ======== io_request_chnl ========
961  *  Purpose:
962  *      Request channel I/O from the DSP. Sets flags in shared memory, then
963  *      interrupts the DSP.
964  */
965 void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
966                         u8 io_mode, u16 *mbx_val)
967 {
968         struct chnl_mgr *chnl_mgr_obj;
969         struct shm *sm;
970
971         if (!pchnl || !mbx_val)
972                 goto func_end;
973         chnl_mgr_obj = io_manager->chnl_mgr;
974         sm = io_manager->shared_mem;
975         if (io_mode == IO_INPUT) {
976                 /*
977                  * Assertion fires if CHNL_AddIOReq() was called on a stream
978                  * which was cancelled, or attached to a dead board.
979                  */
980                 DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
981                            (pchnl->state == CHNL_STATEEOS));
982                 /* Indicate to the DSP we have a buffer available for input */
983                 set_chnl_busy(sm, pchnl->chnl_id);
984                 *mbx_val = MBX_PCPY_CLASS;
985         } else if (io_mode == IO_OUTPUT) {
986                 /*
987                  * This assertion fails if CHNL_AddIOReq() was called on a
988                  * stream which was cancelled, or attached to a dead board.
989                  */
990                 DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
991                            CHNL_STATEREADY);
992                 /*
993                  * Record the fact that we have a buffer available for
994                  * output.
995                  */
996                 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
997         } else {
998                 DBC_ASSERT(io_mode);    /* Shouldn't get here. */
999         }
1000 func_end:
1001         return;
1002 }
1003
1004 /*
1005  *  ======== iosm_schedule ========
1006  *      Schedule DPC for IO.
1007  */
1008 void iosm_schedule(struct io_mgr *io_manager)
1009 {
1010         unsigned long flags;
1011
1012         if (!io_manager)
1013                 return;
1014
1015         /* Increment count of DPC's pending. */
1016         spin_lock_irqsave(&io_manager->dpc_lock, flags);
1017         io_manager->dpc_req++;
1018         spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
1019
1020         /* Schedule DPC */
1021         tasklet_schedule(&io_manager->dpc_tasklet);
1022 }
1023
1024 /*
1025  *  ======== find_ready_output ========
1026  *      Search for a host output channel which is ready to send.  If this is
1027  *      called as a result of servicing the DPC, then implement a round
1028  *      robin search; otherwise, this was called by a client thread (via
1029  *      IO_Dispatch()), so just start searching from the current channel id.
1030  */
1031 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1032                              struct chnl_object *pchnl, u32 mask)
1033 {
1034         u32 ret = OUTPUTNOTREADY;
1035         u32 id, start_id;
1036         u32 shift;
1037
1038         id = (pchnl !=
1039               NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
1040         id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1041         if (id >= CHNL_MAXCHANNELS)
1042                 goto func_end;
1043         if (mask) {
1044                 shift = (1 << id);
1045                 start_id = id;
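                /* Scan at most one full circle of channel ids from start_id. */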
1046                 do {
1047                         if (mask & shift) {
1048                                 ret = id;
1049                                 if (pchnl == NULL)
1050                                         chnl_mgr_obj->last_output = id;
1051                                 break;
1052                         }
1053                         id = id + 1;
1054                         id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1055                         shift = (1 << id);
1056                 } while (id != start_id);
1057         }
1058 func_end:
1059         return ret;
1060 }
1061
1062 /*
1063  *  ======== input_chnl ========
1064  *      Dispatch a buffer on an input channel.
1065  */
1066 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1067                         u8 io_mode)
1068 {
1069         struct chnl_mgr *chnl_mgr_obj;
1070         struct shm *sm;
1071         u32 chnl_id;
1072         u32 bytes;
1073         struct chnl_irp *chnl_packet_obj = NULL;
1074         u32 dw_arg;
1075         bool clear_chnl = false;
1076         bool notify_client = false;
1077
1078         sm = pio_mgr->shared_mem;
1079         chnl_mgr_obj = pio_mgr->chnl_mgr;
1080
1081         /* Attempt to perform input */
1082         if (!sm->input_full)
1083                 goto func_end;
1084
1085         bytes = sm->input_size * chnl_mgr_obj->word_size;
1086         chnl_id = sm->input_id;
1087         dw_arg = sm->arg;
1088         if (chnl_id >= CHNL_MAXCHANNELS) {
1089                 /* Shouldn't be here: would indicate corrupted shm. */
1090                 DBC_ASSERT(chnl_id);
1091                 goto func_end;
1092         }
1093         pchnl = chnl_mgr_obj->channels[chnl_id];
1094         if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1095                 if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1096                         /* Get the I/O request, and attempt a transfer */
1097                         if (!list_empty(&pchnl->io_requests)) {
1098                                 if (!pchnl->cio_reqs)
1099                                         goto func_end;
1100
1101                                 chnl_packet_obj = list_first_entry(
1102                                                 &pchnl->io_requests,
1103                                                 struct chnl_irp, link);
1104                                 list_del(&chnl_packet_obj->link);
1105                                 pchnl->cio_reqs--;
1106
1107                                 /*
1108                                  * Ensure we don't overflow the client's
1109                                  * buffer.
1110                                  */
1111                                 bytes = min(bytes, chnl_packet_obj->byte_size);
1112                                 memcpy(chnl_packet_obj->host_sys_buf,
1113                                                 pio_mgr->input, bytes);
1114                                 pchnl->bytes_moved += bytes;
1115                                 chnl_packet_obj->byte_size = bytes;
1116                                 chnl_packet_obj->arg = dw_arg;
1117                                 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1118
1119                                 if (bytes == 0) {
1120                                         /*
1121                                          * This assertion fails if the DSP
1122                                          * sends EOS more than once on this
1123                                          * channel.
1124                                          */
1125                                         if (pchnl->state & CHNL_STATEEOS)
1126                                                 goto func_end;
1127                                         /*
1128                                          * Zero bytes indicates EOS. Update
1129                                          * IOC status for this chirp, and also
1130                                          * the channel state.
1131                                          */
1132                                         chnl_packet_obj->status |=
1133                                                 CHNL_IOCSTATEOS;
1134                                         pchnl->state |= CHNL_STATEEOS;
1135                                         /*
1136                                          * Notify that end of stream has
1137                                          * occurred.
1138                                          */
1139                                         ntfy_notify(pchnl->ntfy_obj,
1140                                                         DSP_STREAMDONE);
1141                                 }
1142                                 /* Tell DSP if no more I/O buffers available */
1143                                 if (list_empty(&pchnl->io_requests))
1144                                         set_chnl_free(sm, pchnl->chnl_id);
1145                                 clear_chnl = true;
1146                                 notify_client = true;
1147                         } else {
1148                                 /*
1149                                  * Input full for this channel, but we have no
1150                                  * buffers available.  The channel must be
1151                                  * "idling". Clear out the physical input
1152                                  * channel.
1153                                  */
1154                                 clear_chnl = true;
1155                         }
1156                 } else {
1157                         /* Input channel cancelled: clear input channel */
1158                         clear_chnl = true;
1159                 }
1160         } else {
1161                 /* DPC fired after host closed channel: clear input channel */
1162                 clear_chnl = true;
1163         }
1164         if (clear_chnl) {
1165                 /* Indicate to the DSP we have read the input */
1166                 sm->input_full = 0;
1167                 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1168         }
1169         if (notify_client) {
1170                 /* Notify client with IO completion record */
1171                 notify_chnl_complete(pchnl, chnl_packet_obj);
1172         }
1173 func_end:
1174         return;
1175 }
1176
1177 /*
1178  *  ======== input_msg ========
1179  *      Copies messages from shared memory to the message queues.
1180  */
1181 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1182 {
1183         u32 num_msgs;
1184         u32 i;
1185         u8 *msg_input;
1186         struct msg_queue *msg_queue_obj;
1187         struct msg_frame *pmsg;
1188         struct msg_dspmsg msg;
1189         struct msg_ctrl *msg_ctr_obj;
1190         u32 input_empty;
1191         u32 addr;
1192
1193         msg_ctr_obj = pio_mgr->msg_input_ctrl;
1194         /* Get the number of input messages to be read */
1195         input_empty = msg_ctr_obj->buf_empty;
1196         num_msgs = msg_ctr_obj->size;
1197         if (input_empty)
1198                 return;
1199
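        /*
         * Each message is read out of shared memory field by field with
         * 32-bit external DSP memory accesses, then routed to the owning
         * message queue by msgq_id.
         */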
1200         msg_input = pio_mgr->msg_input;
1201         for (i = 0; i < num_msgs; i++) {
1202                 /* Read the next message */
1203                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
1204                 msg.msg.cmd =
1205                         read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1206                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
1207                 msg.msg.arg1 =
1208                         read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1209                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
1210                 msg.msg.arg2 =
1211                         read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1212                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1213                 msg.msgq_id =
1214                         read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1215                 msg_input += sizeof(struct msg_dspmsg);
1216
1217                 /* Determine which queue to put the message in */
1218                 dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
1219                                 "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
1220                                 msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
1221                 /*
1222                  * Interrupt may occur before shared memory and message
1223                  * input locations have been set up. If all nodes were
1224                  * cleaned up, hmsg_mgr->max_msgs should be 0.
1225                  */
1226                 list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
1227                                 list_elem) {
1228                         if (msg.msgq_id != msg_queue_obj->msgq_id)
1229                                 continue;
1230                         /* Found it */
1231                         if (msg.msg.cmd == RMS_EXITACK) {
1232                                 /*
1233                                  * Call the node exit notification.
1234                                  * The exit message does not get
1235                                  * queued.
1236                                  */
1237                                 (*hmsg_mgr->on_exit)(msg_queue_obj->arg,
1238                                                 msg.msg.arg1);
1239                                 break;
1240                         }
1241                         /*
1242                          * Not an exit acknowledgement, queue
1243                          * the message.
1244                          */
1245                         if (list_empty(&msg_queue_obj->msg_free_list)) {
1246                                 /*
1247                                  * No free frame to copy the
1248                                  * message into.
1249                                  */
1250                                 pr_err("%s: no free msg frames,"
1251                                                 " discarding msg\n",
1252                                                 __func__);
1253                                 break;
1254                         }
1255
1256                         pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
1257                                         struct msg_frame, list_elem);
1258                         list_del(&pmsg->list_elem);
1259                         pmsg->msg_data = msg;
1260                         list_add_tail(&pmsg->list_elem,
1261                                         &msg_queue_obj->msg_used_list);
1262                         ntfy_notify(msg_queue_obj->ntfy_obj,
1263                                         DSP_NODEMESSAGEREADY);
1264                         sync_set_event(msg_queue_obj->sync_event);
1265                 }
1266         }
1267         /* Set the post SWI flag */
1268         if (num_msgs > 0) {
1269                 /* Tell the DSP we've read the messages */
1270                 msg_ctr_obj->buf_empty = true;
1271                 msg_ctr_obj->post_swi = true;
1272                 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1273         }
1274 }
1275
1276 /*
1277  *  ======== notify_chnl_complete ========
1278  *  Purpose:
1279  *      Signal the channel event, notifying the client that I/O has completed.
1280  */
1281 static void notify_chnl_complete(struct chnl_object *pchnl,
1282                                  struct chnl_irp *chnl_packet_obj)
1283 {
1284         bool signal_event;
1285
1286         if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
1287                 goto func_end;
1288
1289         /*
1290          * Note: we signal the channel event only if the queue of IO
1291          * completions is empty.  If it is not empty, the event is sure to be
1292          * signalled by the only IO completion list consumer:
1293          * bridge_chnl_get_ioc().
1294          */
1295         signal_event = list_empty(&pchnl->io_completions);
1296         /* Enqueue the IO completion info for the client */
1297         list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
1298         pchnl->cio_cs++;
1299
1300         if (pchnl->cio_cs > pchnl->chnl_packets)
1301                 goto func_end;
1302         /* Signal the channel event (if not already set) that IO is complete */
1303         if (signal_event)
1304                 sync_set_event(pchnl->sync_event);
1305
1306         /* Notify that IO is complete */
1307         ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1308 func_end:
1309         return;
1310 }
1311
1312 /*
1313  *  ======== output_chnl ========
1314  *  Purpose:
1315  *      Dispatch a buffer on an output channel.
1316  */
1317 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1318                         u8 io_mode)
1319 {
1320         struct chnl_mgr *chnl_mgr_obj;
1321         struct shm *sm;
1322         u32 chnl_id;
1323         struct chnl_irp *chnl_packet_obj;
1324         u32 dw_dsp_f_mask;
1325
1326         chnl_mgr_obj = pio_mgr->chnl_mgr;
1327         sm = pio_mgr->shared_mem;
1328         /* Attempt to perform output */
1329         if (sm->output_full)
1330                 goto func_end;
1331
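        /*
         * Only a channel in the READY state may produce output; the EOS
         * flag is masked off so a pending end-of-stream marker does not
         * block the transfer.
         */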
1332         if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1333                 goto func_end;
1334
1335         /* Look to see if both a PC and DSP output channel are ready */
1336         dw_dsp_f_mask = sm->dsp_free_mask;
1337         chnl_id =
1338             find_ready_output(chnl_mgr_obj, pchnl,
1339                               (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
1340         if (chnl_id == OUTPUTNOTREADY)
1341                 goto func_end;
1342
1343         pchnl = chnl_mgr_obj->channels[chnl_id];
1344         if (!pchnl || list_empty(&pchnl->io_requests)) {
1345                 /* Shouldn't get here */
1346                 goto func_end;
1347         }
1348
1349         if (!pchnl->cio_reqs)
1350                 goto func_end;
1351
1352         /* Get the I/O request, and attempt a transfer */
1353         chnl_packet_obj = list_first_entry(&pchnl->io_requests,
1354                         struct chnl_irp, link);
1355         list_del(&chnl_packet_obj->link);
1356
1357         pchnl->cio_reqs--;
1358
1359         /* Record fact that no more I/O buffers available */
1360         if (list_empty(&pchnl->io_requests))
1361                 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
1362
1363         /* Transfer buffer to DSP side */
1364         chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
1365                                         chnl_packet_obj->byte_size);
1366         memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1367                                         chnl_packet_obj->byte_size);
1368         pchnl->bytes_moved += chnl_packet_obj->byte_size;
1369         /* Write all 32 bits of arg */
1370         sm->arg = chnl_packet_obj->arg;
1371 #if _CHNL_WORDSIZE == 2
1372         /* The SM access word size may differ (e.g. 16/32-bit words) */
1373         sm->output_id = (u16) chnl_id;
1374         sm->output_size = (u16) (chnl_packet_obj->byte_size +
1375                                 chnl_mgr_obj->word_size - 1) /
1376                                 (u16) chnl_mgr_obj->word_size;
1377 #else
1378         sm->output_id = chnl_id;
1379         sm->output_size = (chnl_packet_obj->byte_size +
1380                         chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
1381 #endif
1382         sm->output_full =  1;
1383         /* Indicate to the DSP we have written the output */
1384         sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1385         /* Notify client with IO completion record (keep EOS) */
1386         chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1387         notify_chnl_complete(pchnl, chnl_packet_obj);
1388         /* Notify if stream is done. */
1389         if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1390                 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1391
1392 func_end:
1393         return;
1394 }
1395
1396 /*
1397  *  ======== output_msg ========
1398  *      Copies messages from the message queues to the shared memory.
1399  */
1400 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1401 {
1402         u32 num_msgs = 0;
1403         u32 i;
1404         struct msg_dspmsg *msg_output;
1405         struct msg_frame *pmsg;
1406         struct msg_ctrl *msg_ctr_obj;
1407         u32 val;
1408         u32 addr;
1409
1410         msg_ctr_obj = pio_mgr->msg_output_ctrl;
1411
1412         /* Check if output has been cleared */
1413         if (!msg_ctr_obj->buf_empty)
1414                 return;
1415
1416         num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1417                 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1418         msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;
1419
1420         /* Copy num_msgs messages into shared memory */
1421         for (i = 0; i < num_msgs; i++) {
1422                 if (list_empty(&hmsg_mgr->msg_used_list))
1423                         continue;
1424
1425                 pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
1426                                 struct msg_frame, list_elem);
1427                 list_del(&pmsg->list_elem);
1428
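                /*
                 * Copy the frame into shared memory field by field
                 * (msgq_id, cmd, arg1, arg2) via the 32-bit external DSP
                 * data write helper.
                 */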
1429                 val = (pmsg->msg_data).msgq_id;
1430                 addr = (u32) &msg_output->msgq_id;
1431                 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1432
1433                 val = (pmsg->msg_data).msg.cmd;
1434                 addr = (u32) &msg_output->msg.cmd;
1435                 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1436
1437                 val = (pmsg->msg_data).msg.arg1;
1438                 addr = (u32) &msg_output->msg.arg1;
1439                 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1440
1441                 val = (pmsg->msg_data).msg.arg2;
1442                 addr = (u32) &msg_output->msg.arg2;
1443                 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1444
1445                 msg_output++;
1446                 list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
1447                 sync_set_event(hmsg_mgr->sync_event);
1448         }
1449
1450         if (num_msgs > 0) {
1451                 hmsg_mgr->msgs_pending -= num_msgs;
1452 #if _CHNL_WORDSIZE == 2
1453                 /*
1454                  * The SM access word size may differ
1455                  * (e.g. 16/32-bit words)
1456                  */
1457                 msg_ctr_obj->size = (u16) num_msgs;
1458 #else
1459                 msg_ctr_obj->size = num_msgs;
1460 #endif
1461                 msg_ctr_obj->buf_empty = false;
1462                 /* Set the post SWI flag */
1463                 msg_ctr_obj->post_swi = true;
1464                 /* Tell the DSP we have written the output. */
1465                 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1466         }
1467 }
1468
1469 /*
1470  *  ======== register_shm_segs ========
1471  *  Purpose:
1472  *      Registers GPP SM segment with CMM.
1473  */
1474 static int register_shm_segs(struct io_mgr *hio_mgr,
1475                                     struct cod_manager *cod_man,
1476                                     u32 dw_gpp_base_pa)
1477 {
1478         int status = 0;
1479         u32 ul_shm0_base = 0;
1480         u32 shm0_end = 0;
1481         u32 ul_shm0_rsrvd_start = 0;
1482         u32 ul_rsrvd_size = 0;
1483         u32 ul_gpp_phys;
1484         u32 ul_dsp_virt;
1485         u32 ul_shm_seg_id0 = 0;
1486         u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1487
1488         /*
1489          * Read address and size info for first SM region.
1490          * Get start of 1st SM Heap region.
1491          */
1492         status =
1493             cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
1494         if (ul_shm0_base == 0) {
1495                 status = -EPERM;
1496                 goto func_end;
1497         }
1498         /* Get end of 1st SM Heap region */
1499         if (!status) {
1500                 /* Get the end address of the first SM heap region */
1501                 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1502                                            &shm0_end);
1503                 if (shm0_end == 0) {
1504                         status = -EPERM;
1505                         goto func_end;
1506                 }
1507         }
1508         /* Start of Gpp reserved region */
1509         if (!status) {
1510                 /* Get the start address of the GPP reserved region */
1511                 status =
1512                     cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1513                                       &ul_shm0_rsrvd_start);
1514                 if (ul_shm0_rsrvd_start == 0) {
1515                         status = -EPERM;
1516                         goto func_end;
1517                 }
1518         }
1519         /* Register with CMM */
1520         if (!status) {
1521                 status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
1522                 if (!status) {
1523                         status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
1524                                                            CMM_ALLSEGMENTS);
1525                 }
1526         }
1527         /* Register new SM region(s) */
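        /*
         * SM region 0 layout: [ul_shm0_base, ul_shm0_rsrvd_start) is the
         * part the DSP allocates from; [ul_shm0_rsrvd_start, shm0_end] is
         * the GPP-reserved part registered with CMM below.
         */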
1528         if (!status && (shm0_end - ul_shm0_base) > 0) {
1529                 /* Calc size (bytes) of SM the GPP can alloc from */
1530                 ul_rsrvd_size =
1531                     (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1532                 if (ul_rsrvd_size <= 0) {
1533                         status = -EPERM;
1534                         goto func_end;
1535                 }
1536                 /* Calc size of SM DSP can alloc from */
1537                 ul_dsp_size =
1538                     (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1539                 if (ul_dsp_size <= 0) {
1540                         status = -EPERM;
1541                         goto func_end;
1542                 }
1543                 /* First TLB entry reserved for Bridge SM use. */
1544                 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
1545                 /* Get size in bytes */
1546                 ul_dsp_virt =
1547                     hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
1548                     hio_mgr->word_size;
1549                 /*
1550                  * Calc byte offset used to convert GPP phys <-> DSP byte
1551                  * address.
1552                  */
1553                 if (dw_gpp_base_pa > ul_dsp_virt)
1554                         dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1555                 else
1556                         dw_offset = ul_dsp_virt - dw_gpp_base_pa;
1557
1558                 if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1559                         status = -EPERM;
1560                         goto func_end;
1561                 }
1562                 /*
1563                  * Calc the GPP virtual base of the SM region.
1564                  * This is actually an uncached kernel virtual address.
1565                  */
1566                 dw_gpp_base_va =
1567                     ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1568                     ul_dsp_virt;
1569                 /*
1570                  * Calc Gpp phys base of SM region.
1571                  * This is the physical address.
1572                  */
1573                 dw_gpp_base_pa =
1574                     dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1575                     ul_dsp_virt;
1576                 /* Register SM Segment 0. */
1577                 status =
1578                     cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
1579                                            ul_rsrvd_size, dw_offset,
1580                                            (dw_gpp_base_pa >
1581                                             ul_dsp_virt) ? CMM_ADDTODSPPA :
1582                                            CMM_SUBFROMDSPPA,
1583                                            (u32) (ul_shm0_base *
1584                                                   hio_mgr->word_size),
1585                                            ul_dsp_size, &ul_shm_seg_id0,
1586                                            dw_gpp_base_va);
1587                 /* First SM region is seg_id = 1 */
1588                 if (ul_shm_seg_id0 != 1)
1589                         status = -EPERM;
1590         }
1591 func_end:
1592         return status;
1593 }
1594
1595 /* ZCPY IO routines. */
1596 /*
1597  *  ======== io_sh_msetting ========
1598  *      Sets the requested shm setting.
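 *
 *  Hypothetical caller sketch (DVFS builds; names assumed valid):
 *      u32 opp = 0;
 *      if (!io_sh_msetting(hio_mgr, SHM_GETOPP, &opp))
 *              pr_debug("DSP requested OPP %u\n", opp);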
1599  */
1600 int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1601 {
1602 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1603         u32 i;
1604         struct dspbridge_platform_data *pdata =
1605             omap_dspbridge_dev->dev.platform_data;
1606
1607         switch (desc) {
1608         case SHM_CURROPP:
1609                 /* Update the shared memory with requested OPP information */
1610                 if (pargs != NULL)
1611                         hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1612                             *(u32 *) pargs;
1613                 else
1614                         return -EPERM;
1615                 break;
1616         case SHM_OPPINFO:
1617                 /*
1618                  * Update the shared memory with the voltage, frequency,
1619                  * min and max frequency values for an OPP.
1620                  */
1621                 for (i = 0; i <= dsp_max_opps; i++) {
1622                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1623                             voltage = vdd1_dsp_freq[i][0];
1624                         dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1625                                 vdd1_dsp_freq[i][0]);
1626                         hio_mgr->shared_mem->opp_table_struct.
1627                             opp_point[i].frequency = vdd1_dsp_freq[i][1];
1628                         dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1629                                 vdd1_dsp_freq[i][1]);
1630                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1631                             min_freq = vdd1_dsp_freq[i][2];
1632                         dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1633                                 vdd1_dsp_freq[i][2]);
1634                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1635                             max_freq = vdd1_dsp_freq[i][3];
1636                         dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1637                                 vdd1_dsp_freq[i][3]);
1638                 }
1639                 hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1640                     dsp_max_opps;
1641                 dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1642                 /* Update the current OPP number */
1643                 if (pdata->dsp_get_opp)
1644                         i = (*pdata->dsp_get_opp) ();
1645                 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1646                 dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1647                 break;
1648         case SHM_GETOPP:
1649                 /* Get the OPP that DSP has requested */
1650                 *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1651                 break;
1652         default:
1653                 break;
1654         }
1655 #endif
1656         return 0;
1657 }
1658
1659 /*
1660  *  ======== bridge_io_get_proc_load ========
1661  *      Gets the Processor's Load information
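 *
 *  Hypothetical caller sketch (assumes a valid io_mgr handle):
 *      struct dsp_procloadstat stat;
 *      if (!bridge_io_get_proc_load(hio_mgr, &stat))
 *              pr_info("DSP load: %d\n", stat.curr_load);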
1662  */
1663 int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1664                                 struct dsp_procloadstat *proc_lstat)
1665 {
1666         if (!hio_mgr->shared_mem)
1667                 return -EFAULT;
1668
1669         proc_lstat->curr_load =
1670                         hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1671         proc_lstat->predicted_load =
1672             hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1673         proc_lstat->curr_dsp_freq =
1674             hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1675         proc_lstat->predicted_freq =
1676             hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1677
1678         dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1679                 "Pred Freq = %d\n", proc_lstat->curr_load,
1680                 proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
1681                 proc_lstat->predicted_freq);
1682         return 0;
1683 }
1684
1685
1686 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
1687 void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1688 {
1689         u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1690
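        /*
         * Drain the shared trace buffer: copy and print new data until the
         * GPP read pointer catches up with the DSP write pointer, handling
         * buffer wraparound in the second branch below.
         */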
1691         while (true) {
1692                 /* Get the DSP current pointer */
1693                 ul_gpp_cur_pointer =
1694                     *(u32 *) (hio_mgr->trace_buffer_current);
1695                 ul_gpp_cur_pointer =
1696                     hio_mgr->gpp_va + (ul_gpp_cur_pointer -
1697                                           hio_mgr->dsp_va);
1698
1699                 /* No new debug messages available yet */
1700                 if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
1701                         break;
1702                 } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
1703                         /* Continuous data */
1704                         ul_new_message_length =
1705                             ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
1706
1707                         memcpy(hio_mgr->msg,
1708                                (char *)hio_mgr->gpp_read_pointer,
1709                                ul_new_message_length);
1710                         hio_mgr->msg[ul_new_message_length] = '\0';
1711                         /*
1712                          * Advance the GPP trace pointer to DSP current
1713                          * pointer.
1714                          */
1715                         hio_mgr->gpp_read_pointer += ul_new_message_length;
1716                         /* Print the trace messages */
1717                         pr_info("DSPTrace: %s\n", hio_mgr->msg);
1718                 } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
1719                         /* Handle trace buffer wraparound */
1720                         memcpy(hio_mgr->msg,
1721                                (char *)hio_mgr->gpp_read_pointer,
1722                                hio_mgr->trace_buffer_end -
1723                                hio_mgr->gpp_read_pointer);
1724                         ul_new_message_length =
1725                             ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
1726                         memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
1727                                               hio_mgr->gpp_read_pointer],
1728                                (char *)hio_mgr->trace_buffer_begin,
1729                                ul_new_message_length);
1730                         hio_mgr->msg[hio_mgr->trace_buffer_end -
1731                                       hio_mgr->gpp_read_pointer +
1732                                       ul_new_message_length] = '\0';
1733                         /*
1734                          * Advance the GPP trace pointer to DSP current
1735                          * pointer.
1736                          */
1737                         hio_mgr->gpp_read_pointer =
1738                             hio_mgr->trace_buffer_begin +
1739                             ul_new_message_length;
1740                         /* Print the trace messages */
1741                         pr_info("DSPTrace: %s\n", hio_mgr->msg);
1742                 }
1743         }
1744 }
1745 #endif
1746
1747 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1748 /*
1749  *  ======== print_dsp_trace_buffer ========
1750  *      Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1751  *  Parameters:
1752  *    hbridge_context:   Handle to the Bridge driver's device context
1753  *                       whose DSP trace buffer is printed.
1754  *  Returns:
1755  *      0:        Success.
1756  *      -ENOMEM:    Unable to allocate memory.
1757  *  Requires:
1758  *      hbridge_context must be valid. Checked in bridge_deh_notify.
1759  */
1760 int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1761 {
1762         int status = 0;
1763         struct cod_manager *cod_mgr;
1764         u32 ul_trace_end;
1765         u32 ul_trace_begin;
1766         u32 trace_cur_pos;
1767         u32 ul_num_bytes = 0;
1768         u32 ul_num_words = 0;
1769         u32 ul_word_size = 2;
1770         char *psz_buf;
1771         char *str_beg;
1772         char *trace_end;
1773         char *buf_end;
1774         char *new_line;
1775
1776         struct bridge_dev_context *pbridge_context = hbridge_context;
1777         struct bridge_drv_interface *intf_fxns;
1778         struct dev_object *dev_obj = (struct dev_object *)
1779             pbridge_context->dev_obj;
1780
1781         status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1782
1783         if (cod_mgr) {
1784                 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1785                 status =
1786                     cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1787         } else {
1788                 status = -EFAULT;
1789         }
1790         if (!status)
1791                 status =
1792                     cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1793
1794         if (!status)
1795                 /* trace_cur_pos will hold the address of a DSP pointer */
1796                 status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1797                                                         &trace_cur_pos);
1798
1799         if (status)
1800                 goto func_end;
1801
1802         ul_num_bytes = (ul_trace_end - ul_trace_begin);
1803
1804         ul_num_words = ul_num_bytes * ul_word_size;
1805         status = dev_get_intf_fxns(dev_obj, &intf_fxns);
1806
1807         if (status)
1808                 goto func_end;
1809
1810         psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1811         if (psz_buf != NULL) {
1812                 /* Read trace buffer data */
1813                 status = (*intf_fxns->brd_read)(pbridge_context,
1814                         (u8 *)psz_buf, (u32)ul_trace_begin,
1815                         ul_num_bytes, 0);
1816
1817                 if (status)
1818                         goto func_end;
1819
1820                 /* Pack and do newline conversion */
1821                 pr_debug("%s: before pack and unpack.\n",
1822                         __func__);
1823                 pr_debug("%s: DSP Trace Buffer Begin:\n"
1824                         "=======================\n%s\n",
1825                         __func__, psz_buf);
1826
1827                 /* Read the value at the DSP address in trace_cur_pos. */
1828                 status = (*intf_fxns->brd_read)(pbridge_context,
1829                                 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1830                                 4, 0);
1831                 if (status)
1832                         goto func_end;
1833                 /* Pack and do newline conversion */
1834                 pr_info("DSP Trace Buffer Begin:\n"
1835                         "=======================\n%s\n",
1836                         psz_buf);
1837
1838
1839                 /* convert to offset */
1840                 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1841
1842                 if (ul_num_bytes) {
1843                         /*
1844                          * The buffer is not full; find the end of the
1845                          * data -- buf_end will be >= psz_buf after the
1846                          * while loop.
1847                          */
1848                         buf_end = &psz_buf[ul_num_bytes+1];
1849                         /* DSP print position */
1850                         trace_end = &psz_buf[trace_cur_pos];
1851
1852                         /*
1853                          * Search buffer for a new_line and replace it
1854                          * with '\0', then print as string.
1855                          * Continue until end of buffer is reached.
1856                          */
1857                         str_beg = trace_end;
1858                         ul_num_bytes = buf_end - str_beg;
1859
1860                         while (str_beg < buf_end) {
1861                                 new_line = strnchr(str_beg, ul_num_bytes,
1862                                                                 '\n');
1863                                 if (new_line && new_line < buf_end) {
1864                                         *new_line = 0;
1865                                         pr_debug("%s\n", str_beg);
1866                                         str_beg = ++new_line;
1867                                         ul_num_bytes = buf_end - str_beg;
1868                                 } else {
1869                                         /*
1870                                          * Assume buffer empty if it contains
1871                                          * a zero
1872                                          */
1873                                         if (*str_beg != '\0') {
1874                                                 str_beg[ul_num_bytes] = 0;
1875                                                 pr_debug("%s\n", str_beg);
1876                                         }
1877                                         str_beg = buf_end;
1878                                         ul_num_bytes = 0;
1879                                 }
1880                         }
1881                         /*
1882                          * Now handle the start of the buffer up to the
1883                          * DSP write position: replace each newline with
1884                          * '\0', then print as a string.
1885                          */
1886                         str_beg = psz_buf;
1887                         ul_num_bytes = trace_end - str_beg;
1888
1889                         while (str_beg < trace_end) {
1890                                 new_line = strnchr(str_beg, ul_num_bytes, '\n');
1891                                 if (new_line != NULL && new_line < trace_end) {
1892                                         *new_line = 0;
1893                                         pr_debug("%s\n", str_beg);
1894                                         str_beg = ++new_line;
1895                                         ul_num_bytes = trace_end - str_beg;
1896                                 } else {
1897                                         /*
1898                                          * Assume buffer empty if it contains
1899                                          * a zero
1900                                          */
1901                                         if (*str_beg != '\0') {
1902                                                 str_beg[ul_num_bytes] = 0;
1903                                                 pr_debug("%s\n", str_beg);
1904                                         }
1905                                         str_beg = trace_end;
1906                                         ul_num_bytes = 0;
1907                                 }
1908                         }
1909                 }
1910                 pr_info("\n=======================\n"
1911                         "DSP Trace Buffer End:\n");
1912                 kfree(psz_buf);
1913         } else {
1914                 status = -ENOMEM;
1915         }
1916 func_end:
1917         if (status)
1918                 dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
1919         return status;
1920 }
1921
1922 /**
1923  * dump_dsp_stack() - This function dumps the data on the DSP stack.
1924  * @bridge_context:     Bridge driver's device context pointer.
1925  *
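 * Return: 0 on success, or a negative error code on failure.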
1926  */
1927 int dump_dsp_stack(struct bridge_dev_context *bridge_context)
1928 {
1929         int status = 0;
1930         struct cod_manager *code_mgr;
1931         struct node_mgr *node_mgr;
1932         u32 trace_begin;
1933         char name[256];
1934         struct {
1935                 u32 head[2];
1936                 u32 size;
1937         } mmu_fault_dbg_info;
1938         u32 *buffer;
1939         u32 *buffer_beg;
1940         u32 *buffer_end;
1941         u32 exc_type;
1942         u32 dyn_ext_base;
1943         u32 i;
1944         u32 offset_output;
1945         u32 total_size;
1946         u32 poll_cnt;
1947         const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
1948                                 "IRP", "NRP", "AMR", "SSR",
1949                                 "ILC", "RILC", "IER", "CSR"};
1950         const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
1951         struct bridge_drv_interface *intf_fxns;
1952         struct dev_object *dev_object = bridge_context->dev_obj;
1953
1954         status = dev_get_cod_mgr(dev_object, &code_mgr);
1955         if (!code_mgr) {
1956                 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
1957                 status = -EFAULT;
1958         }
1959
1960         if (!status) {
1961                 status = dev_get_node_manager(dev_object, &node_mgr);
1962                 if (!node_mgr) {
1963                         pr_debug("%s: Failed on dev_get_node_manager.\n",
1964                                                                 __func__);
1965                         status = -EFAULT;
1966                 }
1967         }
1968
1969         if (!status) {
1970                 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
1971                 status =
1972                         cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
1973                 pr_debug("%s: trace_begin Value 0x%x\n",
1974                         __func__, trace_begin);
1975                 if (status)
1976                         pr_debug("%s: Failed on cod_get_sym_value.\n",
1977                                                                 __func__);
1978         }
1979         if (!status)
1980                 status = dev_get_intf_fxns(dev_object, &intf_fxns);
1981         /*
1982          * Check for the "magic number" in the trace buffer.  If it has
1983          * yet to appear then poll the trace buffer to wait for it.  Its
1984          * appearance signals that the DSP has finished dumping its state.
1985          */
1986         mmu_fault_dbg_info.head[0] = 0;
1987         mmu_fault_dbg_info.head[1] = 0;
1988         if (!status) {
1989                 poll_cnt = 0;
1990                 while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
1991                         mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
1992                         poll_cnt < POLL_MAX) {
1993
1994                         /* Read DSP dump size from the DSP trace buffer... */
1995                         status = (*intf_fxns->brd_read)(bridge_context,
1996                                 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
1997                                 sizeof(mmu_fault_dbg_info), 0);
1998
1999                         if (status)
2000                                 break;
2001
2002                         poll_cnt++;
2003                 }
2004
2005                 if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
2006                         mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
2007                         status = -ETIME;
2008                         pr_err("%s: No DSP MMU-Fault information available.\n",
2009                                                         __func__);
2010                 }
2011         }
2012
2013         if (!status) {
2014                 total_size = mmu_fault_dbg_info.size;
2015                 /* Limit the size in case DSP went crazy */
2016                 if (total_size > MAX_MMU_DBGBUFF)
2017                         total_size = MAX_MMU_DBGBUFF;
2018
2019                 buffer = kzalloc(total_size, GFP_ATOMIC);
2020                 if (!buffer) {
2021                         status = -ENOMEM;
2022                         pr_debug("%s: Failed to "
2023                                 "allocate stack dump buffer.\n", __func__);
2024                         goto func_end;
2025                 }
2026
2027                 buffer_beg = buffer;
2028                 buffer_end =  buffer + total_size / 4;
2029
2030                 /* Read bytes from the DSP trace buffer... */
2031                 status = (*intf_fxns->brd_read)(bridge_context,
2032                                 (u8 *)buffer, (u32)trace_begin,
2033                                 total_size, 0);
2034                 if (status) {
2035                         pr_debug("%s: Failed to Read Trace Buffer.\n",
2036                                                                 __func__);
2037                         goto func_end;
2038                 }
2039
2040                 pr_err("\nApproximate Crash Position:\n"
2041                         "--------------------------\n");
2042
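                /*
                 * buffer[3] holds the exception type reported by the DSP;
                 * the crash address is taken from the saved IRP when the
                 * type is zero and from the saved NRP otherwise.
                 */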
2043                 exc_type = buffer[3];
2044                 if (!exc_type)
2045                         i = buffer[79];         /* IRP */
2046                 else
2047                         i = buffer[80];         /* NRP */
2048
2049                 status =
2050                     cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
2051                 if (status) {
2052                         status = -EFAULT;
2053                         goto func_end;
2054                 }
2055
2056                 if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
2057                         0x1000, &offset_output, name) == 0))
2058                         pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
2059                                                         i - offset_output);
2060                 else
2061                         pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
2062
2063                 buffer += 4;
2064
2065                 pr_err("\nExecution Info:\n"
2066                         "---------------\n");
2067
2068                 if (*buffer < ARRAY_SIZE(exec_ctxt)) {
2069                         pr_err("Execution context \t%s\n",
2070                                 exec_ctxt[*buffer++]);
2071                 } else {
2072                         pr_err("Execution context corrupt\n");
2073                         kfree(buffer_beg);
2074                         return -EFAULT;
2075                 }
2076                 pr_err("Task Handle\t\t0x%x\n", *buffer++);
2077                 pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
2078                 pr_err("Stack Top\t\t0x%x\n", *buffer++);
2079                 pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
2080                 pr_err("Stack Size\t\t0x%x\n", *buffer++);
2081                 pr_err("Stack Size In Use\t0x%x\n", *buffer++);
2082
2083                 pr_err("\nCPU Registers\n"
2084                         "---------------\n");
2085
2086                 for (i = 0; i < 32; i++) {
2087                         if (i == 4 || i == 6 || i == 8)
2088                                 pr_err("A%d 0x%-8x [Function Argument %d]\n",
2089                                                         i, *buffer++, i-3);
2090                         else if (i == 15)
2091                                 pr_err("A15 0x%-8x [Frame Pointer]\n",
2092                                                                 *buffer++);
2093                         else
2094                                 pr_err("A%d 0x%x\n", i, *buffer++);
2095                 }
2096
2097                 pr_err("\nB0 0x%x\n", *buffer++);
2098                 pr_err("B1 0x%x\n", *buffer++);
2099                 pr_err("B2 0x%x\n", *buffer++);
2100
2101                 if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2102                         *buffer, 0x1000, &offset_output, name) == 0))
2103
2104                         pr_err("B3 0x%-8x [Function Return Pointer:"
2105                                 " \"%s\" + 0x%x]\n", *buffer, name,
2106                                 *buffer - offset_output);
2107                 else
2108                         pr_err("B3 0x%-8x [Function Return Pointer:"
2109                                 "Unable to match to a symbol.]\n", *buffer);
2110
2111                 buffer++;
2112
2113                 for (i = 4; i < 32; i++) {
2114                         if (i == 4 || i == 6 || i == 8)
2115                                 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2116                                                         i, *buffer++, i-2);
2117                         else if (i == 14)
2118                                 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2119                                                                 *buffer++);
2120                         else
2121                                 pr_err("B%d 0x%x\n", i, *buffer++);
2122                 }
2123
2124                 pr_err("\n");
2125
2126                 for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2127                         pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
2128
2129                 pr_err("\nStack:\n"
2130                         "------\n");
2131
2132                 for (i = 0; buffer < buffer_end; i++, buffer++) {
2133                         if ((*buffer > dyn_ext_base) && (
2134                                 node_find_addr(node_mgr, *buffer , 0x600,
2135                                 &offset_output, name) == 0))
2136                                 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2137                                         i, *buffer, name,
2138                                         *buffer - offset_output);
2139                         else
2140                                 pr_err("[%d] 0x%x\n", i, *buffer);
2141                 }
2142                 kfree(buffer_beg);
2143         }
2144 func_end:
2145         return status;
2146 }
2147
2148 /**
2149  * dump_dl_modules() - This function dumps the _DLModules list loaded on the DSP side
2150  * @bridge_context:             Bridge driver's device context pointer.
2151  *
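 * Walks the DSP-resident module list starting at the _DLModules symbol and
 * prints each module's section load addresses and names.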
2152  */
2153 void dump_dl_modules(struct bridge_dev_context *bridge_context)
2154 {
2155         struct cod_manager *code_mgr;
2156         struct bridge_drv_interface *intf_fxns;
2157         struct bridge_dev_context *bridge_ctxt = bridge_context;
2158         struct dev_object *dev_object = bridge_ctxt->dev_obj;
2159         struct modules_header modules_hdr;
2160         struct dll_module *module_struct = NULL;
2161         u32 module_dsp_addr;
2162         u32 module_size;
2163         u32 module_struct_size = 0;
2164         u32 sect_ndx;
2165         char *sect_str ;
2166         int status = 0;
2167
2168         status = dev_get_intf_fxns(dev_object, &intf_fxns);
2169         if (status) {
2170                 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2171                 goto func_end;
2172         }
2173
2174         status = dev_get_cod_mgr(dev_object, &code_mgr);
2175         if (!code_mgr) {
2176                 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2177                 status = -EFAULT;
2178                 goto func_end;
2179         }
2180
2181         /* Lookup  the address of the modules_header structure */
2182         status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2183         if (status) {
2184                 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2185                         __func__);
2186                 goto func_end;
2187         }
2188
2189         pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2190
2191         /* Copy the modules_header structure from DSP memory. */
2192         status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
2193                                 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2194
2195         if (status) {
2196                 pr_debug("%s: Failed to read modules header.\n",
2197                                                                 __func__);
2198                 goto func_end;
2199         }
2200
2201         module_dsp_addr = modules_hdr.first_module;
2202         module_size = modules_hdr.first_module_size;
2203
2204         pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2205                                                                 module_size);
2206
2207         pr_err("\nDynamically Loaded Modules:\n"
2208                 "---------------------------\n");
2209
2210         /* For each dll_module structure in the list... */
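        /* The walk terminates when a module reports a zero next_module_size. */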
2211         while (module_size) {
2212                 /*
2213                  * Allocate/re-allocate memory to hold the dll_module
2214                  * structure. The memory is re-allocated only if the existing
2215                  * allocation is too small.
2216                  */
2217                 if (module_size > module_struct_size) {
2218                         kfree(module_struct);
2219                         module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2220                         module_struct_size = module_size+128;
2221                         pr_debug("%s: allocated module struct %p %d\n",
2222                                 __func__, module_struct, module_struct_size);
2223                         if (!module_struct)
2224                                 goto func_end;
2225                 }
2226                 /* Copy the dll_module structure from DSP memory */
2227                 status = (*intf_fxns->brd_read)(bridge_context,
2228                         (u8 *)module_struct, module_dsp_addr, module_size, 0);
2229
2230                 if (status) {
2231                         pr_debug(
2232                         "%s: Failed to read dll_module struct for 0x%x.\n",
2233                         __func__, module_dsp_addr);
2234                         break;
2235                 }
2236
2237                 /* Update info regarding the _next_ module in the list. */
2238                 module_dsp_addr = module_struct->next_module;
2239                 module_size = module_struct->next_module_size;
2240
2241                 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2242                         __func__, module_dsp_addr, module_size,
2243                         module_struct->num_sects);
2244
2245                 /*
2246                  * The section name strings start immediately following
2247                  * the array of dll_sect structures.
2248                  */
2249                 sect_str = (char *) &module_struct->
2250                                         sects[module_struct->num_sects];
2251                 pr_err("%s\n", sect_str);
2252
2253                 /*
2254                  * Advance to the first section name string.
2255                  * Each string follows the one before.
2256                  */
2257                 sect_str += strlen(sect_str) + 1;
2258
2259                 /* Access each dll_sect structure and its name string. */
2260                 for (sect_ndx = 0;
2261                         sect_ndx < module_struct->num_sects; sect_ndx++) {
2262                         pr_err("    Section: 0x%x ",
2263                                 module_struct->sects[sect_ndx].sect_load_adr);
2264
2265                         if (((u32) sect_str - (u32) module_struct) <
2266                                 module_struct_size) {
2267                                 pr_err("%s\n", sect_str);
2268                                 /* Each string follows the one before. */
2269                                 sect_str += strlen(sect_str)+1;
2270                         } else {
2271                                 pr_err("<string error>\n");
2272                                 pr_debug("%s: section name string address "
2273                                         "is invalid %p\n", __func__, sect_str);
2274                         }
2275                 }
2276         }
2277 func_end:
2278         kfree(module_struct);
2279 }
2280 #endif