4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * IO dispatcher for a shared memory channel driver.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 * There is an important invariant condition which must be maintained per
22 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
23 * which may cause timeouts and/or failure of the sync_wait_on_event
26 #include <linux/types.h>
27 #include <linux/list.h>
30 #include <dspbridge/host_os.h>
31 #include <linux/workqueue.h>
33 /* ----------------------------------- DSP/BIOS Bridge */
34 #include <dspbridge/dbdefs.h>
37 #include <dspbridge/dbc.h>
40 #include <dspbridge/ntfy.h>
41 #include <dspbridge/sync.h>
43 /* Hardware Abstraction Layer */
48 #include <dspbridge/dspdeh.h>
49 #include <dspbridge/dspio.h>
50 #include <dspbridge/dspioctl.h>
51 #include <dspbridge/wdt.h>
53 #include <tiomap_io.h>
54 #include <_tiomap_pwr.h>
56 /* Platform Manager */
57 #include <dspbridge/cod.h>
58 #include <dspbridge/node.h>
59 #include <dspbridge/dev.h>
62 #include <dspbridge/rms_sh.h>
63 #include <dspbridge/mgr.h>
64 #include <dspbridge/drv.h>
66 #include "module_list.h"
69 #include <dspbridge/io_sm.h>
72 /* Defines, Data Structures, Typedefs */
73 #define OUTPUTNOTREADY 0xffff
74 #define NOTENABLED 0xffff /* Channel(s) not enabled */
76 #define EXTEND "_EXT_END"
78 #define SWAP_WORD(x) (x)
79 #define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
81 #define MAX_PM_REQS 32
83 #define MMU_FAULT_HEAD1 0xa5a5a5a5
84 #define MMU_FAULT_HEAD2 0x96969696
86 #define MAX_MMU_DBGBUFF 10240
88 /* IO Manager: only one created per board */
90 /* These four fields must be the first fields in a io_mgr_ struct */
91 /* Bridge device context */
92 struct bridge_dev_context *bridge_context;
93 /* Function interface to Bridge driver */
94 struct bridge_drv_interface *intf_fxns;
95 struct dev_object *dev_obj; /* Device this board represents */
97 /* These fields initialized in bridge_io_create() */
98 struct chnl_mgr *chnl_mgr;
99 struct shm *shared_mem; /* Shared Memory control */
100 u8 *input; /* Address of input channel */
101 u8 *output; /* Address of output channel */
102 struct msg_mgr *msg_mgr; /* Message manager */
103 /* Msg control for from DSP messages */
104 struct msg_ctrl *msg_input_ctrl;
105 /* Msg control for to DSP messages */
106 struct msg_ctrl *msg_output_ctrl;
107 u8 *msg_input; /* Address of input messages */
108 u8 *msg_output; /* Address of output messages */
109 u32 sm_buf_size; /* Size of a shared memory I/O channel */
110 bool shared_irq; /* Is this IRQ shared? */
111 u32 word_size; /* Size in bytes of DSP word */
112 u16 intr_val; /* Interrupt value */
113 /* Private extnd proc info; mmu setup */
114 struct mgr_processorextinfo ext_proc_info;
115 struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
116 struct work_struct io_workq; /* workqueue */
117 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
118 u32 trace_buffer_begin; /* Trace message start address */
119 u32 trace_buffer_end; /* Trace message end address */
120 u32 trace_buffer_current; /* Trace message current address */
121 u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
127 u32 dpc_req; /* Number of requested DPC's. */
128 u32 dpc_sched; /* Number of executed DPC's. */
129 struct tasklet_struct dpc_tasklet;
134 /* Function Prototypes */
135 static void io_dispatch_pm(struct io_mgr *pio_mgr);
136 static void notify_chnl_complete(struct chnl_object *pchnl,
137 struct chnl_irp *chnl_packet_obj);
138 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
140 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
142 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
143 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
144 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
145 struct chnl_object *pchnl, u32 mask);
147 /* Bus Addr (cached kernel) */
148 static int register_shm_segs(struct io_mgr *hio_mgr,
149 struct cod_manager *cod_man,
152 static inline void set_chnl_free(struct shm *sm, u32 chnl)
154 sm->host_free_mask &= ~(1 << chnl);
157 static inline void set_chnl_busy(struct shm *sm, u32 chnl)
159 sm->host_free_mask |= 1 << chnl;
164 * ======== bridge_io_create ========
165 * Create an IO manager object.
/*
 * Allocates the io_mgr, links it to the device's channel manager, and for
 * DSP devices creates the I/O dispatch tasklet (io_dpc) plus its
 * request/serviced counters and spinlock.  On failure after allocation the
 * partially built object is torn down via bridge_io_destroy().
 * NOTE(review): several lines (error returns, the *io_man assignment) are
 * elided in this excerpt — confirm against the full source.
 */
167 int bridge_io_create(struct io_mgr **io_man,
168 		 struct dev_object *hdev_obj,
169 		 const struct io_attrs *mgr_attrts)
171 struct io_mgr *pio_mgr = NULL;
172 struct bridge_dev_context *hbridge_context = NULL;
173 struct cfg_devnode *dev_node_obj;
174 struct chnl_mgr *hchnl_mgr;
177 /* Check requirements */
178 if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
/* A channel manager must exist and must not already own an IO manager */
183 dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
184 if (!hchnl_mgr || hchnl_mgr->iomgr)
188 * Message manager will be created when a file is loaded, since
189 * size of message buffer in shared memory is configurable in
192 dev_get_bridge_context(hdev_obj, &hbridge_context);
193 if (!hbridge_context)
196 dev_get_dev_type(hdev_obj, &dev_type);
198 /* Allocate IO manager object */
199 pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
203 /* Initialize chnl_mgr object */
204 pio_mgr->chnl_mgr = hchnl_mgr;
205 pio_mgr->word_size = mgr_attrts->word_size;
207 if (dev_type == DSP_UNIT) {
208 /* Create an IO DPC */
/* io_dpc receives pio_mgr as its tasklet data argument */
209 tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
211 /* Initialize DPC counters */
212 pio_mgr->dpc_req = 0;
213 pio_mgr->dpc_sched = 0;
215 spin_lock_init(&pio_mgr->dpc_lock);
217 if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
218 bridge_io_destroy(pio_mgr);
223 pio_mgr->bridge_context = hbridge_context;
224 pio_mgr->shared_irq = mgr_attrts->irq_shared;
225 if (dsp_wdt_init()) {
226 bridge_io_destroy(pio_mgr);
230 /* Return IO manager object to caller... */
231 hchnl_mgr->iomgr = pio_mgr;
238 * ======== bridge_io_destroy ========
240 * Disable interrupts, destroy the IO manager.
242 int bridge_io_destroy(struct io_mgr *hio_mgr)
246 /* Free IO DPC object */
/* tasklet_kill() waits for a running io_dpc and prevents rescheduling */
247 tasklet_kill(&hio_mgr->dpc_tasklet);
249 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
/* NOTE(review): the debug/backtrace branch body (presumably freeing the
 * trace message buffer) is elided in this excerpt. */
253 /* Free this IO manager object */
263 * ======== bridge_io_on_loaded ========
265 * Called when a new program is loaded to get shared memory buffer
266 * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
267 * are in DSP address units.
/*
 * Reads the SHM/MSG section symbols from the loaded COFF image, sizes the
 * external-memory segments, builds the DSP MMU TLB table (ae_proc[]) and
 * page-table mappings via brd_mem_map(), maps the L4 peripherals, pushes
 * the MMU configuration down with BRDIOCTL_SETMMUCONFIG, and finally lays
 * out the shared-memory region: shm control block, input/output channel
 * buffers, message control blocks and (optionally) the trace buffer.
 * NOTE(review): this excerpt elides many lines (declarations, error
 * checks, closing braces); verify details against the full source.
 */
269 int bridge_io_on_loaded(struct io_mgr *hio_mgr)
271 struct cod_manager *cod_man;
272 struct chnl_mgr *hchnl_mgr;
273 struct msg_mgr *hmsg_mgr;
275 u32 ul_shm_base_offset;
277 u32 ul_shm_length = -1;
278 u32 ul_mem_length = -1;
281 u32 ul_msg_length = -1;
292 /* DSP MMU setup table */
293 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
294 struct cfg_hostres *host_res;
295 struct bridge_dev_context *pbridge_context;
299 u32 ul_seg1_size = 0;
/* Candidate page sizes, tried largest-first when building mappings */
305 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
306 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
309 status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
310 if (!pbridge_context) {
315 host_res = pbridge_context->resources;
320 status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
325 hchnl_mgr = hio_mgr->chnl_mgr;
326 /* The message manager is destroyed when the board is stopped. */
327 dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
328 hmsg_mgr = hio_mgr->msg_mgr;
329 if (!hchnl_mgr || !hmsg_mgr) {
333 if (hio_mgr->shared_mem)
334 hio_mgr->shared_mem = NULL;
336 /* Get start and length of channel part of shared memory */
337 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
343 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
349 if (ul_shm_limit <= ul_shm_base) {
353 /* Get total length in bytes */
354 ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
355 /* Calculate size of a PROCCOPY shared memory region */
356 dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
357 __func__, (ul_shm_length - sizeof(struct shm)));
359 /* Get start and length of message part of shared memory */
360 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
363 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
366 if (ul_msg_limit <= ul_msg_base) {
370 * Length (bytes) of messaging part of shared
374 (ul_msg_limit - ul_msg_base +
375 1) * hio_mgr->word_size;
377 * Total length (bytes) of shared memory:
380 ul_mem_length = ul_shm_length + ul_msg_length;
389 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
391 cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
393 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
401 cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
406 status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
411 /* Get memory reserved in host resources */
412 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
413 &hio_mgr->ext_proc_info,
415 mgr_processorextinfo),
418 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
420 ul_gpp_pa = host_res->mem_phys[1];
421 ul_gpp_va = host_res->mem_base[1];
422 /* This is the virtual uncached ioremapped address!!! */
423 /* Why can't we directly take the DSPVA from the symbols? */
424 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
425 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
427 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
/* Round seg1 to a 4 KB boundary and the shm segment to 64 KB */
429 ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
431 ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
432 ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
434 if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
437 dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
438 "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
439 "ul_seg_size %x ul_seg1_size %x \n", __func__,
440 ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
441 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
443 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
444 host_res->mem_length[1]) {
445 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
446 __func__, host_res->mem_length[1],
447 ul_seg_size + ul_seg1_size + ul_pad_size);
455 va_curr = ul_dyn_ext_base * hio_mgr->word_size;
456 gpp_va_curr = ul_gpp_va;
457 num_bytes = ul_seg1_size;
460 * Try to fit into TLB entries. If not possible, push them to page
461 * tables. It is quite possible that if sections are not on
462 * bigger page boundary, we may end up making several small pages.
463 * So, push them onto page tables, if that is the case.
/* Attributes for the dynamic external-memory mappings below */
465 map_attrs = 0x00000000;
466 map_attrs = DSP_MAPLITTLEENDIAN;
467 map_attrs |= DSP_MAPPHYSICALADDR;
468 map_attrs |= DSP_MAPELEMSIZE32;
469 map_attrs |= DSP_MAPDONOTLOCK;
473 * To find the max. page size with which both PA & VA are
476 all_bits = pa_curr | va_curr;
477 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
478 "num_bytes %x\n", all_bits, pa_curr, va_curr,
480 for (i = 0; i < 4; i++) {
481 if ((num_bytes >= page_size[i]) && ((all_bits &
486 brd_mem_map(hio_mgr->bridge_context,
488 page_size[i], map_attrs,
492 pa_curr += page_size[i];
493 va_curr += page_size[i];
494 gpp_va_curr += page_size[i];
495 num_bytes -= page_size[i];
497 * Don't try smaller sizes. Hopefully we have
498 * reached an address aligned to a bigger page
/* Skip over the alignment padding between the two segments */
505 pa_curr += ul_pad_size;
506 va_curr += ul_pad_size;
507 gpp_va_curr += ul_pad_size;
509 /* Configure the TLB entries for the next cacheable segment */
510 num_bytes = ul_seg_size;
511 va_curr = ul_dsp_va * hio_mgr->word_size;
514 * To find the max. page size with which both PA & VA are
517 all_bits = pa_curr | va_curr;
518 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
519 "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
521 for (i = 0; i < 4; i++) {
522 if (!(num_bytes >= page_size[i]) ||
523 !((all_bits & (page_size[i] - 1)) == 0))
525 if (ndx < MAX_LOCK_TLB_ENTRIES) {
527 * This is the physical address written to
530 ae_proc[ndx].gpp_pa = pa_curr;
532 * This is the virtual uncached ioremapped
535 ae_proc[ndx].gpp_va = gpp_va_curr;
536 ae_proc[ndx].dsp_va =
537 va_curr / hio_mgr->word_size;
538 ae_proc[ndx].size = page_size[i];
539 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
540 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
541 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
542 dev_dbg(bridge, "shm MMU TLB entry PA %x"
543 " VA %x DSP_VA %x Size %x\n",
546 ae_proc[ndx].dsp_va *
547 hio_mgr->word_size, page_size[i]);
/* Out of locked TLB entries: fall back to a page-table mapping */
552 brd_mem_map(hio_mgr->bridge_context,
554 page_size[i], map_attrs,
557 "shm MMU PTE entry PA %x"
558 " VA %x DSP_VA %x Size %x\n",
561 ae_proc[ndx].dsp_va *
562 hio_mgr->word_size, page_size[i]);
566 pa_curr += page_size[i];
567 va_curr += page_size[i];
568 gpp_va_curr += page_size[i];
569 num_bytes -= page_size[i];
571 * Don't try smaller sizes. Hopefully we have reached
572 * an address aligned to a bigger page size.
579 * Copy remaining entries from CDB. All entries are 1 MB and
580 * should not conflict with shm entries on MPU or DSP side.
582 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
583 if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
/* Reject CDB entries overlapping the shm region on either side */
586 if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
588 && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
589 ul_gpp_pa + ul_seg_size)
590 || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
591 ul_dsp_va - 0x100000 / hio_mgr->word_size
592 && hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
593 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
595 "CDB MMU entry %d conflicts with "
596 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
597 "GppPa %x, DspVa %x, Bytes %x.\n", i,
598 hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
599 hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
600 ul_gpp_pa, ul_dsp_va, ul_seg_size);
603 if (ndx < MAX_LOCK_TLB_ENTRIES) {
604 ae_proc[ndx].dsp_va =
605 hio_mgr->ext_proc_info.ty_tlb[i].
607 ae_proc[ndx].gpp_pa =
608 hio_mgr->ext_proc_info.ty_tlb[i].
610 ae_proc[ndx].gpp_va = 0;
612 ae_proc[ndx].size = 0x100000;
613 dev_dbg(bridge, "shm MMU entry PA %x "
614 "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
615 ae_proc[ndx].dsp_va);
618 status = hio_mgr->intf_fxns->brd_mem_map
619 (hio_mgr->bridge_context,
620 hio_mgr->ext_proc_info.ty_tlb[i].
622 hio_mgr->ext_proc_info.ty_tlb[i].
623 dsp_virt, 0x100000, map_attrs,
/* Rebuild mapping attributes for the L4 peripheral mappings below */
631 map_attrs = 0x00000000;
632 map_attrs = DSP_MAPLITTLEENDIAN;
633 map_attrs |= DSP_MAPPHYSICALADDR;
634 map_attrs |= DSP_MAPELEMSIZE32;
635 map_attrs |= DSP_MAPDONOTLOCK;
637 /* Map the L4 peripherals */
639 while (l4_peripheral_table[i].phys_addr) {
640 status = hio_mgr->intf_fxns->brd_mem_map
641 (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
642 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
/* Zero out the unused TLB table entries */
649 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
650 ae_proc[i].dsp_va = 0;
651 ae_proc[i].gpp_pa = 0;
652 ae_proc[i].gpp_va = 0;
656 * Set the shm physical address entry (grayed out in CDB file)
657 * to the virtual uncached ioremapped address of shm reserved
660 hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
661 (ul_gpp_va + ul_seg1_size + ul_pad_size);
664 * Need shm Phys addr. IO supports only one DSP for now:
667 if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
671 if (ae_proc[0].dsp_va > ul_shm_base) {
675 /* ul_shm_base may not be at ul_dsp_va address */
676 ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
679 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
680 * bridge_brd_start() the MMU will be re-programed with MMU
681 * DSPVa-GPPPa pair info while DSP is in a known
686 hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
687 BRDIOCTL_SETMMUCONFIG,
/* Translate the shm base to a kernel linear address */
691 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
692 ul_shm_base += ul_shm_base_offset;
693 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
695 if (ul_shm_base == 0) {
701 register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
/* Carve up the now-addressable shared-memory region: control block,
 * then input buffer, then output buffer (each half the remainder) */
704 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
705 hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
706 hio_mgr->output = hio_mgr->input + (ul_shm_length -
707 sizeof(struct shm)) / 2;
708 hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
710 /* Set up Shared memory addresses for messaging. */
711 hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
714 (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
715 hio_mgr->msg_output_ctrl =
716 (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
718 hio_mgr->msg_output =
719 (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
721 ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
722 / sizeof(struct msg_dspmsg);
723 dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
724 "output %p, msg_input_ctrl %p, msg_input %p, "
725 "msg_output_ctrl %p, msg_output %p\n",
726 (u8 *) hio_mgr->shared_mem, hio_mgr->input,
727 hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
728 hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
729 hio_mgr->msg_output);
730 dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
732 memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
734 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
735 /* Get the start address of trace buffer */
736 status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
737 &hio_mgr->trace_buffer_begin);
/* Rebase trace pointers from DSP addresses to GPP virtual addresses */
743 hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
744 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
745 (hio_mgr->trace_buffer_begin - ul_dsp_va);
746 /* Get the end address of trace buffer */
747 status = cod_get_sym_value(cod_man, SYS_PUTCEND,
748 &hio_mgr->trace_buffer_end);
753 hio_mgr->trace_buffer_end =
754 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
755 (hio_mgr->trace_buffer_end - ul_dsp_va);
756 /* Get the current address of DSP write pointer */
757 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
758 &hio_mgr->trace_buffer_current);
763 hio_mgr->trace_buffer_current =
764 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
765 (hio_mgr->trace_buffer_current - ul_dsp_va);
766 /* Calculate the size of trace buffer */
768 hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
769 hio_mgr->trace_buffer_begin) *
770 hio_mgr->word_size) + 2, GFP_KERNEL);
/* Record DSP-side and GPP-side base addresses of the mapped shm region */
774 hio_mgr->dsp_va = ul_dsp_va;
775 hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
783 * ======== io_buf_size ========
784 * Size of shared memory I/O channel.
786 u32 io_buf_size(struct io_mgr *hio_mgr)
/* Accessor: sm_buf_size was computed in bridge_io_on_loaded() as the
 * distance between the output and input channel buffers.
 * NOTE(review): a NULL-check branch appears to be elided in this excerpt. */
789 return hio_mgr->sm_buf_size;
795 * ======== io_cancel_chnl ========
796 * Cancel IO on a given PCPY channel.
/* Clears the channel's host-free bit in shared memory, then interrupts
 * the DSP (proc-copy mailbox class). */
798 void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
800 struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
805 sm = hio_mgr->shared_mem;
807 /* Inform DSP that we have no more buffers on this channel */
808 set_chnl_free(sm, chnl);
810 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
817 * ======== io_dispatch_pm ========
818 * Performs I/O dispatch on PM related messages from DSP
/*
 * Decodes pio_mgr->intr_val: MBX_PM_HIBERNATE_EN -> BRDIOCTL_PWR_HIBERNATE;
 * MBX_PM_OPP_REQ -> BRDIOCTL_CONSTRAINT_REQUEST (OPP point is read from the
 * shared-memory opp_request block); anything else -> BRDIOCTL_CLK_CTRL.
 * All commands are forwarded through intf_fxns->dev_cntrl().
 */
820 static void io_dispatch_pm(struct io_mgr *pio_mgr)
825 /* Perform Power message processing here */
826 parg[0] = pio_mgr->intr_val;
828 /* Send the command to the Bridge clk/pwr manager to handle */
829 if (parg[0] == MBX_PM_HIBERNATE_EN) {
830 dev_dbg(bridge, "PM: Hibernate command\n");
831 status = pio_mgr->intf_fxns->
832 dev_cntrl(pio_mgr->bridge_context,
833 BRDIOCTL_PWR_HIBERNATE, parg);
835 pr_err("%s: hibernate cmd failed 0x%x\n",
837 } else if (parg[0] == MBX_PM_OPP_REQ) {
838 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
839 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
840 status = pio_mgr->intf_fxns->
841 dev_cntrl(pio_mgr->bridge_context,
842 BRDIOCTL_CONSTRAINT_REQUEST, parg);
844 dev_dbg(bridge, "PM: Failed to set constraint "
845 "= 0x%x\n", parg[1]);
847 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
849 status = pio_mgr->intf_fxns->
850 dev_cntrl(pio_mgr->bridge_context,
851 BRDIOCTL_CLK_CTRL, parg);
853 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
859 * ======== io_dpc ========
860 * Deferred procedure call for shared memory channel driver ISR. Carries
861 * out the dispatch of I/O as a non-preemptible event.It can only be
862 * pre-empted by an ISR.
/*
 * Tasklet handler (bound in bridge_io_create()).  Loops until the number
 * of serviced dispatches (dpc_sched) catches up with the number requested
 * (dpc_req), on each pass handling DEH error notification, proc-copy
 * channel input/output, and message-queue input/output.
 */
864 void io_dpc(unsigned long ref_data)
866 struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
867 struct chnl_mgr *chnl_mgr_obj;
868 struct msg_mgr *msg_mgr_obj;
869 struct deh_mgr *hdeh_mgr;
875 chnl_mgr_obj = pio_mgr->chnl_mgr;
876 dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
877 dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
/* Snapshot the counters; io_mbox_msg()/iosm_schedule() advance dpc_req */
881 requested = pio_mgr->dpc_req;
882 serviced = pio_mgr->dpc_sched;
884 if (serviced == requested)
887 /* Process pending DPC's */
889 /* Check value of interrupt reg to ensure it's a valid error */
890 if ((pio_mgr->intr_val > DEH_BASE) &&
891 (pio_mgr->intr_val < DEH_LIMIT)) {
892 /* Notify DSP/BIOS exception */
894 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
895 print_dsp_debug_trace(pio_mgr);
897 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
901 /* Proc-copy chanel dispatch */
902 input_chnl(pio_mgr, NULL, IO_SERVICE);
903 output_chnl(pio_mgr, NULL, IO_SERVICE);
907 /* Perform I/O dispatch on message queues */
908 input_msg(pio_mgr, msg_mgr_obj);
909 output_msg(pio_mgr, msg_mgr_obj);
913 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
914 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
915 /* Notify DSP Trace message */
916 print_dsp_debug_trace(pio_mgr);
920 } while (serviced != requested);
921 pio_mgr->dpc_sched = requested;
927 * ======== io_mbox_msg ========
928 * Main interrupt handler for the shared memory IO manager.
929 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
930 * schedules a DPC to dispatch I/O.
/*
 * Notifier callback: @msg carries the raw mailbox value, truncated into
 * intr_val.  PM-class values are handled synchronously via
 * io_dispatch_pm(); MBX_DEH_RESET is cleared and (per the comment flow)
 * not dispatched; otherwise the io_dpc tasklet is scheduled, with
 * dpc_lock taken around the elided dpc_req update.
 * NOTE(review): the return path is not visible in this excerpt.
 */
932 int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
934 struct io_mgr *pio_mgr;
935 struct dev_object *dev_obj;
938 dev_obj = dev_get_first();
939 dev_get_io_mgr(dev_obj, &pio_mgr);
944 pio_mgr->intr_val = (u16)((u32)msg);
945 if (pio_mgr->intr_val & MBX_PM_CLASS)
946 io_dispatch_pm(pio_mgr);
948 if (pio_mgr->intr_val == MBX_DEH_RESET) {
949 pio_mgr->intr_val = 0;
951 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
953 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
954 tasklet_schedule(&pio_mgr->dpc_tasklet);
960 * ======== io_request_chnl ========
962 * Request chanenel I/O from the DSP. Sets flags in shared memory, then
963 * interrupts the DSP.
/*
 * IO_INPUT: mark the channel busy in shared memory and hand MBX_PCPY_CLASS
 * back through *mbx_val for the caller to send.  IO_OUTPUT: record the
 * ready buffer in the channel manager's output_mask.
 */
965 void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
966 		 u8 io_mode, u16 *mbx_val)
968 struct chnl_mgr *chnl_mgr_obj;
971 if (!pchnl || !mbx_val)
973 chnl_mgr_obj = io_manager->chnl_mgr;
974 sm = io_manager->shared_mem;
975 if (io_mode == IO_INPUT) {
977 * Assertion fires if CHNL_AddIOReq() called on a stream
978 * which was cancelled, or attached to a dead board.
980 DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
981 (pchnl->state == CHNL_STATEEOS));
982 /* Indicate to the DSP we have a buffer available for input */
983 set_chnl_busy(sm, pchnl->chnl_id);
984 *mbx_val = MBX_PCPY_CLASS;
985 } else if (io_mode == IO_OUTPUT) {
987 * This assertion fails if CHNL_AddIOReq() was called on a
988 * stream which was cancelled, or attached to a dead board.
990 DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
993 * Record the fact that we have a buffer available for
996 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
998 DBC_ASSERT(io_mode);	/* Shouldn't get here. */
1005 * ======== iosm_schedule ========
1006 * Schedule DPC for IO.
/* Bumps dpc_req under dpc_lock so io_dpc() can see how many dispatch
 * requests are outstanding, then schedules the tasklet. */
1008 void iosm_schedule(struct io_mgr *io_manager)
1010 unsigned long flags;
1015 /* Increment count of DPC's pending. */
1016 spin_lock_irqsave(&io_manager->dpc_lock, flags);
1017 io_manager->dpc_req++;
1018 spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
1021 tasklet_schedule(&io_manager->dpc_tasklet);
1025 * ======== find_ready_output ========
1026 * Search for a host output channel which is ready to send. If this is
1027 * called as a result of servicing the DPC, then implement a round
1028 * robin search; otherwise, this was called by a client thread (via
1029 * IO_Dispatch()), so just start searching from the current channel id.
/*
 * @mask is the set of channels both host-ready and DSP-free.  Scans ids
 * starting at pchnl->chnl_id (client call) or last_output + 1 (DPC round
 * robin), wrapping at CHNL_MAXCHANNELS; the winner is stored in
 * last_output.  Returns OUTPUTNOTREADY when nothing qualifies.
 * NOTE(review): the lines testing @mask and assigning 'ret' are elided
 * in this excerpt.
 */
1031 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1032 			 struct chnl_object *pchnl, u32 mask)
1034 u32 ret = OUTPUTNOTREADY;
1039 NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
1040 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1041 if (id >= CHNL_MAXCHANNELS)
1050 chnl_mgr_obj->last_output = id;
1054 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1056 } while (id != start_id);
1063 * ======== input_chnl ========
1064 * Dispatch a buffer on an input channel.
/*
 * Services sm->input_full: copies the DSP-filled input buffer into the
 * first queued chirp of the addressed channel, treats a zero-byte
 * transfer as end-of-stream, clears the host-free bit when no requests
 * remain, acks the DSP with MBX_PCPY_CLASS, and finally notifies the
 * client with the completed chirp.
 */
1066 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1069 struct chnl_mgr *chnl_mgr_obj;
1073 struct chnl_irp *chnl_packet_obj = NULL;
1075 bool clear_chnl = false;
1076 bool notify_client = false;
1078 sm = pio_mgr->shared_mem;
1079 chnl_mgr_obj = pio_mgr->chnl_mgr;
1081 /* Attempt to perform input */
1082 if (!sm->input_full)
1085 bytes = sm->input_size * chnl_mgr_obj->word_size;
1086 chnl_id = sm->input_id;
1088 if (chnl_id >= CHNL_MAXCHANNELS) {
1089 /* Shouldn't be here: would indicate corrupted shm. */
1090 DBC_ASSERT(chnl_id);
1093 pchnl = chnl_mgr_obj->channels[chnl_id];
1094 if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1095 if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1096 /* Get the I/O request, and attempt a transfer */
1097 if (!list_empty(&pchnl->io_requests)) {
1098 if (!pchnl->cio_reqs)
1101 chnl_packet_obj = list_first_entry(
1102 &pchnl->io_requests,
1103 struct chnl_irp, link);
1104 list_del(&chnl_packet_obj->link);
1108 * Ensure we don't overflow the client's
1111 bytes = min(bytes, chnl_packet_obj->byte_size);
1112 memcpy(chnl_packet_obj->host_sys_buf,
1113 pio_mgr->input, bytes);
1114 pchnl->bytes_moved += bytes;
1115 chnl_packet_obj->byte_size = bytes;
1116 chnl_packet_obj->arg = dw_arg;
1117 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1121 * This assertion fails if the DSP
1122 * sends EOS more than once on this
1125 if (pchnl->state & CHNL_STATEEOS)
1128 * Zero bytes indicates EOS. Update
1129 * IOC status for this chirp, and also
1130 * the channel state.
1132 chnl_packet_obj->status |=
1134 pchnl->state |= CHNL_STATEEOS;
1136 * Notify that end of stream has
1139 ntfy_notify(pchnl->ntfy_obj,
1142 /* Tell DSP if no more I/O buffers available */
1143 if (list_empty(&pchnl->io_requests))
1144 set_chnl_free(sm, pchnl->chnl_id);
1146 notify_client = true;
1149 * Input full for this channel, but we have no
1150 * buffers available. The channel must be
1151 * "idling". Clear out the physical input
1157 /* Input channel cancelled: clear input channel */
1161 /* DPC fired after host closed channel: clear input channel */
1165 /* Indicate to the DSP we have read the input */
1167 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1169 if (notify_client) {
1170 /* Notify client with IO completion record */
1171 notify_chnl_complete(pchnl, chnl_packet_obj);
1178 * ======== input_msg ========
1179 * Copies messages from shared memory to the message queues.
/*
 * Reads up to msg_ctr_obj->size messages word-by-word from shared memory
 * (via read_ext32_bit_dsp_data) and routes each by msgq_id to its queue:
 * RMS_EXITACK triggers the node exit callback; any other message moves a
 * frame from msg_free_list to msg_used_list and signals the queue's
 * notify object and sync event.  Finally marks the shared buffer empty
 * and interrupts the DSP.
 */
1181 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1186 struct msg_queue *msg_queue_obj;
1187 struct msg_frame *pmsg;
1188 struct msg_dspmsg msg;
1189 struct msg_ctrl *msg_ctr_obj;
1193 msg_ctr_obj = pio_mgr->msg_input_ctrl;
1194 /* Get the number of input messages to be read */
1195 input_empty = msg_ctr_obj->buf_empty;
1196 num_msgs = msg_ctr_obj->size;
1200 msg_input = pio_mgr->msg_input;
1201 for (i = 0; i < num_msgs; i++) {
1202 /* Read the next message */
1203 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
1205 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1206 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
1208 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1209 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
1211 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1212 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1214 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1215 msg_input += sizeof(struct msg_dspmsg);
1217 /* Determine which queue to put the message in */
1218 dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
1219 "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
1220 msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
1222 * Interrupt may occur before shared memory and message
1223 * input locations have been set up. If all nodes were
1224 * cleaned up, hmsg_mgr->max_msgs should be 0.
1226 list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
1228 if (msg.msgq_id != msg_queue_obj->msgq_id)
1231 if (msg.msg.cmd == RMS_EXITACK) {
1233 * Call the node exit notification.
1234 * The exit message does not get
1237 (*hmsg_mgr->on_exit)(msg_queue_obj->arg,
1242 * Not an exit acknowledgement, queue
1245 if (list_empty(&msg_queue_obj->msg_free_list)) {
1247 * No free frame to copy the
1250 pr_err("%s: no free msg frames,"
1251 " discarding msg\n",
1256 pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
1257 struct msg_frame, list_elem);
1258 list_del(&pmsg->list_elem);
1259 pmsg->msg_data = msg;
1260 list_add_tail(&pmsg->list_elem,
1261 &msg_queue_obj->msg_used_list);
1262 ntfy_notify(msg_queue_obj->ntfy_obj,
1263 DSP_NODEMESSAGEREADY);
1264 sync_set_event(msg_queue_obj->sync_event);
1267 /* Set the post SWI flag */
1269 /* Tell the DSP we've read the messages */
1270 msg_ctr_obj->buf_empty = true;
1271 msg_ctr_obj->post_swi = true;
1272 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1277 * ======== notify_chnl_complete ========
1279 * Signal the channel event, notifying the client that I/O has completed.
/* Queues the completed chirp on io_completions and wakes the client. */
1281 static void notify_chnl_complete(struct chnl_object *pchnl,
1282 				 struct chnl_irp *chnl_packet_obj)
1286 if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
1290 * Note: we signal the channel event only if the queue of IO
1291 * completions is empty. If it is not empty, the event is sure to be
1292 * signalled by the only IO completion list consumer:
1293 * bridge_chnl_get_ioc().
1295 signal_event = list_empty(&pchnl->io_completions);
1296 /* Enqueue the IO completion info for the client */
1297 list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
1300 if (pchnl->cio_cs > pchnl->chnl_packets)
1302 /* Signal the channel event (if not already set) that IO is complete */
1304 sync_set_event(pchnl->sync_event);
1306 /* Notify that IO is complete */
1307 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1313 * ======== output_chnl ========
1315 * Dispatch a buffer on an output channel.
/*
 * Picks a ready output channel (host output_mask AND DSP dsp_free_mask)
 * via find_ready_output(), dequeues its first chirp, copies it into the
 * shared output buffer, publishes id/size and sets output_full, then
 * interrupts the DSP and notifies the client (preserving only the chirp's
 * EOS status bit).
 */
1317 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1320 struct chnl_mgr *chnl_mgr_obj;
1323 struct chnl_irp *chnl_packet_obj;
1326 chnl_mgr_obj = pio_mgr->chnl_mgr;
1327 sm = pio_mgr->shared_mem;
1328 /* Attempt to perform output */
1329 if (sm->output_full)
1332 if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1335 /* Look to see if both a PC and DSP output channel are ready */
1336 dw_dsp_f_mask = sm->dsp_free_mask;
1338 find_ready_output(chnl_mgr_obj, pchnl,
1339 (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
1340 if (chnl_id == OUTPUTNOTREADY)
1343 pchnl = chnl_mgr_obj->channels[chnl_id];
1344 if (!pchnl || list_empty(&pchnl->io_requests)) {
1345 /* Shouldn't get here */
1349 if (!pchnl->cio_reqs)
1352 /* Get the I/O request, and attempt a transfer */
1353 chnl_packet_obj = list_first_entry(&pchnl->io_requests,
1354 struct chnl_irp, link);
1355 list_del(&chnl_packet_obj->link);
1359 /* Record fact that no more I/O buffers available */
1360 if (list_empty(&pchnl->io_requests))
1361 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
1363 /* Transfer buffer to DSP side */
1364 chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
1365 chnl_packet_obj->byte_size);
1366 memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1367 chnl_packet_obj->byte_size);
1368 pchnl->bytes_moved += chnl_packet_obj->byte_size;
1369 /* Write all 32 bits of arg */
1370 sm->arg = chnl_packet_obj->arg;
1371 #if _CHNL_WORDSIZE == 2
1372 /* Access can be different SM access word size (e.g. 16/32 bit words) */
1373 sm->output_id = (u16) chnl_id;
1374 sm->output_size = (u16) (chnl_packet_obj->byte_size +
1375 chnl_mgr_obj->word_size - 1) /
1376 (u16) chnl_mgr_obj->word_size;
1378 sm->output_id = chnl_id;
1379 sm->output_size = (chnl_packet_obj->byte_size +
1380 chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
1382 sm->output_full = 1;
1383 /* Indicate to the DSP we have written the output */
1384 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1385 /* Notify client with IO completion record (keep EOS) */
1386 chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1387 notify_chnl_complete(pchnl, chnl_packet_obj);
1388 /* Notify if stream is done. */
1389 if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1390 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1397 * ======== output_msg ========
1398 * Copies messages from the message queues to the shared memory.
/*
 * NOTE(review): extract is non-contiguous (elided braces/locals/returns);
 * comments describe only the visible statements.
 */
1400 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1404 struct msg_dspmsg *msg_output;
1405 struct msg_frame *pmsg;
1406 struct msg_ctrl *msg_ctr_obj;
1410 msg_ctr_obj = pio_mgr->msg_output_ctrl;
1412 /* Check if output has been cleared */
/* The DSP sets buf_empty when it has drained the previous batch. */
1413 if (!msg_ctr_obj->buf_empty)
/* Send at most max_msgs messages per batch. */
1416 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1417 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1418 msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;
1420 /* Copy num_msgs messages into shared memory */
1421 for (i = 0; i < num_msgs; i++) {
1422 if (list_empty(&hmsg_mgr->msg_used_list))
1425 pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
1426 struct msg_frame, list_elem);
1427 list_del(&pmsg->list_elem);
/*
 * Each field is written individually through the external-memory
 * write helper rather than with a bulk memcpy.
 */
1429 val = (pmsg->msg_data).msgq_id;
1430 addr = (u32) &msg_output->msgq_id;
1431 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1433 val = (pmsg->msg_data).msg.cmd;
1434 addr = (u32) &msg_output->msg.cmd;
1435 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1437 val = (pmsg->msg_data).msg.arg1;
1438 addr = (u32) &msg_output->msg.arg1;
1439 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1441 val = (pmsg->msg_data).msg.arg2;
1442 addr = (u32) &msg_output->msg.arg2;
1443 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
/* Recycle the frame and wake any waiter for free frames. */
1446 list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
1447 sync_set_event(hmsg_mgr->sync_event);
1451 hmsg_mgr->msgs_pending -= num_msgs;
1452 #if _CHNL_WORDSIZE == 2
1454 * Access can be different SM access word size
1455 * (e.g. 16/32 bit words)
1457 msg_ctr_obj->size = (u16) num_msgs;
1459 msg_ctr_obj->size = num_msgs;
/* Mark the batch as ready and interrupt the DSP. */
1461 msg_ctr_obj->buf_empty = false;
1462 /* Set the post SWI flag */
1463 msg_ctr_obj->post_swi = true;
1464 /* Tell the DSP we have written the output. */
1465 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1470 * ======== register_shm_segs ========
1472 * Registers GPP SM segment with CMM.
/*
 * NOTE(review): extract is non-contiguous — error-path statements, the
 * third parameter of the function, and several assignments are elided.
 * Reads the SHM0 base/end/reserved-start symbols from the COD manager,
 * derives the GPP-allocatable and DSP-allocatable sizes, then registers
 * the segment with CMM via cmm_register_gppsm_seg().
 */
1474 static int register_shm_segs(struct io_mgr *hio_mgr,
1475 struct cod_manager *cod_man,
1479 u32 ul_shm0_base = 0;
1481 u32 ul_shm0_rsrvd_start = 0;
1482 u32 ul_rsrvd_size = 0;
1485 u32 ul_shm_seg_id0 = 0;
1486 u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1489 * Read address and size info for first SM region.
1490 * Get start of 1st SM Heap region.
1493 cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
1494 if (ul_shm0_base == 0) {
1498 /* Get end of 1st SM Heap region */
1500 /* Get start and length of message part of shared memory */
1501 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1503 if (shm0_end == 0) {
1508 /* Start of Gpp reserved region */
1510 /* Get start and length of message part of shared memory */
1512 cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1513 &ul_shm0_rsrvd_start);
1514 if (ul_shm0_rsrvd_start == 0) {
1519 /* Register with CMM */
1521 status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
/* Drop any previously registered segment before re-registering. */
1523 status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
1527 /* Register new SM region(s) */
1528 if (!status && (shm0_end - ul_shm0_base) > 0) {
1529 /* Calc size (bytes) of SM the GPP can alloc from */
1531 (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1532 if (ul_rsrvd_size <= 0) {
1536 /* Calc size of SM DSP can alloc from */
1538 (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1539 if (ul_dsp_size <= 0) {
1543 /* First TLB entry reserved for Bridge SM use. */
1544 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
1545 /* Get size in bytes */
1547 hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
1550 * Calc byte offset used to convert GPP phys <-> DSP byte
/* Offset is kept unsigned; direction depends on which base is larger. */
1553 if (dw_gpp_base_pa > ul_dsp_virt)
1554 dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1556 dw_offset = ul_dsp_virt - dw_gpp_base_pa;
1558 if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1563 * Calc Gpp phys base of SM region.
1564 * This is actually uncached kernel virtual address.
1567 ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1570 * Calc Gpp phys base of SM region.
1571 * This is the physical address.
1574 dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1576 /* Register SM Segment 0. */
1578 cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
1579 ul_rsrvd_size, dw_offset,
1581 ul_dsp_virt) ? CMM_ADDTODSPPA :
1583 (u32) (ul_shm0_base *
1584 hio_mgr->word_size),
1585 ul_dsp_size, &ul_shm_seg_id0,
1587 /* First SM region is seg_id = 1 */
/* CMM is expected to hand back segment id 1 for the first region. */
1588 if (ul_shm_seg_id0 != 1)
1595 /* ZCPY IO routines. */
1597 * ======== IO_SHMcontrol ========
1598 * Sets the requested shm setting.
/*
 * NOTE(review): extract is non-contiguous — the switch on 'desc' and its
 * case labels are elided; the visible fragments appear to be separate
 * cases (update current OPP, publish the OPP table, read the DSP's OPP
 * request) — TODO confirm against the full source.
 * The whole body is only compiled under CONFIG_TIDSPBRIDGE_DVFS.
 */
1600 int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1602 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1604 struct dspbridge_platform_data *pdata =
1605 omap_dspbridge_dev->dev.platform_data;
1609 /* Update the shared memory with requested OPP information */
1611 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1618 * Update the shared memory with the voltage, frequency,
1619 * min and max frequency values for an OPP.
/* Columns of vdd1_dsp_freq: [0]=voltage [1]=frequency [2]=min [3]=max. */
1621 for (i = 0; i <= dsp_max_opps; i++) {
1622 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1623 voltage = vdd1_dsp_freq[i][0];
1624 dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1625 vdd1_dsp_freq[i][0]);
1626 hio_mgr->shared_mem->opp_table_struct.
1627 opp_point[i].frequency = vdd1_dsp_freq[i][1];
1628 dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1629 vdd1_dsp_freq[i][1]);
1630 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1631 min_freq = vdd1_dsp_freq[i][2];
1632 dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1633 vdd1_dsp_freq[i][2]);
1634 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1635 max_freq = vdd1_dsp_freq[i][3];
1636 dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1637 vdd1_dsp_freq[i][3]);
1639 hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1641 dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1642 /* Update the current OPP number */
/* Query the platform hook for the current OPP, if one is provided. */
1643 if (pdata->dsp_get_opp)
1644 i = (*pdata->dsp_get_opp) ();
1645 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1646 dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1649 /* Get the OPP that DSP has requested */
1650 *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1660 * ======== bridge_io_get_proc_load ========
1661 * Gets the Processor's Load information
/*
 * Copies the four load-monitor fields (current/predicted DSP load and
 * frequency) out of shared memory into *proc_lstat and logs them.
 * NOTE(review): extract is non-contiguous — the error return taken when
 * shared_mem is NULL and the final return are elided.
 */
1663 int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1664 struct dsp_procloadstat *proc_lstat)
/* Shared memory must be mapped before load info can be read. */
1666 if (!hio_mgr->shared_mem)
1669 proc_lstat->curr_load =
1670 hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1671 proc_lstat->predicted_load =
1672 hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1673 proc_lstat->curr_dsp_freq =
1674 hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1675 proc_lstat->predicted_freq =
1676 hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1678 dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1679 "Pred Freq = %d\n", proc_lstat->curr_load,
1680 proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
1681 proc_lstat->predicted_freq);
1686 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
/*
 * Drain new DSP trace messages into hio_mgr->msg and print them.
 * The DSP maintains a write pointer in its trace buffer; this routine
 * translates it into a GPP virtual address and copies either one
 * contiguous run or two runs when the circular buffer has wrapped.
 * NOTE(review): extract is non-contiguous — braces and some statements
 * are elided.
 */
1687 void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1689 u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1692 /* Get the DSP current pointer */
1693 ul_gpp_cur_pointer =
1694 *(u32 *) (hio_mgr->trace_buffer_current);
/* Rebase the DSP-side pointer into the GPP virtual address space. */
1695 ul_gpp_cur_pointer =
1696 hio_mgr->gpp_va + (ul_gpp_cur_pointer -
1699 /* No new debug messages available yet */
1700 if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
1702 } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
1703 /* Continuous data */
1704 ul_new_message_length =
1705 ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
1707 memcpy(hio_mgr->msg,
1708 (char *)hio_mgr->gpp_read_pointer,
1709 ul_new_message_length);
/* NUL-terminate so the buffer can be printed as a C string. */
1710 hio_mgr->msg[ul_new_message_length] = '\0';
1712 * Advance the GPP trace pointer to DSP current
1715 hio_mgr->gpp_read_pointer += ul_new_message_length;
1716 /* Print the trace messages */
1717 pr_info("DSPTrace: %s\n", hio_mgr->msg);
1718 } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
1719 /* Handle trace buffer wraparound */
/* First run: from the read pointer to the end of the buffer. */
1720 memcpy(hio_mgr->msg,
1721 (char *)hio_mgr->gpp_read_pointer,
1722 hio_mgr->trace_buffer_end -
1723 hio_mgr->gpp_read_pointer);
1724 ul_new_message_length =
1725 ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
/* Second run: from the buffer start up to the DSP write pointer. */
1726 memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
1727 hio_mgr->gpp_read_pointer],
1728 (char *)hio_mgr->trace_buffer_begin,
1729 ul_new_message_length);
1730 hio_mgr->msg[hio_mgr->trace_buffer_end -
1731 hio_mgr->gpp_read_pointer +
1732 ul_new_message_length] = '\0';
1734 * Advance the GPP trace pointer to DSP current
1737 hio_mgr->gpp_read_pointer =
1738 hio_mgr->trace_buffer_begin +
1739 ul_new_message_length;
1740 /* Print the trace messages */
1741 pr_info("DSPTrace: %s\n", hio_mgr->msg);
1747 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1749 * ======== print_dsp_trace_buffer ========
1750 * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1752 * hdeh_mgr: Handle to DEH manager object
1753 * number of extra carriage returns to generate.
1756 * -ENOMEM: Unable to allocate memory.
1758 * hdeh_mgr muse be valid. Checked in bridge_deh_notify.
/*
 * NOTE(review): extract is non-contiguous — status checks, some
 * assignments (e.g. where ul_trace_end / trace_cur_pos are stored) and
 * closing braces are elided; comments cover the visible logic only.
 * Flow: resolve the trace begin/end/cur-pos symbols, read the whole
 * buffer from the DSP, then print it line by line in two passes around
 * the DSP's current write position (the circular-buffer split point).
 */
1760 int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1763 struct cod_manager *cod_mgr;
1767 u32 ul_num_bytes = 0;
1768 u32 ul_num_words = 0;
1769 u32 ul_word_size = 2;
1776 struct bridge_dev_context *pbridge_context = hbridge_context;
1777 struct bridge_drv_interface *intf_fxns;
1778 struct dev_object *dev_obj = (struct dev_object *)
1779 pbridge_context->dev_obj;
1781 status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1784 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1786 cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1792 cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1795 /* trace_cur_pos will hold the address of a DSP pointer */
1796 status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1802 ul_num_bytes = (ul_trace_end - ul_trace_begin);
1804 ul_num_words = ul_num_bytes * ul_word_size;
1805 status = dev_get_intf_fxns(dev_obj, &intf_fxns);
/* +2: room for a terminating NUL past the buffer end. GFP_ATOMIC as
 * this may run from a fault-handling (non-sleeping) context — TODO
 * confirm against callers. */
1810 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1811 if (psz_buf != NULL) {
1812 /* Read trace buffer data */
1813 status = (*intf_fxns->brd_read)(pbridge_context,
1814 (u8 *)psz_buf, (u32)ul_trace_begin,
1820 /* Pack and do newline conversion */
1821 pr_debug("PrintDspTraceBuffer: "
1822 "before pack and unpack.\n");
1823 pr_debug("%s: DSP Trace Buffer Begin:\n"
1824 "=======================\n%s\n",
1827 /* Read the value at the DSP address in trace_cur_pos. */
1828 status = (*intf_fxns->brd_read)(pbridge_context,
1829 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1833 /* Pack and do newline conversion */
1834 pr_info("DSP Trace Buffer Begin:\n"
1835 "=======================\n%s\n",
1839 /* convert to offset */
1840 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1844 * The buffer is not full, find the end of the
1845 * data -- buf_end will be >= pszBuf after
1848 buf_end = &psz_buf[ul_num_bytes+1];
1849 /* DSP print position */
1850 trace_end = &psz_buf[trace_cur_pos];
1853 * Search buffer for a new_line and replace it
1854 * with '\0', then print as string.
1855 * Continue until end of buffer is reached.
/* Pass 1: from the DSP write position to the end of the buffer
 * (the chronologically older half of the circular buffer). */
1857 str_beg = trace_end;
1858 ul_num_bytes = buf_end - str_beg;
1860 while (str_beg < buf_end) {
1861 new_line = strnchr(str_beg, ul_num_bytes,
1863 if (new_line && new_line < buf_end) {
1865 pr_debug("%s\n", str_beg);
1866 str_beg = ++new_line;
1867 ul_num_bytes = buf_end - str_beg;
1870 * Assume buffer empty if it contains
1873 if (*str_beg != '\0') {
1874 str_beg[ul_num_bytes] = 0;
1875 pr_debug("%s\n", str_beg);
1882 * Search buffer for a nNewLine and replace it
1883 * with '\0', then print as string.
1884 * Continue until buffer is exhausted.
/* Pass 2: from the buffer start up to the DSP write position
 * (the newest data). */
1887 ul_num_bytes = trace_end - str_beg;
1889 while (str_beg < trace_end) {
1890 new_line = strnchr(str_beg, ul_num_bytes, '\n');
1891 if (new_line != NULL && new_line < trace_end) {
1893 pr_debug("%s\n", str_beg);
1894 str_beg = ++new_line;
1895 ul_num_bytes = trace_end - str_beg;
1898 * Assume buffer empty if it contains
1901 if (*str_beg != '\0') {
1902 str_beg[ul_num_bytes] = 0;
1903 pr_debug("%s\n", str_beg);
1905 str_beg = trace_end;
1910 pr_info("\n=======================\n"
1911 "DSP Trace Buffer End:\n");
1918 dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
1923 * dump_dsp_stack() - This function dumps the data on the DSP stack.
1924 * @bridge_context: Bridge driver's device context pointer.
/*
 * NOTE(review): extract is non-contiguous — local declarations, status
 * checks, loop bodies and most braces are elided; the fixed buffer
 * offsets (buffer[3], buffer[79], buffer[80], the register walk) follow
 * the DSP-side crash-dump layout, which is not visible here — TODO
 * confirm against the DSP firmware's dump format.
 * Flow: poll the trace buffer until the MMU-fault "magic number" header
 * appears, read the dump, then decode and pr_err() the crash position,
 * execution context, CPU registers and a raw stack walk with best-effort
 * symbolization via node_find_addr().
 */
1927 int dump_dsp_stack(struct bridge_dev_context *bridge_context)
1930 struct cod_manager *code_mgr;
1931 struct node_mgr *node_mgr;
1937 } mmu_fault_dbg_info;
1947 const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
1948 "IRP", "NRP", "AMR", "SSR",
1949 "ILC", "RILC", "IER", "CSR"};
1950 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
1951 struct bridge_drv_interface *intf_fxns;
1952 struct dev_object *dev_object = bridge_context->dev_obj;
1954 status = dev_get_cod_mgr(dev_object, &code_mgr);
1956 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
1961 status = dev_get_node_manager(dev_object, &node_mgr);
1963 pr_debug("%s: Failed on dev_get_node_manager.\n",
1970 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
1972 cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
1973 pr_debug("%s: trace_begin Value 0x%x\n",
1974 __func__, trace_begin);
1976 pr_debug("%s: Failed on cod_get_sym_value.\n",
1980 status = dev_get_intf_fxns(dev_object, &intf_fxns);
1982 * Check for the "magic number" in the trace buffer. If it has
1983 * yet to appear then poll the trace buffer to wait for it. Its
1984 * appearance signals that the DSP has finished dumping its state.
1986 mmu_fault_dbg_info.head[0] = 0;
1987 mmu_fault_dbg_info.head[1] = 0;
/* Bounded poll: re-read the header until both magic words match. */
1990 while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
1991 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
1992 poll_cnt < POLL_MAX) {
1994 /* Read DSP dump size from the DSP trace buffer... */
1995 status = (*intf_fxns->brd_read)(bridge_context,
1996 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
1997 sizeof(mmu_fault_dbg_info), 0);
2005 if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
2006 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
2008 pr_err("%s:No DSP MMU-Fault information available.\n",
2014 total_size = mmu_fault_dbg_info.size;
2015 /* Limit the size in case DSP went crazy */
2016 if (total_size > MAX_MMU_DBGBUFF)
2017 total_size = MAX_MMU_DBGBUFF;
2019 buffer = kzalloc(total_size, GFP_ATOMIC);
2022 pr_debug("%s: Failed to "
2023 "allocate stack dump buffer.\n", __func__);
2027 buffer_beg = buffer;
/* buffer walks 32-bit words, hence the /4. */
2028 buffer_end = buffer + total_size / 4;
2030 /* Read bytes from the DSP trace buffer... */
2031 status = (*intf_fxns->brd_read)(bridge_context,
2032 (u8 *)buffer, (u32)trace_begin,
2035 pr_debug("%s: Failed to Read Trace Buffer.\n",
2040 pr_err("\nAproximate Crash Position:\n"
2041 "--------------------------\n");
2043 exc_type = buffer[3];
2045 i = buffer[79]; /* IRP */
2047 i = buffer[80]; /* NRP */
2050 cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
/* Only addresses above the dynamic-extension base are symbolized. */
2056 if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
2057 0x1000, &offset_output, name) == 0))
2058 pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
2061 pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
2065 pr_err("\nExecution Info:\n"
2066 "---------------\n");
2068 if (*buffer < ARRAY_SIZE(exec_ctxt)) {
2069 pr_err("Execution context \t%s\n",
2070 exec_ctxt[*buffer++]);
2072 pr_err("Execution context corrupt\n");
2076 pr_err("Task Handle\t\t0x%x\n", *buffer++);
2077 pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
2078 pr_err("Stack Top\t\t0x%x\n", *buffer++);
2079 pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
2080 pr_err("Stack Size\t\t0x%x\n", *buffer++);
2081 pr_err("Stack Size In Use\t0x%x\n", *buffer++);
2083 pr_err("\nCPU Registers\n"
2084 "---------------\n");
/* A-register file: A4/A6/A8 double as function arguments, A15 is FP. */
2086 for (i = 0; i < 32; i++) {
2087 if (i == 4 || i == 6 || i == 8)
2088 pr_err("A%d 0x%-8x [Function Argument %d]\n",
2091 pr_err("A15 0x%-8x [Frame Pointer]\n",
2094 pr_err("A%d 0x%x\n", i, *buffer++);
2097 pr_err("\nB0 0x%x\n", *buffer++);
2098 pr_err("B1 0x%x\n", *buffer++);
2099 pr_err("B2 0x%x\n", *buffer++);
/* B3 is the return pointer — try to resolve it to a symbol. */
2101 if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2102 *buffer, 0x1000, &offset_output, name) == 0))
2104 pr_err("B3 0x%-8x [Function Return Pointer:"
2105 " \"%s\" + 0x%x]\n", *buffer, name,
2106 *buffer - offset_output);
2108 pr_err("B3 0x%-8x [Function Return Pointer:"
2109 "Unable to match to a symbol.]\n", *buffer);
2113 for (i = 4; i < 32; i++) {
2114 if (i == 4 || i == 6 || i == 8)
2115 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2118 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2121 pr_err("B%d 0x%x\n", i, *buffer++);
2126 for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2127 pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
/* Raw dump of the remaining words, symbolized where possible. */
2132 for (i = 0; buffer < buffer_end; i++, buffer++) {
2133 if ((*buffer > dyn_ext_base) && (
2134 node_find_addr(node_mgr, *buffer , 0x600,
2135 &offset_output, name) == 0))
2136 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2138 *buffer - offset_output);
2140 pr_err("[%d] 0x%x\n", i, *buffer);
2149 * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side
2150 * @bridge_context: Bridge driver's device context pointer.
/*
 * NOTE(review): extract is non-contiguous — status checks, goto labels
 * and the final closing brace are elided.
 * Flow: resolve the _DLModules symbol, read the modules_header from DSP
 * memory, then walk the singly linked list of dll_module structures,
 * printing each module's section load addresses and name strings.
 */
2153 void dump_dl_modules(struct bridge_dev_context *bridge_context)
2155 struct cod_manager *code_mgr;
2156 struct bridge_drv_interface *intf_fxns;
2157 struct bridge_dev_context *bridge_ctxt = bridge_context;
2158 struct dev_object *dev_object = bridge_ctxt->dev_obj;
2159 struct modules_header modules_hdr;
2160 struct dll_module *module_struct = NULL;
2161 u32 module_dsp_addr;
2163 u32 module_struct_size = 0;
2168 status = dev_get_intf_fxns(dev_object, &intf_fxns);
2170 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2174 status = dev_get_cod_mgr(dev_object, &code_mgr);
2176 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2181 /* Lookup the address of the modules_header structure */
2182 status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2184 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2189 pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2191 /* Copy the modules_header structure from DSP memory. */
2192 status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
2193 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2196 pr_debug("%s: Failed failed to read modules header.\n",
2201 module_dsp_addr = modules_hdr.first_module;
2202 module_size = modules_hdr.first_module_size;
2204 pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2207 pr_err("\nDynamically Loaded Modules:\n"
2208 "---------------------------\n");
2210 /* For each dll_module structure in the list... */
/* module_size == 0 terminates the list. */
2211 while (module_size) {
2213 * Allocate/re-allocate memory to hold the dll_module
2214 * structure. The memory is re-allocated only if the existing
2215 * allocation is too small.
2217 if (module_size > module_struct_size) {
2218 kfree(module_struct);
/* +128 slack avoids reallocating for slightly larger modules. */
2219 module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2220 module_struct_size = module_size+128;
2221 pr_debug("%s: allocated module struct %p %d\n",
2222 __func__, module_struct, module_struct_size);
2226 /* Copy the dll_module structure from DSP memory */
2227 status = (*intf_fxns->brd_read)(bridge_context,
2228 (u8 *)module_struct, module_dsp_addr, module_size, 0);
2232 "%s: Failed to read dll_module stuct for 0x%x.\n",
2233 __func__, module_dsp_addr);
2237 /* Update info regarding the _next_ module in the list. */
2238 module_dsp_addr = module_struct->next_module;
2239 module_size = module_struct->next_module_size;
2241 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2242 __func__, module_dsp_addr, module_size,
2243 module_struct->num_sects);
2246 * The section name strings start immedialty following
2247 * the array of dll_sect structures.
2249 sect_str = (char *) &module_struct->
2250 sects[module_struct->num_sects];
2251 pr_err("%s\n", sect_str);
2254 * Advance to the first section name string.
2255 * Each string follows the one before.
2257 sect_str += strlen(sect_str) + 1;
2259 /* Access each dll_sect structure and its name string. */
2261 sect_ndx < module_struct->num_sects; sect_ndx++) {
2262 pr_err(" Section: 0x%x ",
2263 module_struct->sects[sect_ndx].sect_load_adr);
/* Guard against a name string running past the copied struct. */
2265 if (((u32) sect_str - (u32) module_struct) <
2266 module_struct_size) {
2267 pr_err("%s\n", sect_str);
2268 /* Each string follows the one before. */
2269 sect_str += strlen(sect_str)+1;
2271 pr_err("<string error>\n");
2272 pr_debug("%s: section name sting address "
2273 "is invalid %p\n", __func__, sect_str);
2278 kfree(module_struct);