4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * IO dispatcher for a shared memory channel driver.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 * There is an important invariant condition which must be maintained per
22 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
23 * which may cause timeouts and/or failure of the sync_wait_on_event
26 #include <linux/types.h>
29 #include <dspbridge/host_os.h>
30 #include <linux/workqueue.h>
32 /* ----------------------------------- DSP/BIOS Bridge */
33 #include <dspbridge/dbdefs.h>
36 #include <dspbridge/dbc.h>
39 #include <dspbridge/ntfy.h>
40 #include <dspbridge/sync.h>
43 #include <dspbridge/dspdeh.h>
44 #include <dspbridge/dspio.h>
45 #include <dspbridge/dspioctl.h>
46 #include <dspbridge/wdt.h>
48 #include <tiomap_io.h>
49 #include <_tiomap_pwr.h>
51 /* Platform Manager */
52 #include <dspbridge/cod.h>
53 #include <dspbridge/node.h>
54 #include <dspbridge/dev.h>
57 #include <dspbridge/rms_sh.h>
58 #include <dspbridge/mgr.h>
59 #include <dspbridge/drv.h>
61 #include "module_list.h"
64 #include <dspbridge/io_sm.h>
/*
 * NOTE(review): this chunk is a garbled extraction -- each line carries a
 * stray leading line-number artifact and some lines are missing.  Code is
 * kept byte-identical below; comments only.
 */
67 /* Defines, Data Structures, Typedefs */
/* Sentinel returned by find_ready_output() when no output channel is ready */
68 #define OUTPUTNOTREADY 0xffff
69 #define NOTENABLED 0xffff /* Channel(s) not enabled */
/* COFF symbol name for the end of the dynamic external memory segment */
71 #define EXTEND "_EXT_END"
/* Identity byte-swap: host and DSP share endianness on this platform */
73 #define SWAP_WORD(x) (x)
74 #define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
/* Max queued power-management requests (presumably; TODO confirm usage) */
76 #define MAX_PM_REQS 32
/* Marker words framing an MMU-fault dump in shared memory */
78 #define MMU_FAULT_HEAD1 0xa5a5a5a5
79 #define MMU_FAULT_HEAD2 0x96969696
/* Upper bound (bytes) on the MMU-fault debug buffer */
81 #define MAX_MMU_DBGBUFF 10240
/*
 * Field list of struct io_mgr.  NOTE(review): the enclosing
 * "struct io_mgr {" opener and the closing "};" are not visible in this
 * chunk (extraction dropped lines); fields are kept byte-identical.
 */
83 /* IO Manager: only one created per board */
85 /* These four fields must be the first fields in a io_mgr_ struct */
86 /* Bridge device context */
87 struct bridge_dev_context *hbridge_context;
88 /* Function interface to Bridge driver */
89 struct bridge_drv_interface *intf_fxns;
90 struct dev_object *hdev_obj; /* Device this board represents */
92 /* These fields initialized in bridge_io_create() */
93 struct chnl_mgr *hchnl_mgr;
94 struct shm *shared_mem; /* Shared Memory control */
95 u8 *input; /* Address of input channel */
96 u8 *output; /* Address of output channel */
97 struct msg_mgr *hmsg_mgr; /* Message manager */
98 /* Msg control for from DSP messages */
99 struct msg_ctrl *msg_input_ctrl;
100 /* Msg control for to DSP messages */
101 struct msg_ctrl *msg_output_ctrl;
102 u8 *msg_input; /* Address of input messages */
103 u8 *msg_output; /* Address of output messages */
104 u32 usm_buf_size; /* Size of a shared memory I/O channel */
105 bool shared_irq; /* Is this IRQ shared? */
106 u32 word_size; /* Size in bytes of DSP word */
107 u16 intr_val; /* Interrupt value */
108 /* Private extnd proc info; mmu setup */
109 struct mgr_processorextinfo ext_proc_info;
110 struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
111 struct work_struct io_workq; /* workqueue */
/* Trace-buffer bookkeeping, only when kernel trace support is configured */
112 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
113 u32 ul_trace_buffer_begin; /* Trace message start address */
114 u32 ul_trace_buffer_end; /* Trace message end address */
115 u32 ul_trace_buffer_current; /* Trace message current address */
116 u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
/* NOTE(review): matching #endif and some fields are missing from this view */
122 u32 dpc_req; /* Number of requested DPC's. */
123 u32 dpc_sched; /* Number of executed DPC's. */
124 struct tasklet_struct dpc_tasklet;
129 /* Function Prototypes */
130 static void io_dispatch_pm(struct io_mgr *pio_mgr);
131 static void notify_chnl_complete(struct chnl_object *pchnl,
132 struct chnl_irp *chnl_packet_obj);
133 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
135 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
137 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
138 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
139 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
140 struct chnl_object *pchnl, u32 mask);
142 /* Bus Addr (cached kernel) */
143 static int register_shm_segs(struct io_mgr *hio_mgr,
144 struct cod_manager *cod_man,
147 static inline void set_chnl_free(struct shm *sm, u32 chnl)
149 sm->host_free_mask &= ~(1 << chnl);
152 static inline void set_chnl_busy(struct shm *sm, u32 chnl)
154 sm->host_free_mask |= 1 << chnl;
159 * ======== bridge_io_create ========
160 * Create an IO manager object.
/*
 * bridge_io_create() - allocate and initialize the board's single IO manager.
 *
 * Validates arguments, fetches the channel manager and bridge device
 * context from the device object, allocates a zeroed io_mgr, wires up the
 * DPC tasklet/counters/lock for DSP units, and publishes the manager via
 * hchnl_mgr->hio_mgr.  On failure it tears down with bridge_io_destroy().
 *
 * NOTE(review): this chunk is a garbled extraction -- stray leading line
 * numbers on every line, and many lines (braces, status checks, returns,
 * the #endif for the trace fields) are missing.  Code kept byte-identical.
 */
162 int bridge_io_create(struct io_mgr **io_man,
163 struct dev_object *hdev_obj,
164 const struct io_attrs *mgr_attrts)
167 struct io_mgr *pio_mgr = NULL;
168 struct shm *shared_mem = NULL;
169 struct bridge_dev_context *hbridge_context = NULL;
170 struct cfg_devnode *dev_node_obj;
171 struct chnl_mgr *hchnl_mgr;
174 /* Check requirements */
175 if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) {
179 dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
/* Fail if there is no channel manager or an IO manager already exists */
180 if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
185 * Message manager will be created when a file is loaded, since
186 * size of message buffer in shared memory is configurable in
189 dev_get_bridge_context(hdev_obj, &hbridge_context);
190 if (!hbridge_context) {
194 dev_get_dev_type(hdev_obj, &dev_type);
196 * DSP shared memory area will get set properly when
197 * a program is loaded. They are unknown until a COFF file is
198 * loaded. I chose the value -1 because it was less likely to be
199 * a valid address than 0.
201 shared_mem = (struct shm *)-1;
203 /* Allocate IO manager object */
204 pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
205 if (pio_mgr == NULL) {
210 /* Initialize chnl_mgr object */
211 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
212 pio_mgr->pmsg = NULL;
214 pio_mgr->hchnl_mgr = hchnl_mgr;
215 pio_mgr->word_size = mgr_attrts->word_size;
216 pio_mgr->shared_mem = shared_mem;
218 if (dev_type == DSP_UNIT) {
219 /* Create an IO DPC */
220 tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
222 /* Initialize DPC counters */
223 pio_mgr->dpc_req = 0;
224 pio_mgr->dpc_sched = 0;
226 spin_lock_init(&pio_mgr->dpc_lock);
228 status = dev_get_dev_node(hdev_obj, &dev_node_obj);
232 pio_mgr->hbridge_context = hbridge_context;
233 pio_mgr->shared_irq = mgr_attrts->irq_shared;
/* Error path: undo partial construction (presumably; TODO confirm) */
242 bridge_io_destroy(pio_mgr);
246 /* Return IO manager object to caller... */
247 hchnl_mgr->hio_mgr = pio_mgr;
254 * ======== bridge_io_destroy ========
256 * Disable interrupts, destroy the IO manager.
/*
 * bridge_io_destroy() - tear down the IO manager created by
 * bridge_io_create(): kill the DPC tasklet, release the trace-message
 * buffer (when trace support is configured), and free the manager.
 *
 * NOTE(review): extraction dropped the braces, the hio_mgr NULL check,
 * the #endif, the kfree of the manager itself and the return statement.
 * Code kept byte-identical.
 */
258 int bridge_io_destroy(struct io_mgr *hio_mgr)
262 /* Free IO DPC object */
263 tasklet_kill(&hio_mgr->dpc_tasklet);
265 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
266 kfree(hio_mgr->pmsg);
269 /* Free this IO manager object */
279 * ======== bridge_io_on_loaded ========
281 * Called when a new program is loaded to get shared memory buffer
282 * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
283 * are in DSP address units.
/*
 * bridge_io_on_loaded() - (re)configure shared memory after a DSP program
 * load: read segment symbols from the COFF via the COD manager, size the
 * channel and message regions, build the DSP MMU TLB table, register SM
 * segments with CMM, and compute the GPP-side addresses of the channel,
 * message and (optionally) trace buffers.
 *
 * NOTE(review): this chunk is a garbled extraction -- every line carries a
 * stray leading line number and a large number of lines (braces, status
 * checks, several declarations, #else/#endif lines) are missing.  Code is
 * kept byte-identical; comments only.
 */
285 int bridge_io_on_loaded(struct io_mgr *hio_mgr)
287 struct cod_manager *cod_man;
288 struct chnl_mgr *hchnl_mgr;
289 struct msg_mgr *hmsg_mgr;
290 struct shm_segs *sm_sg;
292 u32 ul_shm_base_offset;
294 u32 ul_shm_length = -1;
295 u32 ul_mem_length = -1;
298 u32 ul_msg_length = -1;
309 /* DSP MMU setup table */
310 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
311 struct cfg_hostres *host_res;
312 struct bridge_dev_context *pbridge_context;
315 u32 ul_seg1_size = 0;
317 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
318 if (!pbridge_context) {
323 host_res = pbridge_context->resources;
328 sm_sg = &pbridge_context->sh_s;
330 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
335 hchnl_mgr = hio_mgr->hchnl_mgr;
336 /* The message manager is destroyed when the board is stopped. */
337 dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
338 hmsg_mgr = hio_mgr->hmsg_mgr;
339 if (!hchnl_mgr || !hmsg_mgr) {
/* Discard stale SM pointer from a previous load */
343 if (hio_mgr->shared_mem)
344 hio_mgr->shared_mem = NULL;
346 /* Get start and length of channel part of shared memory */
347 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
353 status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
359 if (ul_shm_limit <= ul_shm_base) {
363 /* Get total length in bytes */
364 ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
365 /* Calculate size of a PROCCOPY shared memory region */
366 dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
367 __func__, (ul_shm_length - sizeof(struct shm)));
369 /* Get start and length of message part of shared memory */
370 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
373 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
376 if (ul_msg_limit <= ul_msg_base) {
380 * Length (bytes) of messaging part of shared
384 (ul_msg_limit - ul_msg_base +
385 1) * hio_mgr->word_size;
387 * Total length (bytes) of shared memory:
390 ul_mem_length = ul_shm_length + ul_msg_length;
/* With trace support the SM0 end symbol differs (trace section end) */
399 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
401 cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
403 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
411 cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
416 status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
421 /* Get memory reserved in host resources */
422 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
423 &hio_mgr->ext_proc_info,
425 mgr_processorextinfo),
428 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
430 ul_gpp_pa = host_res->dw_mem_phys[1];
431 ul_gpp_va = host_res->dw_mem_base[1];
432 /* This is the virtual uncached ioremapped address!!! */
433 /* Why can't we directly take the DSPVA from the symbols? */
434 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
435 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
437 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
/* Round seg1 up to 4 KiB, seg0 up to 64 KiB, then pad to page alignment */
439 ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
441 ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
442 ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
444 if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
447 dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
448 "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
449 "ul_seg_size %x ul_seg1_size %x \n", __func__,
450 ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
451 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
453 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
454 host_res->dw_mem_length[1]) {
455 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
456 __func__, host_res->dw_mem_length[1],
457 ul_seg_size + ul_seg1_size + ul_pad_size);
/* Record both SM segments: seg1 = dynamic ext mem, seg0 = channel/msg SM */
464 sm_sg->seg1_pa = ul_gpp_pa;
465 sm_sg->seg1_da = ul_dyn_ext_base;
466 sm_sg->seg1_va = ul_gpp_va;
467 sm_sg->seg1_size = ul_seg1_size;
468 sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
469 sm_sg->seg0_da = ul_dsp_va;
470 sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
471 sm_sg->seg0_size = ul_seg_size;
474 * Copy remaining entries from CDB. All entries are 1 MB and
475 * should not conflict with shm entries on MPU or DSP side.
477 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
478 if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
481 if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
483 && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
484 ul_gpp_pa + ul_seg_size)
485 || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
486 ul_dsp_va - 0x100000 / hio_mgr->word_size
487 && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
488 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
490 "CDB MMU entry %d conflicts with "
491 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
492 "GppPa %x, DspVa %x, Bytes %x.\n", i,
493 hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
494 hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
495 ul_gpp_pa, ul_dsp_va, ul_seg_size);
498 if (ndx < MAX_LOCK_TLB_ENTRIES) {
499 ae_proc[ndx].ul_dsp_va =
500 hio_mgr->ext_proc_info.ty_tlb[i].
502 ae_proc[ndx].ul_gpp_pa =
503 hio_mgr->ext_proc_info.ty_tlb[i].
505 ae_proc[ndx].ul_gpp_va = 0;
507 ae_proc[ndx].ul_size = 0x100000;
508 dev_dbg(bridge, "shm MMU entry PA %x "
509 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
510 ae_proc[ndx].ul_dsp_va);
/* Zero out the unused tail of the TLB setup table */
518 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
519 ae_proc[i].ul_dsp_va = 0;
520 ae_proc[i].ul_gpp_pa = 0;
521 ae_proc[i].ul_gpp_va = 0;
522 ae_proc[i].ul_size = 0;
525 * Set the shm physical address entry (grayed out in CDB file)
526 * to the virtual uncached ioremapped address of shm reserved
529 hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
530 (ul_gpp_va + ul_seg1_size + ul_pad_size);
533 * Need shm Phys addr. IO supports only one DSP for now:
536 if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
540 if (sm_sg->seg0_da > ul_shm_base) {
544 /* ul_shm_base may not be at ul_dsp_va address */
545 ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
548 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
549 * bridge_brd_start() the MMU will be re-programed with MMU
550 * DSPVa-GPPPa pair info while DSP is in a known
555 hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
556 BRDIOCTL_SETMMUCONFIG,
560 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
561 ul_shm_base += ul_shm_base_offset;
562 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
564 if (ul_shm_base == 0) {
569 status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
/* Lay out the channel region: shm header, then input/output halves */
572 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
573 hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
574 hio_mgr->output = hio_mgr->input + (ul_shm_length -
575 sizeof(struct shm)) / 2;
576 hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
578 /* Set up Shared memory addresses for messaging. */
579 hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
582 (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
583 hio_mgr->msg_output_ctrl =
584 (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
586 hio_mgr->msg_output =
587 (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
589 ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
590 / sizeof(struct msg_dspmsg);
591 dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
592 "output %p, msg_input_ctrl %p, msg_input %p, "
593 "msg_output_ctrl %p, msg_output %p\n",
594 (u8 *) hio_mgr->shared_mem, hio_mgr->input,
595 hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
596 hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
597 hio_mgr->msg_output);
598 dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
600 memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
/* Trace-buffer setup: translate DSP-side symbols to GPP virtual addrs */
602 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
603 /* Get the start address of trace buffer */
604 status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
605 &hio_mgr->ul_trace_buffer_begin);
611 hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
612 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
613 (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
614 /* Get the end address of trace buffer */
615 status = cod_get_sym_value(cod_man, SYS_PUTCEND,
616 &hio_mgr->ul_trace_buffer_end);
621 hio_mgr->ul_trace_buffer_end =
622 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
623 (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
624 /* Get the current address of DSP write pointer */
625 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
626 &hio_mgr->ul_trace_buffer_current);
631 hio_mgr->ul_trace_buffer_current =
632 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
633 (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
634 /* Calculate the size of trace buffer */
635 kfree(hio_mgr->pmsg);
636 hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
637 hio_mgr->ul_trace_buffer_begin) *
638 hio_mgr->word_size) + 2, GFP_KERNEL);
642 hio_mgr->ul_dsp_va = ul_dsp_va;
643 hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
651 * ======== io_buf_size ========
652 * Size of shared memory I/O channel.
654 u32 io_buf_size(struct io_mgr *hio_mgr)
657 return hio_mgr->usm_buf_size;
663 * ======== io_cancel_chnl ========
664 * Cancel IO on a given PCPY channel.
666 void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
668 struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
673 sm = hio_mgr->shared_mem;
675 /* Inform DSP that we have no more buffers on this channel */
676 set_chnl_free(sm, chnl);
678 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
685 * ======== io_dispatch_pm ========
686 * Performs I/O dispatch on PM related messages from DSP
/*
 * io_dispatch_pm() - dispatch a power-management mailbox message from the
 * DSP to the bridge clk/pwr manager: hibernate request, OPP (constraint)
 * request, or clock control; each is forwarded via pfn_dev_cntrl().
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; the
 * declarations of `parg`/`status`, status checks, braces and some argument
 * lines are missing.  Code kept byte-identical.
 */
688 static void io_dispatch_pm(struct io_mgr *pio_mgr)
693 /* Perform Power message processing here */
694 parg[0] = pio_mgr->intr_val;
696 /* Send the command to the Bridge clk/pwr manager to handle */
697 if (parg[0] == MBX_PM_HIBERNATE_EN) {
698 dev_dbg(bridge, "PM: Hibernate command\n");
699 status = pio_mgr->intf_fxns->
700 pfn_dev_cntrl(pio_mgr->hbridge_context,
701 BRDIOCTL_PWR_HIBERNATE, parg);
702 pr_err("%s: hibernate cmd failed 0x%x\n",
705 } else if (parg[0] == MBX_PM_OPP_REQ) {
706 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
707 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
708 status = pio_mgr->intf_fxns->
709 pfn_dev_cntrl(pio_mgr->hbridge_context,
710 BRDIOCTL_CONSTRAINT_REQUEST, parg);
712 dev_dbg(bridge, "PM: Failed to set constraint "
713 "= 0x%x\n", parg[1]);
/* Fallback branch: any other PM message is treated as clock control */
715 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
717 status = pio_mgr->intf_fxns->
718 pfn_dev_cntrl(pio_mgr->hbridge_context,
719 BRDIOCTL_CLK_CTRL, parg);
721 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
727 * ======== io_dpc ========
728 * Deferred procedure call for shared memory channel driver ISR. Carries
729 * out the dispatch of I/O as a non-preemptible event.It can only be
730 * pre-empted by an ISR.
/*
 * io_dpc() - deferred procedure call (tasklet body) for the shared-memory
 * channel driver ISR.  Drains pending DPC requests: reports DEH errors,
 * dispatches proc-copy channel input/output, services message queues, and
 * (in debug builds) prints DSP trace output; finally records how many
 * requests were serviced.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * the `requested`/`serviced` declarations, loop header, #endif lines and
 * goto/return paths are missing.  Code kept byte-identical.
 */
732 void io_dpc(unsigned long ref_data)
734 struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
735 struct chnl_mgr *chnl_mgr_obj;
736 struct msg_mgr *msg_mgr_obj;
737 struct deh_mgr *hdeh_mgr;
743 chnl_mgr_obj = pio_mgr->hchnl_mgr;
744 dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
745 dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
/* Snapshot request/serviced counters; nothing to do if equal */
749 requested = pio_mgr->dpc_req;
750 serviced = pio_mgr->dpc_sched;
752 if (serviced == requested)
755 /* Process pending DPC's */
757 /* Check value of interrupt reg to ensure it's a valid error */
758 if ((pio_mgr->intr_val > DEH_BASE) &&
759 (pio_mgr->intr_val < DEH_LIMIT)) {
760 /* Notify DSP/BIOS exception */
762 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
763 print_dsp_debug_trace(pio_mgr);
765 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
769 /* Proc-copy chanel dispatch */
770 input_chnl(pio_mgr, NULL, IO_SERVICE);
771 output_chnl(pio_mgr, NULL, IO_SERVICE);
775 /* Perform I/O dispatch on message queues */
776 input_msg(pio_mgr, msg_mgr_obj);
777 output_msg(pio_mgr, msg_mgr_obj);
781 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
782 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
783 /* Notify DSP Trace message */
784 print_dsp_debug_trace(pio_mgr);
788 } while (serviced != requested);
789 pio_mgr->dpc_sched = requested;
795 * ======== io_mbox_msg ========
796 * Main interrupt handler for the shared memory IO manager.
797 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
798 * schedules a DPC to dispatch I/O.
/*
 * io_mbox_msg() - mailbox interrupt handler entry: record the interrupt
 * value on the (single) device's IO manager, dispatch PM-class messages
 * immediately, swallow DEH reset notifications, and schedule the IO DPC
 * tasklet for everything else.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * the `flags` declaration, the dpc_req increment between the lock/unlock
 * and the early returns are missing.  Code kept byte-identical.
 */
800 void io_mbox_msg(u32 msg)
802 struct io_mgr *pio_mgr;
803 struct dev_object *dev_obj;
806 dev_obj = dev_get_first();
807 dev_get_io_mgr(dev_obj, &pio_mgr);
812 pio_mgr->intr_val = (u16)msg;
813 if (pio_mgr->intr_val & MBX_PM_CLASS)
814 io_dispatch_pm(pio_mgr);
816 if (pio_mgr->intr_val == MBX_DEH_RESET) {
817 pio_mgr->intr_val = 0;
/* Presumably dpc_req++ occurs inside this critical section -- line missing */
819 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
821 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
822 tasklet_schedule(&pio_mgr->dpc_tasklet);
828 * ======== io_request_chnl ========
830 * Request chanenel I/O from the DSP. Sets flags in shared memory, then
831 * interrupts the DSP.
/*
 * io_request_chnl() - request channel I/O from the DSP.  For input, mark
 * the channel busy in shared memory and hand back the PCPY mailbox class;
 * for output, record the channel in the manager's output mask.  The caller
 * interrupts the DSP with *mbx_val.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * the `sm` declaration, the early return and parts of the DBC_ASSERT
 * expressions are missing.  Code kept byte-identical.
 */
833 void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
834 u8 io_mode, u16 *mbx_val)
836 struct chnl_mgr *chnl_mgr_obj;
839 if (!pchnl || !mbx_val)
841 chnl_mgr_obj = io_manager->hchnl_mgr;
842 sm = io_manager->shared_mem;
843 if (io_mode == IO_INPUT) {
845 * Assertion fires if CHNL_AddIOReq() called on a stream
846 * which was cancelled, or attached to a dead board.
848 DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
849 (pchnl->dw_state == CHNL_STATEEOS));
850 /* Indicate to the DSP we have a buffer available for input */
851 set_chnl_busy(sm, pchnl->chnl_id);
852 *mbx_val = MBX_PCPY_CLASS;
853 } else if (io_mode == IO_OUTPUT) {
855 * This assertion fails if CHNL_AddIOReq() was called on a
856 * stream which was cancelled, or attached to a dead board.
858 DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
861 * Record the fact that we have a buffer available for
864 chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
866 DBC_ASSERT(io_mode); /* Shouldn't get here. */
873 * ======== iosm_schedule ========
874 * Schedule DPC for IO.
876 void iosm_schedule(struct io_mgr *io_manager)
883 /* Increment count of DPC's pending. */
884 spin_lock_irqsave(&io_manager->dpc_lock, flags);
885 io_manager->dpc_req++;
886 spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
889 tasklet_schedule(&io_manager->dpc_tasklet);
893 * ======== find_ready_output ========
894 * Search for a host output channel which is ready to send. If this is
895 * called as a result of servicing the DPC, then implement a round
896 * robin search; otherwise, this was called by a client thread (via
897 * IO_Dispatch()), so just start searching from the current channel id.
/*
 * find_ready_output() - find a host output channel ready to send.  When
 * called from the DPC (pchnl == NULL) it round-robins from the channel
 * after dw_last_output; otherwise it starts at pchnl's id.  Returns the
 * ready channel id, or OUTPUTNOTREADY if none is set in @mask.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * the `id`/`start_id` declarations, the shift/mask test inside the loop
 * and the return are missing.  Code kept byte-identical.
 */
899 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
900 struct chnl_object *pchnl, u32 mask)
902 u32 ret = OUTPUTNOTREADY;
907 NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
908 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
909 if (id >= CHNL_MAXCHANNELS)
918 chnl_mgr_obj->dw_last_output = id;
/* Wrap the search index and continue until we return to start_id */
922 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
924 } while (id != start_id);
931 * ======== input_chnl ========
932 * Dispatch a buffer on an input channel.
/*
 * input_chnl() - dispatch one buffer arriving on an input channel: read
 * size/id from shared memory, copy the data into the head IO request's
 * client buffer, handle zero-byte EOS, free the channel bit when no more
 * requests are queued, acknowledge the DSP, and notify the client.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * several declarations (`sm`, `bytes`, `chnl_id`, `dw_arg`), goto labels
 * and else branches are missing.  Code kept byte-identical.
 */
934 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
937 struct chnl_mgr *chnl_mgr_obj;
941 struct chnl_irp *chnl_packet_obj = NULL;
943 bool clear_chnl = false;
944 bool notify_client = false;
946 sm = pio_mgr->shared_mem;
947 chnl_mgr_obj = pio_mgr->hchnl_mgr;
949 /* Attempt to perform input */
953 bytes = sm->input_size * chnl_mgr_obj->word_size;
954 chnl_id = sm->input_id;
956 if (chnl_id >= CHNL_MAXCHANNELS) {
957 /* Shouldn't be here: would indicate corrupted shm. */
961 pchnl = chnl_mgr_obj->ap_channel[chnl_id];
962 if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
963 if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
964 if (!pchnl->pio_requests)
966 /* Get the I/O request, and attempt a transfer */
967 chnl_packet_obj = (struct chnl_irp *)
968 lst_get_head(pchnl->pio_requests);
969 if (chnl_packet_obj) {
971 if (pchnl->cio_reqs < 0)
974 * Ensure we don't overflow the client's
977 bytes = min(bytes, chnl_packet_obj->byte_size);
978 memcpy(chnl_packet_obj->host_sys_buf,
979 pio_mgr->input, bytes);
980 pchnl->bytes_moved += bytes;
981 chnl_packet_obj->byte_size = bytes;
982 chnl_packet_obj->dw_arg = dw_arg;
983 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
987 * This assertion fails if the DSP
988 * sends EOS more than once on this
991 if (pchnl->dw_state & CHNL_STATEEOS)
994 * Zero bytes indicates EOS. Update
995 * IOC status for this chirp, and also
998 chnl_packet_obj->status |=
1000 pchnl->dw_state |= CHNL_STATEEOS;
1002 * Notify that end of stream has
1005 ntfy_notify(pchnl->ntfy_obj,
1008 /* Tell DSP if no more I/O buffers available */
1009 if (!pchnl->pio_requests)
1011 if (LST_IS_EMPTY(pchnl->pio_requests)) {
1012 set_chnl_free(sm, pchnl->chnl_id);
1015 notify_client = true;
1018 * Input full for this channel, but we have no
1019 * buffers available. The channel must be
1020 * "idling". Clear out the physical input
1026 /* Input channel cancelled: clear input channel */
1030 /* DPC fired after host closed channel: clear input channel */
1034 /* Indicate to the DSP we have read the input */
1036 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1038 if (notify_client) {
1039 /* Notify client with IO completion record */
1040 notify_chnl_complete(pchnl, chnl_packet_obj);
1047 * ======== input_msg ========
1048 * Copies messages from shared memory to the message queues.
/*
 * input_msg() - copy messages the DSP placed in shared memory into the
 * per-node message queues.  Each message's fields are read with byte-swap
 * helpers, routed to the queue whose msgq_id matches, and either delivered
 * (frame moved free->used, client notified) or handled as an RMS exit
 * acknowledgement via hmsg_mgr->on_exit.  Finally the control block is
 * marked empty and the DSP is interrupted.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * loop/goto scaffolding, several declarations (`msg_input`, `addr`,
 * `input_empty`, `num_msgs`, `i`) and assignment targets of the
 * read_ext32_bit_dsp_data() calls are missing.  Code kept byte-identical.
 */
1050 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1055 struct msg_queue *msg_queue_obj;
1056 struct msg_frame *pmsg;
1057 struct msg_dspmsg msg;
1058 struct msg_ctrl *msg_ctr_obj;
1062 msg_ctr_obj = pio_mgr->msg_input_ctrl;
1063 /* Get the number of input messages to be read */
1064 input_empty = msg_ctr_obj->buf_empty;
1065 num_msgs = msg_ctr_obj->size;
1069 msg_input = pio_mgr->msg_input;
1070 for (i = 0; i < num_msgs; i++) {
1071 /* Read the next message */
1072 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
1074 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1075 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
1077 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1078 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
1080 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1081 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1083 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1084 msg_input += sizeof(struct msg_dspmsg);
1085 if (!hmsg_mgr->queue_list)
1088 /* Determine which queue to put the message in */
1090 (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
1091 dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
1092 "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
1093 msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
1095 * Interrupt may occur before shared memory and message
1096 * input locations have been set up. If all nodes were
1097 * cleaned up, hmsg_mgr->max_msgs should be 0.
1099 while (msg_queue_obj != NULL) {
1100 if (msg.msgq_id == msg_queue_obj->msgq_id) {
1102 if (msg.msg.dw_cmd == RMS_EXITACK) {
1104 * Call the node exit notification.
1105 * The exit message does not get
1108 (*hmsg_mgr->on_exit) ((void *)
1113 * Not an exit acknowledgement, queue
1116 if (!msg_queue_obj->msg_free_list)
1118 pmsg = (struct msg_frame *)lst_get_head
1119 (msg_queue_obj->msg_free_list);
1120 if (msg_queue_obj->msg_used_list
1122 pmsg->msg_data = msg;
1124 (msg_queue_obj->msg_used_list,
1125 (struct list_head *)pmsg);
1127 (msg_queue_obj->ntfy_obj,
1128 DSP_NODEMESSAGEREADY);
1130 (msg_queue_obj->sync_event);
1133 * No free frame to copy the
1136 pr_err("%s: no free msg frames,"
1137 " discarding msg\n",
/* Advance to the next registered queue, guarding against teardown */
1144 if (!hmsg_mgr->queue_list || !msg_queue_obj)
1147 (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
1148 (struct list_head *)
1152 /* Set the post SWI flag */
1154 /* Tell the DSP we've read the messages */
1155 msg_ctr_obj->buf_empty = true;
1156 msg_ctr_obj->post_swi = true;
1157 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1164 * ======== notify_chnl_complete ========
1166 * Signal the channel event, notifying the client that I/O has completed.
/*
 * notify_chnl_complete() - enqueue an IO completion record for the client
 * and, when appropriate, signal the channel's sync event and post the
 * DSP_STREAMIOCOMPLETION notification.  See the invariant note in the file
 * header about when the event must be signalled.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * the `signal_event` declaration, the cio_cs increment and the
 * `if (signal_event)` line are missing.  Code kept byte-identical.
 */
1168 static void notify_chnl_complete(struct chnl_object *pchnl,
1169 struct chnl_irp *chnl_packet_obj)
1173 if (!pchnl || !pchnl->sync_event ||
1174 !pchnl->pio_completions || !chnl_packet_obj)
1178 * Note: we signal the channel event only if the queue of IO
1179 * completions is empty. If it is not empty, the event is sure to be
1180 * signalled by the only IO completion list consumer:
1181 * bridge_chnl_get_ioc().
1183 signal_event = LST_IS_EMPTY(pchnl->pio_completions);
1184 /* Enqueue the IO completion info for the client */
1185 lst_put_tail(pchnl->pio_completions,
1186 (struct list_head *)chnl_packet_obj);
1189 if (pchnl->cio_cs > pchnl->chnl_packets)
1191 /* Signal the channel event (if not already set) that IO is complete */
1193 sync_set_event(pchnl->sync_event);
1195 /* Notify that IO is complete */
1196 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1202 * ======== output_chnl ========
1204 * Dispatch a buffer on an output channel.
/*
 * output_chnl() - dispatch one buffer on an output channel: pick a ready
 * channel (round-robin via find_ready_output when driven by the DPC), copy
 * the head IO request's data into the shared-memory output area, publish
 * id/size/arg to the DSP, interrupt it, and complete the request back to
 * the client (posting DSP_STREAMDONE on EOS).
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * declarations (`sm`, `chnl_id`, `dw_dsp_f_mask`), #else/#endif pairing
 * around the wordsize-specific stores and goto/return paths are missing.
 * Code kept byte-identical.
 */
1206 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1209 struct chnl_mgr *chnl_mgr_obj;
1212 struct chnl_irp *chnl_packet_obj;
1215 chnl_mgr_obj = pio_mgr->hchnl_mgr;
1216 sm = pio_mgr->shared_mem;
1217 /* Attempt to perform output */
1218 if (sm->output_full)
1221 if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1224 /* Look to see if both a PC and DSP output channel are ready */
1225 dw_dsp_f_mask = sm->dsp_free_mask;
1227 find_ready_output(chnl_mgr_obj, pchnl,
1228 (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
1229 if (chnl_id == OUTPUTNOTREADY)
1232 pchnl = chnl_mgr_obj->ap_channel[chnl_id];
1233 if (!pchnl || !pchnl->pio_requests) {
1234 /* Shouldn't get here */
1237 /* Get the I/O request, and attempt a transfer */
1238 chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
1239 if (!chnl_packet_obj)
1243 if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
1246 /* Record fact that no more I/O buffers available */
1247 if (LST_IS_EMPTY(pchnl->pio_requests))
1248 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
1250 /* Transfer buffer to DSP side */
1251 chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
1252 chnl_packet_obj->byte_size);
1253 memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1254 chnl_packet_obj->byte_size);
1255 pchnl->bytes_moved += chnl_packet_obj->byte_size;
1256 /* Write all 32 bits of arg */
1257 sm->arg = chnl_packet_obj->dw_arg;
1258 #if _CHNL_WORDSIZE == 2
1259 /* Access can be different SM access word size (e.g. 16/32 bit words) */
1260 sm->output_id = (u16) chnl_id;
1261 sm->output_size = (u16) (chnl_packet_obj->byte_size +
1262 chnl_mgr_obj->word_size - 1) /
1263 (u16) chnl_mgr_obj->word_size;
/* NOTE(review): the #else separating these two variants is missing here */
1265 sm->output_id = chnl_id;
1266 sm->output_size = (chnl_packet_obj->byte_size +
1267 chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
1269 sm->output_full = 1;
1270 /* Indicate to the DSP we have written the output */
1271 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1272 /* Notify client with IO completion record (keep EOS) */
1273 chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1274 notify_chnl_complete(pchnl, chnl_packet_obj);
1275 /* Notify if stream is done. */
1276 if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1277 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1284 * ======== output_msg ========
1285 * Copies messages from the message queues to the shared memory.
/*
 * output_msg() - copy up to max_msgs pending host->DSP messages from the
 * message manager's used list into the shared-memory output area (field by
 * field via byte-swap write helpers), recycle each frame to the free list,
 * update the output control block, and interrupt the DSP.
 *
 * NOTE(review): garbled extraction -- stray leading line numbers; braces,
 * declarations (`msg_output`, `num_msgs`, `output_empty`, `val`, `addr`,
 * `i`), the `if (!output_empty)` guard, and #else/#endif pairing around
 * the wordsize-specific size store are missing.  Code kept byte-identical.
 */
1287 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1292 struct msg_frame *pmsg;
1293 struct msg_ctrl *msg_ctr_obj;
1298 msg_ctr_obj = pio_mgr->msg_output_ctrl;
1300 /* Check if output has been cleared */
1301 output_empty = msg_ctr_obj->buf_empty;
1303 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1304 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1305 msg_output = pio_mgr->msg_output;
1306 /* Copy num_msgs messages into shared memory */
1307 for (i = 0; i < num_msgs; i++) {
1308 if (!hmsg_mgr->msg_used_list) {
1312 pmsg = (struct msg_frame *)
1313 lst_get_head(hmsg_mgr->msg_used_list);
/* Write msgq_id, dw_cmd, dw_arg1, dw_arg2 through the swap helper */
1316 val = (pmsg->msg_data).msgq_id;
1317 addr = (u32) &(((struct msg_dspmsg *)
1318 msg_output)->msgq_id);
1319 write_ext32_bit_dsp_data(
1320 pio_mgr->hbridge_context, addr, val);
1321 val = (pmsg->msg_data).msg.dw_cmd;
1322 addr = (u32) &((((struct msg_dspmsg *)
1323 msg_output)->msg).dw_cmd);
1324 write_ext32_bit_dsp_data(
1325 pio_mgr->hbridge_context, addr, val);
1326 val = (pmsg->msg_data).msg.dw_arg1;
1327 addr = (u32) &((((struct msg_dspmsg *)
1328 msg_output)->msg).dw_arg1);
1329 write_ext32_bit_dsp_data(
1330 pio_mgr->hbridge_context, addr, val);
1331 val = (pmsg->msg_data).msg.dw_arg2;
1332 addr = (u32) &((((struct msg_dspmsg *)
1333 msg_output)->msg).dw_arg2);
1334 write_ext32_bit_dsp_data(
1335 pio_mgr->hbridge_context, addr, val);
1336 msg_output += sizeof(struct msg_dspmsg);
1337 if (!hmsg_mgr->msg_free_list)
1339 lst_put_tail(hmsg_mgr->msg_free_list,
1340 (struct list_head *)pmsg);
1341 sync_set_event(hmsg_mgr->sync_event);
1346 hmsg_mgr->msgs_pending -= num_msgs;
1347 #if _CHNL_WORDSIZE == 2
1349 * Access can be different SM access word size
1350 * (e.g. 16/32 bit words)
1352 msg_ctr_obj->size = (u16) num_msgs;
/* NOTE(review): the #else separating these two variants is missing here */
1354 msg_ctr_obj->size = num_msgs;
1356 msg_ctr_obj->buf_empty = false;
1357 /* Set the post SWI flag */
1358 msg_ctr_obj->post_swi = true;
1359 /* Tell the DSP we have written the output. */
1360 sm_interrupt_dsp(pio_mgr->hbridge_context,
1369 * ======== register_shm_segs ========
1371 * Registers GPP SM segment with CMM.
1373 static int register_shm_segs(struct io_mgr *hio_mgr,
1374 struct cod_manager *cod_man,
/*
 * Resolve the SM heap boundary symbols from the COD (loader) symbol
 * table and register SM segment 0 with the CMM allocator.
 * NOTE(review): partial listing -- error-return bodies, some symbol
 * lookups (e.g. where dw_gpp_base_pa / ul_dsp_virt are obtained) and
 * the final return are elided here.
 */
1378 u32 ul_shm0_base = 0;
1380 u32 ul_shm0_rsrvd_start = 0;
1381 u32 ul_rsrvd_size = 0;
1384 u32 ul_shm_seg_id0 = 0;
1385 u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1388 * Read address and size info for first SM region.
1389 * Get start of 1st SM Heap region.
1392 cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
/* A zero symbol value means the DSP image lacks the SM heap layout. */
1393 if (ul_shm0_base == 0) {
1397 /* Get end of 1st SM Heap region */
1399 /* Get start and length of message part of shared memory */
1400 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1402 if (shm0_end == 0) {
1407 /* Start of Gpp reserved region */
1409 /* Get start and length of message part of shared memory */
1411 cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1412 &ul_shm0_rsrvd_start);
1413 if (ul_shm0_rsrvd_start == 0) {
1418 /* Register with CMM */
1420 status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
/* Drop any previously registered segment before re-registering. */
1422 status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
1426 /* Register new SM region(s) */
1427 if (!status && (shm0_end - ul_shm0_base) > 0) {
1428 /* Calc size (bytes) of SM the GPP can alloc from */
1430 (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1431 if (ul_rsrvd_size <= 0) {
1435 /* Calc size of SM DSP can alloc from */
1437 (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1438 if (ul_dsp_size <= 0) {
1442 /* First TLB entry reserved for Bridge SM use. */
1443 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
1444 /* Get size in bytes */
1446 hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
1449 * Calc byte offset used to convert GPP phys <-> DSP byte
1452 if (dw_gpp_base_pa > ul_dsp_virt)
1453 dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1455 dw_offset = ul_dsp_virt - dw_gpp_base_pa;
/*
 * The reserved region must lie above the DSP virtual base;
 * otherwise the layout is invalid (error path elided).
 */
1457 if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1462 * Calc Gpp phys base of SM region.
1463 * This is actually uncached kernel virtual address.
1466 ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1469 * Calc Gpp phys base of SM region.
1470 * This is the physical address.
1473 dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1475 /* Register SM Segment 0. */
1477 cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
1478 ul_rsrvd_size, dw_offset,
1480 ul_dsp_virt) ? CMM_ADDTODSPPA :
1482 (u32) (ul_shm0_base *
1483 hio_mgr->word_size),
1484 ul_dsp_size, &ul_shm_seg_id0,
1486 /* First SM region is seg_id = 1 */
1487 if (ul_shm_seg_id0 != 1)
1494 /* ZCPY IO routines. */
1496 * ======== IO_SHMcontrol ========
1497 * Sets the requested shm setting.
1499 int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
/*
 * Apply or query a shared-memory setting selected by 'desc'.
 * Only the CONFIG_TIDSPBRIDGE_DVFS cases are visible in this listing
 * (OPP table publication and OPP-request readback); the switch on
 * 'desc', other cases, and the return are elided.
 */
1501 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1503 struct dspbridge_platform_data *pdata =
1504 omap_dspbridge_dev->dev.platform_data;
1508 /* Update the shared memory with requested OPP information */
1510 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1517 * Update the shared memory with the voltage, frequency,
1518 * min and max frequency values for an OPP.
/*
 * vdd1_dsp_freq rows are indexed [opp][field]:
 * 0 = voltage, 1 = frequency, 2 = min freq, 3 = max freq
 * (as used below); table origin is outside this listing.
 */
1520 for (i = 0; i <= dsp_max_opps; i++) {
1521 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1522 voltage = vdd1_dsp_freq[i][0];
1523 dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1524 vdd1_dsp_freq[i][0]);
1525 hio_mgr->shared_mem->opp_table_struct.
1526 opp_point[i].frequency = vdd1_dsp_freq[i][1];
1527 dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1528 vdd1_dsp_freq[i][1]);
1529 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1530 min_freq = vdd1_dsp_freq[i][2];
1531 dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1532 vdd1_dsp_freq[i][2]);
1533 hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1534 max_freq = vdd1_dsp_freq[i][3];
1535 dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1536 vdd1_dsp_freq[i][3]);
1538 hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1540 dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1541 /* Update the current OPP number */
1542 if (pdata->dsp_get_opp)
1543 i = (*pdata->dsp_get_opp) ();
1544 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1545 dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1548 /* Get the OPP that DSP has requested */
/* NOTE(review): presumably 'pargs' points at a caller-owned u32. */
1549 *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1559 * ======== bridge_io_get_proc_load ========
1560 * Gets the Processor's Load information
1562 int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1563 struct dsp_procloadstat *proc_lstat)
/*
 * Snapshot the DSP load-monitor counters published in shared memory
 * into the caller's dsp_procloadstat structure.
 * NOTE(review): the opening brace, any argument validation and the
 * return statement are elided from this listing.
 */
1565 proc_lstat->curr_load =
1566 hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1567 proc_lstat->predicted_load =
1568 hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1569 proc_lstat->curr_dsp_freq =
1570 hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1571 proc_lstat->predicted_freq =
1572 hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1574 dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1575 "Pred Freq = %d\n", proc_lstat->curr_load,
1576 proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
1577 proc_lstat->predicted_freq);
/*
 * ======== io_sm_init ========
 * Module initialization hook for the shared-memory IO layer.
 * NOTE(review): the function body is elided from this listing --
 * confirm its contents against the full source before documenting
 * further.
 */
1581 void io_sm_init(void)
1586 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
1587 void print_dsp_debug_trace(struct io_mgr *hio_mgr)
/*
 * Print any new DSP trace text that has accumulated in the circular
 * trace buffer since the last call, handling both the linear case and
 * the wrap-around case. Advances the GPP-side read pointer.
 * NOTE(review): partial listing -- the empty-branch body and the
 * function close are elided.
 */
1589 u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1592 /* Get the DSP current pointer */
1593 ul_gpp_cur_pointer =
1594 *(u32 *) (hio_mgr->ul_trace_buffer_current);
/* Translate the DSP-side address into the GPP virtual address space. */
1595 ul_gpp_cur_pointer =
1596 hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
1597 hio_mgr->ul_dsp_va);
1599 /* No new debug messages available yet */
1600 if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
1602 } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
1603 /* Continuous data */
1604 ul_new_message_length =
1605 ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
1607 memcpy(hio_mgr->pmsg,
1608 (char *)hio_mgr->ul_gpp_read_pointer,
1609 ul_new_message_length);
1610 hio_mgr->pmsg[ul_new_message_length] = '\0';
1612 * Advance the GPP trace pointer to DSP current
1615 hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
1616 /* Print the trace messages */
1617 pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1618 } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
1619 /* Handle trace buffer wraparound */
/* First copy the tail: read pointer up to the buffer end ... */
1620 memcpy(hio_mgr->pmsg,
1621 (char *)hio_mgr->ul_gpp_read_pointer,
1622 hio_mgr->ul_trace_buffer_end -
1623 hio_mgr->ul_gpp_read_pointer);
1624 ul_new_message_length =
1625 ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
/* ... then the head: buffer begin up to the DSP's current pointer. */
1626 memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1627 hio_mgr->ul_gpp_read_pointer],
1628 (char *)hio_mgr->ul_trace_buffer_begin,
1629 ul_new_message_length);
1630 hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1631 hio_mgr->ul_gpp_read_pointer +
1632 ul_new_message_length] = '\0';
1634 * Advance the GPP trace pointer to DSP current
1637 hio_mgr->ul_gpp_read_pointer =
1638 hio_mgr->ul_trace_buffer_begin +
1639 ul_new_message_length;
1640 /* Print the trace messages */
1641 pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1647 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1649 * ======== print_dsp_trace_buffer ========
1650 * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1652 * hdeh_mgr: Handle to DEH manager object
1653 * number of extra carriage returns to generate.
1656 * -ENOMEM: Unable to allocate memory.
1658 * hdeh_mgr must be valid. Checked in bridge_deh_notify.
1660 int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
/*
 * Read the whole DSP trace buffer (bounded by the COD_TRACEBEG /
 * COD_TRACEEND symbols) into a kernel buffer, then print it line by
 * line starting from the DSP's current write position so output
 * appears in chronological order.
 * NOTE(review): partial listing -- several declarations, status
 * checks, else-branches and the final return/free are elided.
 */
1663 struct cod_manager *cod_mgr;
1667 u32 ul_num_bytes = 0;
1668 u32 ul_num_words = 0;
/* NOTE(review): word size hard-coded to 2 here -- presumably DSP
 * 16-bit words; confirm against the target's MAU size. */
1669 u32 ul_word_size = 2;
1676 struct bridge_dev_context *pbridge_context = hbridge_context;
1677 struct bridge_drv_interface *intf_fxns;
1678 struct dev_object *dev_obj = (struct dev_object *)
1679 pbridge_context->hdev_obj;
1681 status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1684 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1686 cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1692 cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1695 /* trace_cur_pos will hold the address of a DSP pointer */
1696 status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1702 ul_num_bytes = (ul_trace_end - ul_trace_begin);
1704 ul_num_words = ul_num_bytes * ul_word_size;
1705 status = dev_get_intf_fxns(dev_obj, &intf_fxns);
/* +2 leaves room for a forced NUL terminator past the data. */
1710 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1711 if (psz_buf != NULL) {
1712 /* Read trace buffer data */
1713 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1714 (u8 *)psz_buf, (u32)ul_trace_begin,
1720 /* Pack and do newline conversion */
1721 pr_debug("PrintDspTraceBuffer: "
1722 "before pack and unpack.\n");
1723 pr_debug("%s: DSP Trace Buffer Begin:\n"
1724 "=======================\n%s\n",
1727 /* Read the value at the DSP address in trace_cur_pos. */
1728 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1729 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1733 /* Pack and do newline conversion */
1734 pr_info("DSP Trace Buffer Begin:\n"
1735 "=======================\n%s\n",
1739 /* convert to offset */
1740 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1744 * The buffer is not full, find the end of the
1745 * data -- buf_end will be >= pszBuf after
1748 buf_end = &psz_buf[ul_num_bytes+1];
1749 /* DSP print position */
1750 trace_end = &psz_buf[trace_cur_pos];
1753 * Search buffer for a new_line and replace it
1754 * with '\0', then print as string.
1755 * Continue until end of buffer is reached.
/* Pass 1: print from the DSP write position to the buffer end. */
1757 str_beg = trace_end;
1758 ul_num_bytes = buf_end - str_beg;
1760 while (str_beg < buf_end) {
1761 new_line = strnchr(str_beg, ul_num_bytes,
1763 if (new_line && new_line < buf_end) {
1765 pr_debug("%s\n", str_beg);
1766 str_beg = ++new_line;
1767 ul_num_bytes = buf_end - str_beg;
1770 * Assume buffer empty if it contains
1773 if (*str_beg != '\0') {
1774 str_beg[ul_num_bytes] = 0;
1775 pr_debug("%s\n", str_beg);
1782 * Search buffer for a new_line and replace it
1783 * with '\0', then print as string.
1784 * Continue until buffer is exhausted.
/* Pass 2: print from the buffer start up to the DSP write position. */
1787 ul_num_bytes = trace_end - str_beg;
1789 while (str_beg < trace_end) {
1790 new_line = strnchr(str_beg, ul_num_bytes, '\n');
1791 if (new_line != NULL && new_line < trace_end) {
1793 pr_debug("%s\n", str_beg);
1794 str_beg = ++new_line;
1795 ul_num_bytes = trace_end - str_beg;
1798 * Assume buffer empty if it contains
1801 if (*str_beg != '\0') {
1802 str_beg[ul_num_bytes] = 0;
1803 pr_debug("%s\n", str_beg);
1805 str_beg = trace_end;
1810 pr_info("\n=======================\n"
1811 "DSP Trace Buffer End:\n");
1818 dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
1823 * dump_dsp_stack() - This function dumps the data on the DSP stack.
1824 * @bridge_context: Bridge driver's device context pointer.
1827 int dump_dsp_stack(struct bridge_dev_context *bridge_context)
/*
 * After an MMU fault, poll the DSP trace buffer for the fault-dump
 * magic header, read the DSP's register/stack dump into a kernel
 * buffer, and print a decoded crash report (crash position, execution
 * context, CPU registers, and a symbolized stack walk).
 * NOTE(review): partial listing -- the local dump-info struct body,
 * several error-path bodies, else-branches and the function close are
 * elided.
 */
1830 struct cod_manager *code_mgr;
1831 struct node_mgr *node_mgr;
1837 } mmu_fault_dbg_info;
/* C64x+ control-register names, in the order they appear in the dump. */
1847 const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
1848 "IRP", "NRP", "AMR", "SSR",
1849 "ILC", "RILC", "IER", "CSR"};
1850 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
1851 struct bridge_drv_interface *intf_fxns;
1852 struct dev_object *dev_object = bridge_context->hdev_obj;
1854 status = dev_get_cod_mgr(dev_object, &code_mgr);
1856 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
1861 status = dev_get_node_manager(dev_object, &node_mgr);
1863 pr_debug("%s: Failed on dev_get_node_manager.\n",
1870 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
1872 cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
1873 pr_debug("%s: trace_begin Value 0x%x\n",
1874 __func__, trace_begin);
1876 pr_debug("%s: Failed on cod_get_sym_value.\n",
1880 status = dev_get_intf_fxns(dev_object, &intf_fxns);
1882 * Check for the "magic number" in the trace buffer. If it has
1883 * yet to appear then poll the trace buffer to wait for it. Its
1884 * appearance signals that the DSP has finished dumping its state.
1886 mmu_fault_dbg_info.head[0] = 0;
1887 mmu_fault_dbg_info.head[1] = 0;
1890 while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
1891 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
1892 poll_cnt < POLL_MAX) {
1894 /* Read DSP dump size from the DSP trace buffer... */
1895 status = (*intf_fxns->pfn_brd_read)(bridge_context,
1896 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
1897 sizeof(mmu_fault_dbg_info), 0);
/* Bail out if the magic header never showed up within POLL_MAX tries. */
1905 if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
1906 mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
1908 pr_err("%s:No DSP MMU-Fault information available.\n",
1914 total_size = mmu_fault_dbg_info.size;
1915 /* Limit the size in case DSP went crazy */
1916 if (total_size > MAX_MMU_DBGBUFF)
1917 total_size = MAX_MMU_DBGBUFF;
/* GFP_ATOMIC: this may run from fault context where sleeping is unsafe. */
1919 buffer = kzalloc(total_size, GFP_ATOMIC);
1922 pr_debug("%s: Failed to "
1923 "allocate stack dump buffer.\n", __func__);
1927 buffer_beg = buffer;
/* buffer_end is computed in u32 units (total_size is in bytes). */
1928 buffer_end = buffer + total_size / 4;
1930 /* Read bytes from the DSP trace buffer... */
1931 status = (*intf_fxns->pfn_brd_read)(bridge_context,
1932 (u8 *)buffer, (u32)trace_begin,
1935 pr_debug("%s: Failed to Read Trace Buffer.\n",
1940 pr_err("\nAproximate Crash Position:\n"
1941 "--------------------------\n");
/* Fixed offsets into the dump: see the DSP-side dump layout. */
1943 exc_type = buffer[3];
1945 i = buffer[79]; /* IRP */
1947 i = buffer[80]; /* NRP */
1950 cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
/* Try to resolve the faulting PC to a loaded node symbol. */
1956 if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
1957 0x1000, &offset_output, name) == 0))
1958 pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
1961 pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
1965 pr_err("\nExecution Info:\n"
1966 "---------------\n");
1968 if (*buffer < ARRAY_SIZE(exec_ctxt)) {
1969 pr_err("Execution context \t%s\n",
1970 exec_ctxt[*buffer++]);
1972 pr_err("Execution context corrupt\n");
1976 pr_err("Task Handle\t\t0x%x\n", *buffer++);
1977 pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
1978 pr_err("Stack Top\t\t0x%x\n", *buffer++);
1979 pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
1980 pr_err("Stack Size\t\t0x%x\n", *buffer++);
1981 pr_err("Stack Size In Use\t0x%x\n", *buffer++);
1983 pr_err("\nCPU Registers\n"
1984 "---------------\n");
/* A-register file; A4/A6/A8 double as C ABI argument registers. */
1986 for (i = 0; i < 32; i++) {
1987 if (i == 4 || i == 6 || i == 8)
1988 pr_err("A%d 0x%-8x [Function Argument %d]\n",
1991 pr_err("A15 0x%-8x [Frame Pointer]\n",
1994 pr_err("A%d 0x%x\n", i, *buffer++);
1997 pr_err("\nB0 0x%x\n", *buffer++);
1998 pr_err("B1 0x%x\n", *buffer++);
1999 pr_err("B2 0x%x\n", *buffer++);
/* B3 holds the return address; try to symbolize it too. */
2001 if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2002 *buffer, 0x1000, &offset_output, name) == 0))
2004 pr_err("B3 0x%-8x [Function Return Pointer:"
2005 " \"%s\" + 0x%x]\n", *buffer, name,
2006 *buffer - offset_output);
2008 pr_err("B3 0x%-8x [Function Return Pointer:"
2009 "Unable to match to a symbol.]\n", *buffer);
2013 for (i = 4; i < 32; i++) {
2014 if (i == 4 || i == 6 || i == 8)
2015 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2018 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2021 pr_err("B%d 0x%x\n", i, *buffer++);
2026 for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2027 pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
/* Walk the remainder of the dump as a raw stack, symbolizing
 * anything that looks like a code address. */
2032 for (i = 0; buffer < buffer_end; i++, buffer++) {
2033 if ((*buffer > dyn_ext_base) && (
2034 node_find_addr(node_mgr, *buffer , 0x600,
2035 &offset_output, name) == 0))
2036 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2038 *buffer - offset_output);
2040 pr_err("[%d] 0x%x\n", i, *buffer);
2049 * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side
2050 * @bridge_context: Bridge driver's device context pointer.
2053 void dump_dl_modules(struct bridge_dev_context *bridge_context)
/*
 * Walk the DSP-side _DLModules linked list and print each dynamically
 * loaded module's section load addresses and section name strings.
 * The per-module buffer is grown on demand and freed at the end.
 * NOTE(review): partial listing -- several error-path bodies, the
 * for-loop header at the section iteration, and the loop/function
 * close are elided.
 */
2055 struct cod_manager *code_mgr;
2056 struct bridge_drv_interface *intf_fxns;
2057 struct bridge_dev_context *bridge_ctxt = bridge_context;
2058 struct dev_object *dev_object = bridge_ctxt->hdev_obj;
2059 struct modules_header modules_hdr;
2060 struct dll_module *module_struct = NULL;
2061 u32 module_dsp_addr;
2063 u32 module_struct_size = 0;
2068 status = dev_get_intf_fxns(dev_object, &intf_fxns);
2070 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2074 status = dev_get_cod_mgr(dev_object, &code_mgr);
2076 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2081 /* Lookup the address of the modules_header structure */
2082 status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2084 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2089 pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2091 /* Copy the modules_header structure from DSP memory. */
2092 status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
2093 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2096 pr_debug("%s: Failed failed to read modules header.\n",
2101 module_dsp_addr = modules_hdr.first_module;
2102 module_size = modules_hdr.first_module_size;
2104 pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2107 pr_err("\nDynamically Loaded Modules:\n"
2108 "---------------------------\n");
2110 /* For each dll_module structure in the list... */
/* A zero size terminates the list. */
2111 while (module_size) {
2113 * Allocate/re-allocate memory to hold the dll_module
2114 * structure. The memory is re-allocated only if the existing
2115 * allocation is too small.
2117 if (module_size > module_struct_size) {
2118 kfree(module_struct);
/* +128 slack avoids a reallocation for slightly larger modules. */
2119 module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2120 module_struct_size = module_size+128;
2121 pr_debug("%s: allocated module struct %p %d\n",
2122 __func__, module_struct, module_struct_size);
2126 /* Copy the dll_module structure from DSP memory */
2127 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2128 (u8 *)module_struct, module_dsp_addr, module_size, 0);
2132 "%s: Failed to read dll_module stuct for 0x%x.\n",
2133 __func__, module_dsp_addr);
2137 /* Update info regarding the _next_ module in the list. */
2138 module_dsp_addr = module_struct->next_module;
2139 module_size = module_struct->next_module_size;
2141 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2142 __func__, module_dsp_addr, module_size,
2143 module_struct->num_sects);
2146 * The section name strings start immediately following
2147 * the array of dll_sect structures.
2149 sect_str = (char *) &module_struct->
2150 sects[module_struct->num_sects];
2151 pr_err("%s\n", sect_str);
2154 * Advance to the first section name string.
2155 * Each string follows the one before.
2157 sect_str += strlen(sect_str) + 1;
2159 /* Access each dll_sect structure and its name string. */
2161 sect_ndx < module_struct->num_sects; sect_ndx++) {
2162 pr_err(" Section: 0x%x ",
2163 module_struct->sects[sect_ndx].sect_load_adr);
/* Guard against walking past the copied module buffer. */
2165 if (((u32) sect_str - (u32) module_struct) <
2166 module_struct_size) {
2167 pr_err("%s\n", sect_str);
2168 /* Each string follows the one before. */
2169 sect_str += strlen(sect_str)+1;
2171 pr_err("<string error>\n");
2172 pr_debug("%s: section name sting address "
2173 "is invalid %p\n", __func__, sect_str);
2178 kfree(module_struct);