4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Processor interface at the driver level.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ------------------------------------ Host OS */
21 #include <linux/dma-mapping.h>
22 #include <linux/scatterlist.h>
23 #include <dspbridge/host_os.h>
25 /* ----------------------------------- DSP/BIOS Bridge */
26 #include <dspbridge/dbdefs.h>
28 /* ----------------------------------- Trace & Debug */
29 #include <dspbridge/dbc.h>
31 /* ----------------------------------- OS Adaptation Layer */
32 #include <dspbridge/list.h>
33 #include <dspbridge/ntfy.h>
34 #include <dspbridge/sync.h>
35 /* ----------------------------------- Bridge Driver */
36 #include <dspbridge/dspdefs.h>
37 #include <dspbridge/dspdeh.h>
38 /* ----------------------------------- Platform Manager */
39 #include <dspbridge/cod.h>
40 #include <dspbridge/dev.h>
41 #include <dspbridge/procpriv.h>
43 /* ----------------------------------- Resource Manager */
44 #include <dspbridge/mgr.h>
45 #include <dspbridge/node.h>
46 #include <dspbridge/nldr.h>
47 #include <dspbridge/rmm.h>
49 /* ----------------------------------- Others */
50 #include <dspbridge/dbdcd.h>
51 #include <dspbridge/msg.h>
52 #include <dspbridge/dspioctl.h>
53 #include <dspbridge/drv.h>
56 /* ----------------------------------- This */
57 #include <dspbridge/proc.h>
58 #include <dspbridge/pwr.h>
60 #include <dspbridge/resourcecleanup.h>
61 /* ----------------------------------- Defines, Data Structures, Typedefs */
/* Max length of the command line handed to the loader (see proc_auto_start) */
62 #define MAXCMDLINELEN 255
/* printf template for the "PROC_ID=<n>" env var prepended in proc_load */
63 #define PROC_ENVPROCID "PROC_ID=%d"
/* Room for "PROC_ID=" (8 chars) plus up to 5 digits incl. NUL */
64 #define MAXPROCIDLEN (8 + 5)
65 #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
66 #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */
67 #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
69 #define DSP_CACHE_LINE 128
/* Extracts the buffer-mode bits (RBUF/WBUF) from map attributes */
71 #define BUFMODE_MASK (3 << 14)
73 /* Buffer modes from DSP perspective */
74 #define RBUF 0x4000 /* Input buffer */
75 #define WBUF 0x8000 /* Output Buffer */
/* Platform device, used as the DMA/device handle throughout this file */
77 extern struct device *bridge;
79 /* ----------------------------------- Globals */
81 /* The proc_object structure. */
/*
 * NOTE(review): the "struct proc_object {" opener is not visible in this
 * sampled extraction; the fields below belong to that structure — confirm
 * against the full file.
 */
83 struct list_head link; /* Link to next proc_object */
84 struct dev_object *hdev_obj; /* Device this PROC represents */
85 u32 process; /* Process owning this Processor */
86 struct mgr_object *hmgr_obj; /* Manager Object Handle */
87 u32 attach_count; /* Processor attach count */
88 u32 processor_id; /* Processor number */
89 u32 utimeout; /* Time out count */
90 enum dsp_procstate proc_state; /* Processor state */
91 u32 ul_unit; /* DDSP unit number */
92 bool is_already_attached; /*
93 * True if the Device below has
96 struct ntfy_object *ntfy_obj; /* Manages notifications */
97 /* Bridge Context Handle */
98 struct bridge_dev_context *hbridge_context;
99 /* Function interface to Bridge driver */
100 struct bridge_drv_interface *intf_fxns;
102 struct list_head proc_list;
/* Serializes proc_map/proc_un_map critical sections (see proc_map) */
107 DEFINE_MUTEX(proc_lock); /* For critical sections */
109 /* ----------------------------------- Function Prototypes */
110 static int proc_monitor(struct proc_object *proc_obj);
111 static s32 get_envp_count(char **envp);
112 static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
113 s32 cnew_envp, char *sz_var);
115 /* remember mapping information */
116 static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
117 u32 mpu_addr, u32 dsp_addr, u32 size)
119 struct dmm_map_object *map_obj;
121 u32 num_usr_pgs = size / PG_SIZE4K;
123 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
127 map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
129 pr_err("%s: kzalloc failed\n", __func__);
132 INIT_LIST_HEAD(&map_obj->link);
134 map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
136 if (!map_obj->pages) {
137 pr_err("%s: kzalloc failed\n", __func__);
142 map_obj->mpu_addr = mpu_addr;
143 map_obj->dsp_addr = dsp_addr;
144 map_obj->size = size;
145 map_obj->num_usr_pgs = num_usr_pgs;
147 spin_lock(&pr_ctxt->dmm_map_lock);
148 list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
149 spin_unlock(&pr_ctxt->dmm_map_lock);
154 static void remove_mapping_information(struct process_context *pr_ctxt,
157 struct dmm_map_object *map_obj;
159 pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr);
161 spin_lock(&pr_ctxt->dmm_map_lock);
162 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
163 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n",
168 if (map_obj->dsp_addr == dsp_addr) {
169 pr_debug("%s: match, deleting map info\n", __func__);
170 list_del(&map_obj->link);
171 kfree(map_obj->dma_info.sg);
172 kfree(map_obj->pages);
176 pr_debug("%s: candidate didn't match\n", __func__);
179 pr_err("%s: failed to find given map info\n", __func__);
181 spin_unlock(&pr_ctxt->dmm_map_lock);
184 static int match_containing_map_obj(struct dmm_map_object *map_obj,
185 u32 mpu_addr, u32 size)
187 u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
189 return mpu_addr >= map_obj->mpu_addr &&
190 mpu_addr + size <= map_obj_end;
193 static struct dmm_map_object *find_containing_mapping(
194 struct process_context *pr_ctxt,
195 u32 mpu_addr, u32 size)
197 struct dmm_map_object *map_obj;
198 pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
201 spin_lock(&pr_ctxt->dmm_map_lock);
202 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
203 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
208 if (match_containing_map_obj(map_obj, mpu_addr, size)) {
209 pr_debug("%s: match!\n", __func__);
213 pr_debug("%s: no match!\n", __func__);
218 spin_unlock(&pr_ctxt->dmm_map_lock);
222 static int find_first_page_in_cache(struct dmm_map_object *map_obj,
223 unsigned long mpu_addr)
225 u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
226 u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
227 int pg_index = requested_base_page - mapped_base_page;
229 if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
230 pr_err("%s: failed (got %d)\n", __func__, pg_index);
234 pr_debug("%s: first page is %d\n", __func__, pg_index);
238 static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
241 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
242 pg_i, map_obj->num_usr_pgs);
244 if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
245 pr_err("%s: requested pg_i %d is out of mapped range\n",
250 return map_obj->pages[pg_i];
/*
 * proc_attach: create and register a proc_object for the given processor,
 * store it in the caller's process context, and notify clients of the
 * attach.  Returns an existing handle immediately if the context already
 * holds one.
 * NOTE(review): this view is a sampled extraction (original line numbers
 * are embedded in each line); braces, returns and several error-handling
 * lines between the samples are elided — only visible behavior is noted.
 */
254 * ======== proc_attach ========
256 * Prepare for communication with a particular DSP processor, and return
257 * a handle to the processor object.
260 proc_attach(u32 processor_id,
261 const struct dsp_processorattrin *attr_in,
262 void **ph_processor, struct process_context *pr_ctxt)
265 struct dev_object *hdev_obj;
266 struct proc_object *p_proc_object = NULL;
267 struct mgr_object *hmgr_obj = NULL;
268 struct drv_object *hdrv_obj = NULL;
269 struct drv_data *drv_datap = dev_get_drvdata(bridge);
272 DBC_REQUIRE(refs > 0);
273 DBC_REQUIRE(ph_processor != NULL);
/* Fast path: this process already attached; hand back the same handle */
275 if (pr_ctxt->hprocessor) {
276 *ph_processor = pr_ctxt->hprocessor;
280 /* Get the Driver and Manager Object Handles */
281 if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
283 pr_err("%s: Failed to get object handles\n", __func__);
285 hdrv_obj = drv_datap->drv_object;
286 hmgr_obj = drv_datap->mgr_object;
290 /* Get the Device Object */
291 status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
294 status = dev_get_dev_type(hdev_obj, &dev_type);
299 /* If we made it this far, create the Proceesor object: */
300 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
301 /* Fill out the Processor Object: */
302 if (p_proc_object == NULL) {
306 p_proc_object->hdev_obj = hdev_obj;
307 p_proc_object->hmgr_obj = hmgr_obj;
308 p_proc_object->processor_id = dev_type;
309 /* Store TGID instead of process handle */
310 p_proc_object->process = current->tgid;
312 INIT_LIST_HEAD(&p_proc_object->proc_list);
/* Caller-supplied timeout wins; otherwise fall back to the default */
315 p_proc_object->utimeout = attr_in->utimeout;
317 p_proc_object->utimeout = PROC_DFLT_TIMEOUT;
319 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
321 status = dev_get_bridge_context(hdev_obj,
322 &p_proc_object->hbridge_context);
324 kfree(p_proc_object);
326 kfree(p_proc_object);
331 /* Create the Notification Object */
332 /* This is created with no event mask, no notify mask
333 * and no valid handle to the notification. They all get
334 * filled up when proc_register_notify is called */
335 p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
337 if (p_proc_object->ntfy_obj)
338 ntfy_init(p_proc_object->ntfy_obj);
343 /* Insert the Processor Object into the DEV List.
344 * Return handle to this Processor Object:
345 * Find out if the Device is already attached to a
346 * Processor. If so, return AlreadyAttached status */
347 lst_init_elem(&p_proc_object->link)
348 status = dev_insert_proc_object(p_proc_object->hdev_obj,
351 is_already_attached);
353 if (p_proc_object->is_already_attached)
/* Insert failed: tear down the notification object and the proc object */
356 if (p_proc_object->ntfy_obj) {
357 ntfy_delete(p_proc_object->ntfy_obj);
358 kfree(p_proc_object->ntfy_obj);
361 kfree(p_proc_object);
364 *ph_processor = (void *)p_proc_object;
365 pr_ctxt->hprocessor = *ph_processor;
366 (void)proc_notify_clients(p_proc_object,
367 DSP_PROCESSORATTACH);
370 /* Don't leak memory if status is failed */
371 kfree(p_proc_object);
374 DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
375 (!status && p_proc_object) ||
376 (status == 0 && p_proc_object));
381 static int get_exec_file(struct cfg_devnode *dev_node_obj,
382 struct dev_object *hdev_obj,
383 u32 size, char *exec_file)
387 struct drv_data *drv_datap = dev_get_drvdata(bridge);
389 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
394 if (dev_type == DSP_UNIT) {
395 if (!drv_datap || !drv_datap->base_img)
398 if (strlen(drv_datap->base_img) > size)
401 strcpy(exec_file, drv_datap->base_img);
402 } else if (dev_type == IVA_UNIT && iva_img) {
403 len = strlen(iva_img);
404 strncpy(exec_file, iva_img, len + 1);
/*
 * proc_auto_start: build a temporary proc_object, stop the board, resolve
 * the default executable name, then load and start it.  The temporary
 * object is freed before returning.
 * NOTE(review): sampled extraction — braces, returns and error paths
 * between the visible lines are elided.
 */
413 * ======== proc_auto_start ======== =
415 * A Particular device gets loaded with the default image
416 * if the AutoStart flag is set.
418 * hdev_obj: Handle to the Device
420 * 0: On Successful Loading
421 * -EPERM General Failure
426 int proc_auto_start(struct cfg_devnode *dev_node_obj,
427 struct dev_object *hdev_obj)
430 struct proc_object *p_proc_object;
431 char sz_exec_file[MAXCMDLINELEN];
433 struct mgr_object *hmgr_obj = NULL;
434 struct drv_data *drv_datap = dev_get_drvdata(bridge);
437 DBC_REQUIRE(refs > 0);
438 DBC_REQUIRE(dev_node_obj != NULL);
439 DBC_REQUIRE(hdev_obj != NULL);
441 /* Create a Dummy PROC Object */
442 if (!drv_datap || !drv_datap->mgr_object) {
444 pr_err("%s: Failed to retrieve the object handle\n", __func__);
447 hmgr_obj = drv_datap->mgr_object;
450 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
451 if (p_proc_object == NULL) {
455 p_proc_object->hdev_obj = hdev_obj;
456 p_proc_object->hmgr_obj = hmgr_obj;
457 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
459 status = dev_get_bridge_context(hdev_obj,
460 &p_proc_object->hbridge_context);
464 /* Stop the Device, put it into standby mode */
465 status = proc_stop(p_proc_object);
470 /* Get the default executable for this board... */
471 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
472 p_proc_object->processor_id = dev_type;
473 status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
476 argv[0] = sz_exec_file;
478 /* ...and try to load it: */
479 status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
481 status = proc_start(p_proc_object);
/* Free the COFF-name cache before discarding the dummy object */
483 kfree(p_proc_object->psz_last_coff);
484 p_proc_object->psz_last_coff = NULL;
486 kfree(p_proc_object);
/*
 * proc_ctrl: dispatch an ioctl-style command.  Power commands (deep sleep,
 * emergency sleep, wakeup) are intercepted and routed to the PWR module;
 * anything else falls through to the bridge driver's pfn_dev_cntrl hook.
 * NOTE(review): sampled extraction — the final else branch and returns are
 * elided between the visible lines.
 */
492 * ======== proc_ctrl ========
494 * Pass control information to the GPP device driver managing the
497 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
498 * application developer's API.
499 * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
500 * Operation. arg can be null.
502 int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
505 struct proc_object *p_proc_object = hprocessor;
508 DBC_REQUIRE(refs > 0);
511 /* intercept PWR deep sleep command */
512 if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
513 timeout = arg->cb_data;
514 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
516 /* intercept PWR emergency sleep command */
517 else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
518 timeout = arg->cb_data;
519 status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
520 } else if (dw_cmd == PWR_DEEPSLEEP) {
521 /* timeout = arg->cb_data; */
522 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
524 /* intercept PWR wake commands */
525 else if (dw_cmd == BRDIOCTL_WAKEUP) {
526 timeout = arg->cb_data;
527 status = pwr_wake_dsp(timeout);
528 } else if (dw_cmd == PWR_WAKEUP) {
529 /* timeout = arg->cb_data; */
530 status = pwr_wake_dsp(timeout);
/* Not a PWR command: forward to the bridge driver's control hook */
532 if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
533 (p_proc_object->hbridge_context, dw_cmd,
/*
 * proc_detach: notify clients, release the notification object and cached
 * COFF name, remove the proc_object from the device list and free it,
 * clearing the handle in the process context.
 * NOTE(review): sampled extraction — braces and returns between the
 * visible lines are elided.
 */
547 * ======== proc_detach ========
549 * Destroys the Processor Object. Removes the notification from the Dev
552 int proc_detach(struct process_context *pr_ctxt)
555 struct proc_object *p_proc_object = NULL;
557 DBC_REQUIRE(refs > 0);
559 p_proc_object = (struct proc_object *)pr_ctxt->hprocessor;
562 /* Notify the Client */
563 ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
564 /* Remove the notification memory */
565 if (p_proc_object->ntfy_obj) {
566 ntfy_delete(p_proc_object->ntfy_obj);
567 kfree(p_proc_object->ntfy_obj);
570 kfree(p_proc_object->psz_last_coff);
571 p_proc_object->psz_last_coff = NULL;
572 /* Remove the Proc from the DEV List */
573 (void)dev_remove_proc_object(p_proc_object->hdev_obj,
574 (u32) p_proc_object);
575 /* Free the Processor Object */
576 kfree(p_proc_object);
577 pr_ctxt->hprocessor = NULL;
/*
 * proc_enum_nodes: fetch the device's node manager and delegate the node
 * enumeration to node_enum_nodes().
 * NOTE(review): sampled extraction — the trailing parameters and returns
 * are elided between the visible lines.
 */
586 * ======== proc_enum_nodes ========
588 * Enumerate and get configuration information about nodes allocated
589 * on a DSP processor.
591 int proc_enum_nodes(void *hprocessor, void **node_tab,
592 u32 node_tab_size, u32 *pu_num_nodes,
596 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
597 struct node_mgr *hnode_mgr = NULL;
599 DBC_REQUIRE(refs > 0);
600 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
601 DBC_REQUIRE(pu_num_nodes != NULL);
602 DBC_REQUIRE(pu_allocated != NULL);
605 if (!(dev_get_node_manager(p_proc_object->hdev_obj,
608 status = node_enum_nodes(hnode_mgr, node_tab,
621 /* Cache operation against kernel address instead of users */
/*
 * build_dma_sg: populate map_obj->dma_info.sg with one scatterlist entry
 * per page covering [start, start + len), using the cached page array
 * (get_mapping_page).  Visible checks: missing page and IS_ERR page are
 * reported; the final entry count is validated against
 * dma_info.num_pages.
 * NOTE(review): sampled extraction — the loop header, counters and return
 * statements are elided between the visible lines.
 */
622 static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
623 ssize_t len, int pg_i)
626 unsigned long offset;
629 struct scatterlist *sg = map_obj->dma_info.sg;
632 page = get_mapping_page(map_obj, pg_i);
634 pr_err("%s: no page for %08lx\n", __func__, start);
637 } else if (IS_ERR(page)) {
638 pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
/* Each sg entry covers at most the remainder of one page */
644 offset = start & ~PAGE_MASK;
645 rest = min_t(ssize_t, PAGE_SIZE - offset, len);
647 sg_set_page(&sg[i], page, rest, offset);
654 if (i != map_obj->dma_info.num_pages) {
655 pr_err("%s: bad number of sg iterations\n", __func__);
/*
 * memory_regain_ownership: DMA-unmap the scatterlist previously mapped by
 * memory_give_ownership for [start, start + len), after validating that
 * the stored direction and page count match the request.  Clears
 * dma_info.sg afterwards.
 * NOTE(review): sampled extraction — returns and the sg kfree between the
 * visible lines are elided.
 */
664 static int memory_regain_ownership(struct dmm_map_object *map_obj,
665 unsigned long start, ssize_t len, enum dma_data_direction dir)
668 unsigned long first_data_page = start >> PAGE_SHIFT;
669 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
670 /* calculating the number of pages this area spans */
671 unsigned long num_pages = last_data_page - first_data_page + 1;
672 struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
/* The unmap must mirror the original map exactly */
677 if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
678 pr_err("%s: dma info doesn't match given params\n", __func__);
682 dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
684 pr_debug("%s: dma_map_sg unmapped\n", __func__);
688 map_obj->dma_info.sg = NULL;
694 /* Cache operation against kernel address instead of users */
/*
 * memory_give_ownership: build and DMA-map a scatterlist for the pages
 * spanning [start, start + len) so the device owns the buffer for the
 * duration of the transfer.  Stores sg/dir/num_pages in map_obj->dma_info.
 * NOTE(review): sampled extraction — braces, returns and the error-unwind
 * labels between the visible lines are elided.
 */
695 static int memory_give_ownership(struct dmm_map_object *map_obj,
696 unsigned long start, ssize_t len, enum dma_data_direction dir)
698 int pg_i, ret, sg_num;
699 struct scatterlist *sg;
700 unsigned long first_data_page = start >> PAGE_SHIFT;
701 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
702 /* calculating the number of pages this area spans */
703 unsigned long num_pages = last_data_page - first_data_page + 1;
705 pg_i = find_first_page_in_cache(map_obj, start);
707 pr_err("%s: failed to find first page in cache\n", __func__);
712 sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
714 pr_err("%s: kcalloc failed\n", __func__);
719 sg_init_table(sg, num_pages);
721 /* cleanup a previous sg allocation */
722 /* this may happen if application doesn't signal for e/o DMA */
723 kfree(map_obj->dma_info.sg);
725 map_obj->dma_info.sg = sg;
726 map_obj->dma_info.dir = dir;
727 map_obj->dma_info.num_pages = num_pages;
729 ret = build_dma_sg(map_obj, start, len, pg_i);
733 sg_num = dma_map_sg(bridge, sg, num_pages, dir);
735 pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
740 pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
741 map_obj->dma_info.sg_num = sg_num;
/* error unwind: drop the sg reference so a later call starts clean */
747 map_obj->dma_info.sg = NULL;
/*
 * proc_begin_dma: start a DMA-style cache operation on a user buffer.
 * Looks up the cached mapping containing [pmpu_addr, pmpu_addr + ul_size)
 * and hands ownership of the pages to the device via
 * memory_give_ownership().
 * NOTE(review): sampled extraction — braces, status assignments and
 * returns between the visible lines are elided.
 */
752 int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
753 enum dma_data_direction dir)
755 /* Keep STATUS here for future additions to this function */
757 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
758 struct dmm_map_object *map_obj;
760 DBC_REQUIRE(refs > 0);
767 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
771 /* find requested memory are in cached mapping information */
772 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
774 pr_err("%s: find_containing_mapping failed\n", __func__);
779 if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
780 pr_err("%s: InValid address parameters %p %x\n",
781 __func__, pmpu_addr, ul_size);
/*
 * proc_end_dma: finish a DMA-style cache operation — the mirror of
 * proc_begin_dma.  Looks up the containing mapping and returns ownership
 * of the pages to the CPU via memory_regain_ownership().
 * NOTE(review): sampled extraction — braces, status assignments and
 * returns between the visible lines are elided.
 */
790 int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
791 enum dma_data_direction dir)
793 /* Keep STATUS here for future additions to this function */
795 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
796 struct dmm_map_object *map_obj;
798 DBC_REQUIRE(refs > 0);
805 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
809 /* find requested memory are in cached mapping information */
810 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
812 pr_err("%s: find_containing_mapping failed\n", __func__);
817 if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
818 pr_err("%s: InValid address parameters %p %x\n",
819 __func__, pmpu_addr, ul_size);
829 * ======== proc_flush_memory ========
833 int proc_flush_memory(void *hprocessor, void *pmpu_addr,
834 u32 ul_size, u32 ul_flags)
836 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
838 return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
842 * ======== proc_invalidate_memory ========
844 * Invalidates the memory specified
846 int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
848 enum dma_data_direction dir = DMA_FROM_DEVICE;
850 return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
/*
 * proc_get_resource_info: report resource status for a processor.  Dynamic
 * memory types are answered via the node manager -> NLDR -> RMM chain;
 * processor load is answered via the IO manager's pfn_io_get_proc_load
 * hook.
 * NOTE(review): sampled extraction — case breaks, the rmm_stat call line
 * and returns between the visible lines are elided.
 */
854 * ======== proc_get_resource_info ========
856 * Enumerate the resources currently available on a processor.
858 int proc_get_resource_info(void *hprocessor, u32 resource_type,
859 struct dsp_resourceinfo *resource_info,
860 u32 resource_info_size)
863 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
864 struct node_mgr *hnode_mgr = NULL;
865 struct nldr_object *nldr_obj = NULL;
866 struct rmm_target_obj *rmm = NULL;
867 struct io_mgr *hio_mgr = NULL; /* IO manager handle */
869 DBC_REQUIRE(refs > 0);
870 DBC_REQUIRE(resource_info != NULL);
871 DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
873 if (!p_proc_object) {
877 switch (resource_type) {
878 case DSP_RESOURCE_DYNDARAM:
879 case DSP_RESOURCE_DYNSARAM:
880 case DSP_RESOURCE_DYNEXTERNAL:
881 case DSP_RESOURCE_DYNSRAM:
882 status = dev_get_node_manager(p_proc_object->hdev_obj,
889 status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
891 status = nldr_get_rmm_manager(nldr_obj, &rmm);
894 (enum dsp_memtype)resource_type,
895 (struct dsp_memstat *)
896 &(resource_info->result.
904 case DSP_RESOURCE_PROCLOAD:
905 status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
908 p_proc_object->intf_fxns->
909 pfn_io_get_proc_load(hio_mgr,
910 (struct dsp_procloadstat *)
911 &(resource_info->result.
/*
 * proc_exit: drop one reference on the module-level 'refs' count.
 * NOTE(review): sampled extraction — the function signature (presumably
 * 'void proc_exit(void)') and the decrement itself are elided; only the
 * DBC pre/post conditions are visible.
 */
925 * ======== proc_exit ========
927 * Decrement reference count, and free resources when reference count is
932 DBC_REQUIRE(refs > 0);
936 DBC_ENSURE(refs >= 0);
/*
 * proc_get_dev_object: return the dev_object associated with the given
 * processor handle via *device_obj.
 * NOTE(review): sampled extraction — the handle-validity check, status
 * assignments and return are elided between the visible lines.
 */
940 * ======== proc_get_dev_object ========
942 * Return the Dev Object handle for a given Processor.
945 int proc_get_dev_object(void *hprocessor,
946 struct dev_object **device_obj)
949 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
951 DBC_REQUIRE(refs > 0);
952 DBC_REQUIRE(device_obj != NULL);
955 *device_obj = p_proc_object->hdev_obj;
962 DBC_ENSURE((!status && *device_obj != NULL) ||
963 (status && *device_obj == NULL));
/*
 * proc_get_state: query the bridge driver for the board state
 * (pfn_brd_status) and map it onto the dsp_processorstate reported to the
 * caller; unknown board states map to 0xFF.
 * NOTE(review): sampled extraction — the case labels preceding each
 * assignment (e.g. BRD_STOPPED/BRD_RUNNING) and the breaks are elided.
 */
969 * ======== proc_get_state ========
971 * Report the state of the specified DSP processor.
973 int proc_get_state(void *hprocessor,
974 struct dsp_processorstate *proc_state_obj,
978 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
981 DBC_REQUIRE(refs > 0);
982 DBC_REQUIRE(proc_state_obj != NULL);
983 DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
986 /* First, retrieve BRD state information */
987 status = (*p_proc_object->intf_fxns->pfn_brd_status)
988 (p_proc_object->hbridge_context, &brd_status);
990 switch (brd_status) {
992 proc_state_obj->proc_state = PROC_STOPPED;
994 case BRD_SLEEP_TRANSITION:
995 case BRD_DSP_HIBERNATION:
998 proc_state_obj->proc_state = PROC_RUNNING;
1001 proc_state_obj->proc_state = PROC_LOADED;
1004 proc_state_obj->proc_state = PROC_ERROR;
1007 proc_state_obj->proc_state = 0xFF;
1015 dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
1016 __func__, status, proc_state_obj->proc_state);
/*
 * proc_get_trace: read the DSP-resident trace buffer (bounded by the
 * SYS_PUTCBEG/SYS_PUTCEND symbols) into pbuf, up to max_size bytes.
 * NOTE(review): the entire body is elided in this sampled extraction;
 * only the header comment and signature are visible.
 */
1021 * ======== proc_get_trace ========
1023 * Retrieve the current contents of the trace buffer, located on the
1024 * Processor. Predefined symbols for the trace buffer must have been
1025 * configured into the DSP executable.
1027 * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
1028 * trace buffer, only. Treat it as an undocumented feature.
1029 * This call is destructive, meaning the processor is placed in the monitor
1030 * state as a result of this function.
1032 int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
/*
 * proc_init: bump the module reference count; returns true on success.
 * NOTE(review): the increment and the 'ret' computation are elided in this
 * sampled extraction; only the DBC pre/post conditions are visible.
 */
1040 * ======== proc_init ========
1042 * Initialize PROC's private state, keeping a reference count on each call
1044 bool proc_init(void)
1048 DBC_REQUIRE(refs >= 0);
1053 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
/*
 * proc_load: reset the board, place it in monitor state, prepend
 * "PROC_ID=<n>" to the environment, auto-unregister/register COFF node
 * data via DCD, open and load the base image through the COD manager,
 * (re)create the message manager, and finally mark the board BRD_LOADED —
 * with optional DVFS OPP boost around the load and load-time
 * instrumentation when OPT_LOAD_TIME_INSTRUMENTATION is set.
 * NOTE(review): sampled extraction — braces, many status checks, gotos
 * and cleanup labels between the visible lines are elided; only visible
 * behavior is noted here.
 */
1059 * ======== proc_load ========
1061 * Reset a processor and load a new base program image.
1062 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1063 * application developer's API.
1065 int proc_load(void *hprocessor, const s32 argc_index,
1066 const char **user_args, const char **user_envp)
1069 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1070 struct io_mgr *hio_mgr; /* IO manager handle */
1071 struct msg_mgr *hmsg_mgr;
1072 struct cod_manager *cod_mgr; /* Code manager handle */
1073 char *pargv0; /* temp argv[0] ptr */
1074 char **new_envp; /* Updated envp[] array. */
1075 char sz_proc_id[MAXPROCIDLEN]; /* Size of "PROC_ID=<n>" */
1076 s32 envp_elems; /* Num elements in envp[]. */
1077 s32 cnew_envp; /* " " in new_envp[] */
1078 s32 nproc_id = 0; /* Anticipate MP version. */
1079 struct dcd_manager *hdcd_handle;
1083 struct drv_data *drv_datap = dev_get_drvdata(bridge);
1085 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1090 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1091 struct dspbridge_platform_data *pdata =
1092 omap_dspbridge_dev->dev.platform_data;
1095 DBC_REQUIRE(refs > 0);
1096 DBC_REQUIRE(argc_index > 0);
1097 DBC_REQUIRE(user_args != NULL);
1099 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1100 do_gettimeofday(&tv1);
1102 if (!p_proc_object) {
1106 dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
1111 status = proc_stop(hprocessor);
1115 /* Place the board in the monitor state. */
1116 status = proc_monitor(hprocessor);
1120 /* Save ptr to original argv[0]. */
1121 pargv0 = (char *)user_args[0];
1122 /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
1123 envp_elems = get_envp_count((char **)user_envp);
1124 cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
1125 new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
1127 status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
1130 dev_dbg(bridge, "%s: Proc ID string overflow\n",
1135 prepend_envp(new_envp, (char **)user_envp,
1136 envp_elems, cnew_envp, sz_proc_id);
1137 /* Get the DCD Handle */
1138 status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
1139 (u32 *) &hdcd_handle);
1141 /* Before proceeding with new load,
1142 * check if a previously registered COFF
1144 * If yes, unregister nodes in previously
1145 * registered COFF. If any error occurred,
1146 * set previously registered COFF to NULL. */
1147 if (p_proc_object->psz_last_coff != NULL) {
1149 dcd_auto_unregister(hdcd_handle,
1152 /* Regardless of auto unregister status,
1153 * free previously allocated
1155 kfree(p_proc_object->psz_last_coff);
1156 p_proc_object->psz_last_coff = NULL;
1159 /* On success, do cod_open_base() */
1160 status = cod_open_base(cod_mgr, (char *)user_args[0],
1167 /* Auto-register data base */
1168 /* Get the DCD Handle */
1169 status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
1170 (u32 *) &hdcd_handle);
1172 /* Auto register nodes in specified COFF
1173 * file. If registration did not fail,
1174 * (status = 0 or -EACCES)
1175 * save the name of the COFF file for
1176 * de-registration in the future. */
1178 dcd_auto_register(hdcd_handle,
1179 (char *)user_args[0]);
1180 if (status == -EACCES)
1186 DBC_ASSERT(p_proc_object->psz_last_coff ==
1188 /* Allocate memory for pszLastCoff */
1189 p_proc_object->psz_last_coff =
1190 kzalloc((strlen(user_args[0]) +
1192 /* If memory allocated, save COFF file name */
1193 if (p_proc_object->psz_last_coff) {
1194 strncpy(p_proc_object->psz_last_coff,
1195 (char *)user_args[0],
1196 (strlen((char *)user_args[0]) +
1202 /* Update shared memory address and size */
1204 /* Create the message manager. This must be done
1205 * before calling the IOOnLoaded function. */
1206 dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
1208 status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
1209 (msg_onexit) node_on_exit);
1210 DBC_ASSERT(!status);
1211 dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
1215 /* Set the Device object's message manager */
1216 status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
1218 status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
1224 /* Now, attempt to load an exec: */
1226 /* Boost the OPP level to Maximum level supported by baseport */
1227 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1228 if (pdata->cpu_set_freq)
1229 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
1231 status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
1233 p_proc_object->hdev_obj, NULL);
1235 if (status == -EBADF) {
1236 dev_dbg(bridge, "%s: Failure to Load the EXE\n",
1239 if (status == -ESPIPE) {
1240 pr_err("%s: Couldn't parse the file\n",
1244 /* Requesting the lowest opp supported */
1245 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1246 if (pdata->cpu_set_freq)
1247 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1252 /* Update the Processor status to loaded */
1253 status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
1254 (p_proc_object->hbridge_context, BRD_LOADED);
1256 p_proc_object->proc_state = PROC_LOADED;
1257 if (p_proc_object->ntfy_obj)
1258 proc_notify_clients(p_proc_object,
1259 DSP_PROCESSORSTATECHANGE);
1263 status = proc_get_processor_id(hprocessor, &proc_id);
1264 if (proc_id == DSP_UNIT) {
1265 /* Use all available DSP address space after EXTMEM
1268 status = cod_get_sym_value(cod_mgr, EXTEND,
1272 /* Restore the original argv[0] */
1274 user_args[0] = pargv0;
1276 if (!((*p_proc_object->intf_fxns->pfn_brd_status)
1277 (p_proc_object->hbridge_context, &brd_state))) {
1278 pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
1279 kfree(drv_datap->base_img);
1280 drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
1282 if (drv_datap->base_img)
1283 strncpy(drv_datap->base_img, pargv0,
1284 strlen(pargv0) + 1);
1287 DBC_ASSERT(brd_state == BRD_LOADED);
1293 pr_err("%s: Processor failed to load\n", __func__);
1294 proc_stop(p_proc_object);
1297 && p_proc_object->proc_state == PROC_LOADED)
1299 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1300 do_gettimeofday(&tv2);
1301 if (tv2.tv_usec < tv1.tv_usec) {
1302 tv2.tv_usec += 1000000;
1305 dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
1306 tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
/*
 * proc_map: map an MPU buffer into DSP address space.  Page-aligns the
 * physical/virtual addresses and size, records the mapping in the
 * per-process list (add_mapping_info), performs the MMU map
 * (user_to_dsp_map) under proc_lock, and returns the DSP-visible address
 * (page-aligned VA combined with the sub-page offset of pmpu_addr).
 * NOTE(review): sampled extraction — braces, status checks and returns
 * between the visible lines are elided.
 */
1312 * ======== proc_map ========
1314 * Maps a MPU buffer to DSP address space.
1316 int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1317 void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1318 struct process_context *pr_ctxt)
1324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1325 struct dmm_map_object *map_obj;
1327 #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
/* Non-RBUF buffers must be DSP cache-line aligned in both addr and size */
1328 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1329 if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1330 !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1331 pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1332 (u32)pmpu_addr, ul_size);
1338 /* Calculate the page-aligned PA, VA and size */
1339 va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1340 pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1341 size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1344 if (!p_proc_object) {
1348 /* Critical section */
1349 mutex_lock(&proc_lock);
1351 /* Add mapping to the page tables. */
1353 /* mapped memory resource tracking */
1354 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align,
1359 va_align = user_to_dsp_map(
1360 p_proc_object->hbridge_context->dsp_mmu,
1361 pa_align, va_align, size_align,
1363 if (IS_ERR_VALUE(va_align))
1364 status = (int)va_align;
1368 /* Mapped address = MSB of VA | LSB of PA */
1369 map_obj->dsp_addr = (va_align |
1370 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1371 *pp_map_addr = (void *)map_obj->dsp_addr;
/* Failure path: discard the tracking entry added above */
1373 remove_mapping_information(pr_ctxt, va_align);
1375 mutex_unlock(&proc_lock);
1381 dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1382 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1383 "pa_align %x, size_align %x status 0x%x\n", __func__,
1384 hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1385 pp_map_addr, va_align, pa_align, size_align, status);
/*
 * proc_register_notify: register (or deregister, event_mask == 0) for
 * processor events.  Ordinary events go to the proc ntfy object; error
 * events (DSP_SYSERROR / DSP_MMUFAULT / DSP_PWRERROR) are registered with
 * the DEH manager instead, and deregistration falls back to the DEH
 * manager if the ntfy deregistration fails.
 * NOTE(review): sampled extraction — braces, status checks and returns
 * between the visible lines are elided.
 */
1391 * ======== proc_register_notify ========
1393 * Register to be notified of specific processor events.
1395 int proc_register_notify(void *hprocessor, u32 event_mask,
1396 u32 notify_type, struct dsp_notification
1400 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1401 struct deh_mgr *hdeh_mgr;
1403 DBC_REQUIRE(hnotification != NULL);
1404 DBC_REQUIRE(refs > 0);
1406 /* Check processor handle */
1407 if (!p_proc_object) {
1411 /* Check if event mask is a valid processor related event */
1412 if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
1413 DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
1414 DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
1418 /* Check if notify type is valid */
1419 if (notify_type != DSP_SIGNALEVENT)
1423 /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
1424 * or DSP_PWRERROR then register event immediately. */
1426 ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
1428 status = ntfy_register(p_proc_object->ntfy_obj,
1429 hnotification, event_mask,
1431 /* Special case alert, special case alert!
1432 * If we're trying to *deregister* (i.e. event_mask
1433 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
1434 * we have to deregister with the DEH manager.
1435 * There's no way to know, based on event_mask which
1436 * manager the notification event was registered with,
1437 * so if we're trying to deregister and ntfy_register
1438 * failed, we'll give the deh manager a shot.
1440 if ((event_mask == 0) && status) {
1442 dev_get_deh_mgr(p_proc_object->hdev_obj,
1445 bridge_deh_register_notify(hdeh_mgr,
1451 status = dev_get_deh_mgr(p_proc_object->hdev_obj,
1454 bridge_deh_register_notify(hdeh_mgr,
1466 * ======== proc_start ========
1468 * Start a processor running.
1470 int proc_start(void *hprocessor)
/*
 * Start a loaded processor running: look up the loaded image's entry
 * point via the COD manager, call the bridge's pfn_brd_start, then
 * create the level-2 device objects (node manager, DISP).  On failure
 * of dev_create2 the board is stopped again and the state is rolled
 * back to PROC_STOPPED.
 * NOTE(review): interior lines are elided in this view.
 */
1473 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1474 struct cod_manager *cod_mgr; /* Code manager handle */
1475 u32 dw_dsp_addr; /* Loaded code's entry point. */
1478 DBC_REQUIRE(refs > 0);
1479 if (!p_proc_object) {
1483 /* Call the bridge_brd_start */
1484 if (p_proc_object->proc_state != PROC_LOADED) {
/* Fetch the COD manager so we can query the image entry point. */
1488 status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
1494 status = cod_get_entry(cod_mgr, &dw_dsp_addr);
/* Hand the entry point to the bridge driver to start the board. */
1498 status = (*p_proc_object->intf_fxns->pfn_brd_start)
1499 (p_proc_object->hbridge_context, dw_dsp_addr);
1503 /* Call dev_create2 */
1504 status = dev_create2(p_proc_object->hdev_obj);
1506 p_proc_object->proc_state = PROC_RUNNING;
1507 /* Deep sleep switches off the peripheral clocks.
1508 * we just put the DSP CPU in idle in the idle loop.
1509 * so there is no need to send a command to DSP */
1511 if (p_proc_object->ntfy_obj) {
1512 proc_notify_clients(p_proc_object,
1513 DSP_PROCESSORSTATECHANGE);
1516 /* Failed to Create Node Manager and DISP Object
1517 * Stop the Processor from running. Put it in STOPPED State */
1518 (void)(*p_proc_object->intf_fxns->
1519 pfn_brd_stop) (p_proc_object->hbridge_context);
1520 p_proc_object->proc_state = PROC_STOPPED;
/* Success path: sanity-check the board state reported by the bridge. */
1524 if (!((*p_proc_object->intf_fxns->pfn_brd_status)
1525 (p_proc_object->hbridge_context, &brd_state))) {
1526 pr_info("%s: dsp in running state\n", __func__);
1527 DBC_ASSERT(brd_state != BRD_HIBERNATION);
1530 pr_err("%s: Failed to start the dsp\n", __func__);
1531 proc_stop(p_proc_object);
1535 DBC_ENSURE((!status && p_proc_object->proc_state ==
1536 PROC_RUNNING) || status);
1541 * ======== proc_stop ========
1543 * Stop a processor running.
1545 int proc_stop(void *hprocessor)
/*
 * Stop a running processor.  Refuses to stop while nodes are still
 * allocated on the DSP; otherwise calls the bridge's pfn_brd_stop,
 * tears down the level-2 device objects (node manager, msg_ctrl
 * manager) and moves the state to PROC_STOPPED.
 * NOTE(review): interior lines are elided in this view.
 */
1548 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1549 struct msg_mgr *hmsg_mgr;
1550 struct node_mgr *hnode_mgr;
1552 u32 node_tab_size = 1;
1554 u32 nodes_allocated = 0;
1557 DBC_REQUIRE(refs > 0);
1558 if (!p_proc_object) {
1562 /* check if there are any running nodes */
1563 status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
1564 if (!status && hnode_mgr) {
1565 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1566 &num_nodes, &nodes_allocated);
1567 if ((status == -EINVAL) || (nodes_allocated > 0)) {
1568 pr_err("%s: Can't stop device, active nodes = %d \n",
1569 __func__, nodes_allocated);
1573 /* Call the bridge_brd_stop */
1574 /* It is OK to stop a device that doesn't have nodes OR not started */
1576 (*p_proc_object->intf_fxns->
1577 pfn_brd_stop) (p_proc_object->hbridge_context);
1579 dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1580 p_proc_object->proc_state = PROC_STOPPED;
1581 /* Destroy the Node Manager, msg_ctrl Manager */
1582 if (!(dev_destroy2(p_proc_object->hdev_obj))) {
1583 /* Destroy the msg_ctrl by calling msg_delete */
1584 dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
1586 msg_delete(hmsg_mgr);
1587 dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
/* Sanity-check: bridge should now report the board as stopped. */
1589 if (!((*p_proc_object->
1590 intf_fxns->pfn_brd_status) (p_proc_object->
1593 DBC_ASSERT(brd_state == BRD_STOPPED);
1596 pr_err("%s: Failed to stop the processor\n", __func__);
1604 * ======== proc_un_map ========
1606 * Removes a MPU buffer mapping from the DSP address space.
1608 int proc_un_map(void *hprocessor, void *map_addr,
1609 struct process_context *pr_ctxt)
/*
 * Remove an MPU buffer mapping from the DSP address space.  The DSP VA
 * is page-aligned down before unmapping; on success the corresponding
 * map_obj is also dropped from the process context's dmm_map_list so
 * resource tracking stays consistent.
 * NOTE(review): interior lines are elided in this view.
 */
1612 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
/* Page-align the DSP virtual address that was handed out by proc_map. */
1615 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1616 if (!p_proc_object) {
1621 /* Critical section */
1622 mutex_lock(&proc_lock);
1623 /* Remove mapping from the page tables. */
1624 status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu,
1627 mutex_unlock(&proc_lock);
1632 * A successful unmap should be followed by removal of map_obj
1633 * from dmm_map_list, so that mapped memory resource tracking
1636 remove_mapping_information(pr_ctxt, (u32) map_addr);
1639 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1640 __func__, hprocessor, map_addr, status);
1645 * ======== proc_monitor ========
1647 * Place the Processor in Monitor State. This is an internal
1648 * function and a requirement before Processor is loaded.
1649 * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1650 * In dev_destroy2 we delete the node manager.
1652 * p_proc_object: Pointer to Processor Object
1654 * 0: Processor placed in monitor mode.
1655 * !0: Failed to place processor in monitor mode.
1657 * Valid Processor Handle
1659 * Success: ProcObject state is PROC_IDLE
1661 static int proc_monitor(struct proc_object *proc_obj)
/*
 * Internal helper: place the board in the monitor (idle) state prior
 * to loading.  Tears down the level-2 device objects if present, then
 * invokes the bridge's pfn_brd_monitor hook.
 * NOTE(review): interior lines are elided in this view.
 */
1663 int status = -EPERM;
1664 struct msg_mgr *hmsg_mgr;
1667 DBC_REQUIRE(refs > 0);
1668 DBC_REQUIRE(proc_obj);
1670 /* This is needed only when Device is loaded when it is
1671 * already 'ACTIVE' */
1672 /* Destroy the Node Manager, msg_ctrl Manager */
1673 if (!dev_destroy2(proc_obj->hdev_obj)) {
1674 /* Destroy the msg_ctrl by calling msg_delete */
1675 dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
1677 msg_delete(hmsg_mgr);
1678 dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
1681 /* Place the Board in the Monitor State */
1682 if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
1683 (proc_obj->hbridge_context))) {
1685 if (!((*proc_obj->intf_fxns->pfn_brd_status)
1686 (proc_obj->hbridge_context, &brd_state)))
1687 DBC_ASSERT(brd_state == BRD_IDLE);
1690 DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
1696 * ======== get_envp_count ========
1698 * Return the number of elements in the envp array, including the
1699 * terminating NULL element.
1701 static s32 get_envp_count(char **envp)
/*
 * Count the entries in a NULL-terminated envp array, including the
 * terminating NULL element itself (hence the final increment).
 * NOTE(review): the counting loop is elided in this view.
 */
1708 ret += 1; /* Include the terminating NULL in the count. */
1715 * ======== prepend_envp ========
1717 * Prepend an environment variable=value pair to the new envp array, and
1718 * copy in the existing var=value pairs in the old envp array.
1720 static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
1721 s32 cnew_envp, char *sz_var)
/*
 * Build a new environment array: sz_var ("VAR=value") goes first,
 * followed by the envp_elems entries copied from the old envp, then a
 * NULL terminator.  Returns the start of the new array (pp_envp).
 * NOTE(review): the `if (envp_elems == 0)` check after the while loop
 * looks dead — the loop always leaves envp_elems at -1.  Elided lines
 * may tell a different story; confirm against the full source.
 */
1723 char **pp_envp = new_envp;
1725 DBC_REQUIRE(new_envp);
1727 /* Prepend new environ var=value string */
1728 *new_envp++ = sz_var;
1730 /* Copy user's environment into our own. */
1731 while (envp_elems--)
1732 *new_envp++ = *envp++;
1734 /* Ensure NULL terminates the new environment strings array. */
1735 if (envp_elems == 0)
1742 * ======== proc_notify_clients ========
1744 * Notify the processor the events.
1746 int proc_notify_clients(void *proc, u32 events)
/*
 * Notify clients registered on this proc object's ntfy_obj of the
 * given processor event(s).
 * NOTE(review): interior lines are elided in this view.
 */
1749 struct proc_object *p_proc_object = (struct proc_object *)proc;
1751 DBC_REQUIRE(p_proc_object);
1752 DBC_REQUIRE(is_valid_proc_event(events));
1753 DBC_REQUIRE(refs > 0);
1754 if (!p_proc_object) {
1759 ntfy_notify(p_proc_object->ntfy_obj, events);
1765 * ======== proc_notify_all_clients ========
1767 * Notify the processor the events. This includes notifying all clients
1768 * attached to a particular DSP.
1770 int proc_notify_all_clients(void *proc, u32 events)
/*
 * Broadcast the given processor event(s) to every client attached to
 * this DSP, via the device object's client notification path.
 * NOTE(review): interior lines are elided in this view.
 */
1773 struct proc_object *p_proc_object = (struct proc_object *)proc;
1775 DBC_REQUIRE(is_valid_proc_event(events));
1776 DBC_REQUIRE(refs > 0);
1778 if (!p_proc_object) {
1783 dev_notify_clients(p_proc_object->hdev_obj, events);
1790 * ======== proc_get_processor_id ========
1792 * Retrieves the processor ID.
1794 int proc_get_processor_id(void *proc, u32 * proc_id)
1797 struct proc_object *p_proc_object = (struct proc_object *)proc;
1800 *proc_id = p_proc_object->processor_id;