4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
21 #include <dspbridge/host_os.h>
23 #include <dspbridge/dbdefs.h>
25 #include <dspbridge/dbc.h>
27 /* Platform manager */
28 #include <dspbridge/cod.h>
29 #include <dspbridge/dev.h>
31 /* Resource manager */
32 #include <dspbridge/dbll.h>
33 #include <dspbridge/dbdcd.h>
34 #include <dspbridge/rmm.h>
35 #include <dspbridge/uuidutil.h>
37 #include <dspbridge/nldr.h>
38 #include <linux/gcd.h>
40 /* Name of section containing dynamic load mem */
41 #define DYNMEMSECT ".dspbridge_mem"
43 /* Name of section containing dependent library information */
44 #define DEPLIBSECT ".dspbridge_deplibs"
46 /* Max depth of recursion for loading node's dependent libraries */
49 /* Max number of persistent libraries kept by a node */
53 * Defines for extracting packed dynamic load memory requirements from two
55 * These defines must match node.cdb and dynm.cdb
56 * Format of data/code mask is:
57 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
60 * cccccc = preferred/required dynamic mem segid for create phase data/code
61 * dddddd = preferred/required dynamic mem segid for delete phase data/code
62 * eeeeee = preferred/required dynamic mem segid for execute phase data/code
63 * f = flag indicating if memory is preferred or required:
64 * f = 1 if required, f = 0 if preferred.
66 * The 6 bits of the segid are interpreted as follows:
68 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
69 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
70 * If the 6th bit (bit 5) is set, segid has the following interpretation:
71 * segid = 32 - Any internal memory segment can be used.
72 * segid = 33 - Any external memory segment can be used.
73 * segid = 63 - Any memory segment can be used (in this case the
74 * required/preferred flag is irrelevant).
77 /* Maximum allowed dynamic loading memory segments */
80 #define MAXSEGID 3 /* Largest possible (real) segid */
81 #define MEMINTERNALID 32 /* Segid meaning use internal mem */
82 #define MEMEXTERNALID 33 /* Segid meaning use external mem */
83 #define NULLID 63 /* Segid meaning no memory req/pref */
84 #define FLAGBIT 7 /* 7th bit is pref./req. flag */
85 #define SEGMASK 0x3f /* Bits 0 - 5 */
87 #define CREATEBIT 0 /* Create segid starts at bit 0 */
88 #define DELETEBIT 8 /* Delete segid starts at bit 8 */
89 #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
92 * Masks that define memory type. Must match defines in dynm.cdb.
96 #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
97 #define DYNM_INTERNAL 0x8
98 #define DYNM_EXTERNAL 0x10
101 * Defines for packing memory requirement/preference flags for code and
102 * data of each of the node's phases into one mask.
103 * The bit is set if the segid is required for loading code/data of the
104 * given phase. The bit is not set, if the segid is preferred only.
106 * These defines are also used as indices into a segid array for the node.
107 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
108 * create phase data is required or preferred to be loaded into.
110 #define CREATEDATAFLAGBIT 0
111 #define CREATECODEFLAGBIT 1
112 #define EXECUTEDATAFLAGBIT 2
113 #define EXECUTECODEFLAGBIT 3
114 #define DELETEDATAFLAGBIT 4
115 #define DELETECODEFLAGBIT 5
119 * These names may be embedded in overlay sections to identify which
120 * node phase the section should be overlayed.
122 #define PCREATE "create"
123 #define PDELETE "delete"
124 #define PEXECUTE "execute"
/* Return true if the two DSP UUIDs are byte-for-byte identical. */
126 static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
127 struct dsp_uuid *uuid2)
129 return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
133 * ======== mem_seg_info ========
134 * Format of dynamic loading memory segment info in coff file.
135 * Must match dynm.h55.
137 struct mem_seg_info {
138 u32 segid; /* Dynamic loading memory segment number */
/* NOTE(review): code elsewhere in this file also reads 'base' and 'len'
 * members of this struct (see nldr_create); confirm the full layout
 * against dynm.h55 — some field lines appear elided in this extraction. */
141 u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
145 * ======== lib_node ========
146 * For maintaining a tree of library dependencies.
149 struct dbll_library_obj *lib; /* The library */
150 u16 dep_libs; /* Number of dependent libraries */
/* dep_libs_tree points at an array of 'dep_libs' lib_node entries,
 * one per dependent library (iterated in nldr_get_fxn_addr et al.). */
151 struct lib_node *dep_libs_tree; /* Dependent libraries of lib */
155 * ======== ovly_sect ========
156 * Information needed to overlay a section.
/* Next section in a singly linked per-phase list (NULL-terminated). */
159 struct ovly_sect *next_sect;
160 u32 sect_load_addr; /* Load address of section */
161 u32 sect_run_addr; /* Run address of section */
162 u32 size; /* Size of section */
163 u16 page; /* DBL_CODE, DBL_DATA */
167 * ======== ovly_node ========
168 * For maintaining a list of overlay nodes, with sections that need to be
169 * overlayed for each of the nodes phases.
/* UUID of the node; matched against nldr_nodeobject->uuid when an
 * overlay load is requested (see load_ovly). */
172 struct dsp_uuid uuid;
/* Per-phase linked lists of overlay sections, built by add_ovly_info. */
174 struct ovly_sect *create_sects_list;
175 struct ovly_sect *delete_sects_list;
176 struct ovly_sect *execute_sects_list;
177 struct ovly_sect *other_sects_list;
189 * ======== nldr_object ========
190 * Overlay loader object.
/* One instance is created per device by nldr_create() and torn down
 * by nldr_delete(); it owns the overlay table and segment table. */
193 struct dev_object *hdev_obj; /* Device object */
194 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
195 struct dbll_tar_obj *dbll; /* The DBL loader */
196 struct dbll_library_obj *base_lib; /* Base image library */
197 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
198 struct dbll_fxns ldr_fxns; /* Loader function table */
199 struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */
200 nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */
201 nldr_writefxn write_fxn; /* "write" for dynamic nodes */
202 struct ovly_node *ovly_table; /* Table of overlay nodes */
203 u16 ovly_nodes; /* Number of overlay nodes in base */
204 u16 ovly_nid; /* Index for tracking overlay nodes */
205 u16 dload_segs; /* Number of dynamic load mem segs */
206 u32 *seg_table; /* memtypes of dynamic memory segs
209 u16 us_dsp_mau_size; /* Size of DSP MAU */
210 u16 us_dsp_word_size; /* Size of DSP word */
214 * ======== nldr_nodeobject ========
215 * Dynamic node object. This object is created when a node is allocated.
217 struct nldr_nodeobject {
218 struct nldr_object *nldr_obj; /* Dynamic loader handle */
219 void *priv_ref; /* Handle to pass to dbl_write_fxn */
220 struct dsp_uuid uuid; /* Node's UUID */
221 bool dynamic; /* Dynamically loaded node? */
222 bool overlay; /* Overlay node? */
/* Caller-owned flag (stored by nldr_allocate); true when the node's
 * phases live in separate libraries — presumably set during load.
 * TODO(review): confirm who writes it. */
223 bool *pf_phase_split; /* Multiple phase libraries? */
224 struct lib_node root; /* Library containing node phase */
225 struct lib_node create_lib; /* Library with create phase lib */
226 struct lib_node execute_lib; /* Library with execute phase lib */
227 struct lib_node delete_lib; /* Library with delete phase lib */
228 /* libs remain loaded until Delete */
229 struct lib_node pers_lib_table[MAXLIBS];
230 s32 pers_libs; /* Number of persistent libraries */
231 /* Path in lib dependency tree */
232 struct dbll_library_obj *lib_path[MAXDEPTH + 1];
233 enum nldr_phase phase; /* Node phase currently being loaded */
236 * Dynamic loading memory segments for data and code of each phase.
/* Indexed by the *FLAGBIT defines (CREATEDATAFLAGBIT, ...). */
238 u16 seg_id[MAXFLAGS];
241 * Mask indicating whether each mem segment specified in seg_id[]
242 * is preferred or required.
244 * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
245 * then it is required to load execute phase data into the memory
246 * specified by seg_id[EXECUTEDATAFLAGBIT].
248 u32 code_data_flag_mask;
251 /* Dynamic loader function table */
/* Positional initializer: the entry order must match the member order
 * of struct dbll_fxns. nldr_create copies this into nldr_obj->ldr_fxns. */
252 static struct dbll_fxns ldr_fxns = {
253 (dbll_close_fxn) dbll_close,
254 (dbll_create_fxn) dbll_create,
255 (dbll_delete_fxn) dbll_delete,
256 (dbll_exit_fxn) dbll_exit,
257 (dbll_get_attrs_fxn) dbll_get_attrs,
258 (dbll_get_addr_fxn) dbll_get_addr,
259 (dbll_get_c_addr_fxn) dbll_get_c_addr,
260 (dbll_get_sect_fxn) dbll_get_sect,
261 (dbll_init_fxn) dbll_init,
262 (dbll_load_fxn) dbll_load,
263 (dbll_load_sect_fxn) dbll_load_sect,
264 (dbll_open_fxn) dbll_open,
265 (dbll_read_sect_fxn) dbll_read_sect,
266 (dbll_set_attrs_fxn) dbll_set_attrs,
267 (dbll_unload_fxn) dbll_unload,
268 (dbll_unload_sect_fxn) dbll_unload_sect,
271 static u32 refs; /* module reference count */
/* Forward declarations of file-local helper functions. */
273 static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
274 u32 addr, u32 bytes);
275 static int add_ovly_node(struct dsp_uuid *uuid_obj,
276 enum dsp_dcdobjtype obj_type, void *handle);
277 static int add_ovly_sect(struct nldr_object *nldr_obj,
278 struct ovly_sect **lst,
279 struct dbll_sect_info *sect_inf,
280 bool *exists, u32 addr, u32 bytes);
281 static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
283 static void free_sects(struct nldr_object *nldr_obj,
284 struct ovly_sect *phase_sects, u16 alloc_num);
285 static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
286 char *sym_name, struct dbll_sym_val **sym);
287 static int load_lib(struct nldr_nodeobject *nldr_node_obj,
288 struct lib_node *root, struct dsp_uuid uuid,
290 struct dbll_library_obj **lib_path,
291 enum nldr_phase phase, u16 depth);
292 static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
293 enum nldr_phase phase);
294 static int remote_alloc(void **ref, u16 mem_sect, u32 size,
295 u32 align, u32 *dsp_address,
297 s32 req, bool reserve);
298 static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
301 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
302 struct lib_node *root);
303 static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
304 enum nldr_phase phase);
305 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
306 struct dbll_library_obj *lib);
307 static u32 find_lcm(u32 a, u32 b);
310 * ======== nldr_allocate ========
/* Allocate and initialize a per-node loader object.
 * On success *nldr_nodeobj holds the new object; on failure it is NULL
 * and the partially built object is freed. For dynamically loaded nodes
 * the per-phase segid/required-flag fields are unpacked here from the
 * packed ndb_props code/data masks (see the mask format comment above). */
312 int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
313 const struct dcd_nodeprops *node_props,
314 struct nldr_nodeobject **nldr_nodeobj,
315 bool *pf_phase_split)
317 struct nldr_nodeobject *nldr_node_obj = NULL;
320 DBC_REQUIRE(refs > 0);
321 DBC_REQUIRE(node_props != NULL);
322 DBC_REQUIRE(nldr_nodeobj != NULL);
323 DBC_REQUIRE(nldr_obj);
325 /* Initialize handle in case of failure */
326 *nldr_nodeobj = NULL;
327 /* Allocate node object */
328 nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
330 if (nldr_node_obj == NULL) {
333 nldr_node_obj->pf_phase_split = pf_phase_split;
334 nldr_node_obj->pers_libs = 0;
335 nldr_node_obj->nldr_obj = nldr_obj;
336 nldr_node_obj->priv_ref = priv_ref;
337 /* Save node's UUID. */
338 nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
340 * Determine if node is a dynamically loaded node from
343 if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
345 nldr_node_obj->dynamic = true;
347 * Extract memory requirements from ndb_props masks
/* For each phase (create/execute/delete) and kind (code/data):
 * seg_id[...]   <- 6-bit segid field at (phase bit offset) & SEGMASK
 * flag mask bit <- required/preferred flag at (offset + FLAGBIT) */
350 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
351 (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
353 nldr_node_obj->code_data_flag_mask |=
354 ((node_props->ul_data_mem_seg_mask >>
355 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
356 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
357 (node_props->ul_code_mem_seg_mask >>
358 CREATEBIT) & SEGMASK;
359 nldr_node_obj->code_data_flag_mask |=
360 ((node_props->ul_code_mem_seg_mask >>
361 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
363 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
364 (node_props->ul_data_mem_seg_mask >>
365 EXECUTEBIT) & SEGMASK;
366 nldr_node_obj->code_data_flag_mask |=
367 ((node_props->ul_data_mem_seg_mask >>
368 (EXECUTEBIT + FLAGBIT)) & 1) <<
370 nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
371 (node_props->ul_code_mem_seg_mask >>
372 EXECUTEBIT) & SEGMASK;
373 nldr_node_obj->code_data_flag_mask |=
374 ((node_props->ul_code_mem_seg_mask >>
375 (EXECUTEBIT + FLAGBIT)) & 1) <<
378 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
379 (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
381 nldr_node_obj->code_data_flag_mask |=
382 ((node_props->ul_data_mem_seg_mask >>
383 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
384 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
385 (node_props->ul_code_mem_seg_mask >>
386 DELETEBIT) & SEGMASK;
387 nldr_node_obj->code_data_flag_mask |=
388 ((node_props->ul_code_mem_seg_mask >>
389 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
391 /* Non-dynamically loaded nodes are part of the
393 nldr_node_obj->root.lib = nldr_obj->base_lib;
394 /* Check for overlay node */
395 if (node_props->us_load_type == NLDR_OVLYLOAD)
396 nldr_node_obj->overlay = true;
399 *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
401 /* Cleanup on failure */
402 if (status && nldr_node_obj)
403 kfree(nldr_node_obj);
405 DBC_ENSURE((!status && *nldr_nodeobj)
406 || (status && *nldr_nodeobj == NULL));
411 * ======== nldr_create ========
/* Create the device-wide loader object:
 *  - fetch COD manager, DBLL loader and base-image library from the device,
 *  - parse the DYNMEMSECT section of the base image into an RMM segment
 *    table (absence of that section is tolerated, see dev_dbg below),
 *  - install remote_alloc/remote_free/get_symbol_value in the loader attrs,
 *  - enumerate overlay nodes via the DCD and do a fake reload of the base
 *    image (fake_ovly_write/add_ovly_info) to collect overlay section info.
 * On failure the partially built object is destroyed via nldr_delete(). */
413 int nldr_create(struct nldr_object **nldr,
414 struct dev_object *hdev_obj,
415 const struct nldr_attrs *pattrs)
417 struct cod_manager *cod_mgr; /* COD manager */
418 char *psz_coff_buf = NULL;
419 char sz_zl_file[COD_MAXPATHLENGTH];
420 struct nldr_object *nldr_obj = NULL;
421 struct dbll_attrs save_attrs;
422 struct dbll_attrs new_attrs;
426 struct mem_seg_info *mem_info_obj;
429 struct rmm_segment *rmm_segs = NULL;
432 DBC_REQUIRE(refs > 0);
433 DBC_REQUIRE(nldr != NULL);
434 DBC_REQUIRE(hdev_obj != NULL);
435 DBC_REQUIRE(pattrs != NULL);
436 DBC_REQUIRE(pattrs->pfn_ovly != NULL);
437 DBC_REQUIRE(pattrs->pfn_write != NULL);
439 /* Allocate dynamic loader object */
440 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
442 nldr_obj->hdev_obj = hdev_obj;
443 /* warning, lazy status checking alert! */
444 dev_get_cod_mgr(hdev_obj, &cod_mgr);
446 status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
448 status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
451 cod_get_base_name(cod_mgr, sz_zl_file,
456 /* end lazy status checking */
457 nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
458 nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
459 nldr_obj->ldr_fxns = ldr_fxns;
460 if (!(nldr_obj->ldr_fxns.init_fxn()))
466 /* Create the DCD Manager */
468 status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
470 /* Get dynamic loading memory sections from base lib */
473 nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
474 DYNMEMSECT, &ul_addr,
/* Section length is in DSP MAUs; convert to host bytes for kzalloc. */
478 kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
483 /* Ok to not have dynamic loading memory */
486 dev_dbg(bridge, "%s: failed - no dynamic loading mem "
487 "segments: 0x%x\n", __func__, status);
490 if (!status && ul_len > 0) {
491 /* Read section containing dynamic load mem segments */
493 nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
494 DYNMEMSECT, psz_coff_buf,
497 if (!status && ul_len > 0) {
498 /* Parse memory segment data */
/* First u32 of the section is the segment count. */
499 dload_segs = (u16) (*((u32 *) psz_coff_buf));
500 if (dload_segs > MAXMEMSEGS)
503 /* Parse dynamic load memory segments */
504 if (!status && dload_segs > 0) {
505 rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
507 nldr_obj->seg_table =
508 kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
509 if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
512 nldr_obj->dload_segs = dload_segs;
513 mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
515 for (i = 0; i < dload_segs; i++) {
516 rmm_segs[i].base = (mem_info_obj + i)->base;
517 rmm_segs[i].length = (mem_info_obj + i)->len;
518 rmm_segs[i].space = 0;
519 nldr_obj->seg_table[i] =
520 (mem_info_obj + i)->type;
522 "(proc) DLL MEMSEGMENT: %d, "
523 "Base: 0x%x, Length: 0x%x\n", i,
524 rmm_segs[i].base, rmm_segs[i].length);
528 /* Create Remote memory manager */
530 status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
533 /* set the alloc, free, write functions for loader */
534 nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
535 new_attrs = save_attrs;
536 new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
537 new_attrs.free = (dbll_free_fxn) remote_free;
538 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
539 new_attrs.sym_handle = nldr_obj;
540 new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
541 nldr_obj->ovly_fxn = pattrs->pfn_ovly;
542 nldr_obj->write_fxn = pattrs->pfn_write;
543 nldr_obj->ldr_attrs = new_attrs;
549 /* Get overlay nodes */
552 cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
555 /* First count number of overlay nodes */
/* First pass: ovly_table is NULL so add_ovly_node only counts. */
557 dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
558 add_ovly_node, (void *)nldr_obj);
559 /* Now build table of overlay nodes */
560 if (!status && nldr_obj->ovly_nodes > 0) {
561 /* Allocate table for overlay nodes */
562 nldr_obj->ovly_table =
563 kzalloc(sizeof(struct ovly_node) *
564 nldr_obj->ovly_nodes, GFP_KERNEL);
565 /* Put overlay nodes in the table */
566 nldr_obj->ovly_nid = 0;
567 status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
572 /* Do a fake reload of the base image to get overlay section info */
573 if (!status && nldr_obj->ovly_nodes > 0) {
574 save_attrs.write = fake_ovly_write;
575 save_attrs.log_write = add_ovly_info;
576 save_attrs.log_write_handle = nldr_obj;
577 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
578 status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
579 &save_attrs, &ul_entry);
582 *nldr = (struct nldr_object *)nldr_obj;
585 nldr_delete((struct nldr_object *)nldr_obj);
589 /* FIXME:Temp. Fix. Must be removed */
590 DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
595 * ======== nldr_delete ========
/* Destroy a loader object: tear down the DBLL module, the RMM, the DCD
 * manager, free the dynamic-segment table and walk every overlay node's
 * four section lists freeing each ovly_sect, then free the table itself. */
597 void nldr_delete(struct nldr_object *nldr_obj)
599 struct ovly_sect *ovly_section;
600 struct ovly_sect *next;
602 DBC_REQUIRE(refs > 0);
603 DBC_REQUIRE(nldr_obj);
605 nldr_obj->ldr_fxns.exit_fxn();
607 rmm_delete(nldr_obj->rmm);
609 kfree(nldr_obj->seg_table);
611 if (nldr_obj->hdcd_mgr)
612 dcd_destroy_manager(nldr_obj->hdcd_mgr);
614 /* Free overlay node information */
615 if (nldr_obj->ovly_table) {
616 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
/* Free create-phase section list. */
618 nldr_obj->ovly_table[i].create_sects_list;
619 while (ovly_section) {
620 next = ovly_section->next_sect;
/* Free delete-phase section list. */
625 nldr_obj->ovly_table[i].delete_sects_list;
626 while (ovly_section) {
627 next = ovly_section->next_sect;
/* Free execute-phase section list. */
632 nldr_obj->ovly_table[i].execute_sects_list;
633 while (ovly_section) {
634 next = ovly_section->next_sect;
/* Free "other" (phase-less) section list. */
638 ovly_section = nldr_obj->ovly_table[i].other_sects_list;
639 while (ovly_section) {
640 next = ovly_section->next_sect;
645 kfree(nldr_obj->ovly_table);
651 * ======== nldr_exit ========
652 * Discontinue usage of NLDR module.
/* NOTE(review): decrements the module reference count 'refs';
 * the function signature and body are partially elided here. */
656 DBC_REQUIRE(refs > 0);
663 DBC_ENSURE(refs >= 0);
667 * ======== nldr_get_fxn_addr ========
/* Resolve the DSP address of function 'str_fxn' for a node.
 * Lookup order: the phase library (or root lib for overlay / non-split
 * dynamic nodes), then that library's direct dependents, then the node's
 * persistent libraries. On success *addr receives the symbol value. */
669 int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
670 char *str_fxn, u32 * addr)
672 struct dbll_sym_val *dbll_sym;
673 struct nldr_object *nldr_obj;
675 bool status1 = false;
677 struct lib_node root = { NULL, 0, NULL };
678 DBC_REQUIRE(refs > 0);
679 DBC_REQUIRE(nldr_node_obj);
680 DBC_REQUIRE(addr != NULL);
681 DBC_REQUIRE(str_fxn != NULL);
683 nldr_obj = nldr_node_obj->nldr_obj;
684 /* Called from node_create(), node_delete(), or node_run(). */
685 if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
686 switch (nldr_node_obj->phase) {
688 root = nldr_node_obj->create_lib;
691 root = nldr_node_obj->execute_lib;
694 root = nldr_node_obj->delete_lib;
701 /* for Overlay nodes or non-split Dynamic nodes */
702 root = nldr_node_obj->root;
/* Try C symbol first, then plain symbol, in the root library. */
705 nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
708 nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
711 /* If symbol not found, check dependent libraries */
713 for (i = 0; i < root.dep_libs; i++) {
715 nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
721 get_c_addr_fxn(root.dep_libs_tree[i].lib,
730 /* Check persistent libraries */
732 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
735 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
740 get_c_addr_fxn(nldr_node_obj->pers_lib_table
741 [i].lib, str_fxn, &dbll_sym);
/* Found: return the symbol's value as the DSP address. */
751 *addr = dbll_sym->value;
759 * ======== nldr_get_rmm_manager ========
760 * Given a NLDR object, retrieve RMM Manager Handle
/* Simple accessor: copies nldr->rmm into *rmm_mgr. */
762 int nldr_get_rmm_manager(struct nldr_object *nldr,
763 struct rmm_target_obj **rmm_mgr)
766 struct nldr_object *nldr_obj = nldr;
767 DBC_REQUIRE(rmm_mgr != NULL);
770 *rmm_mgr = nldr_obj->rmm;
776 DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
782 * ======== nldr_init ========
783 * Initialize the NLDR module.
/* NOTE(review): increments the module reference count 'refs';
 * the function signature and body are partially elided here. */
787 DBC_REQUIRE(refs >= 0);
794 DBC_ENSURE(refs > 0);
799 * ======== nldr_load ========
/* Load the code/data for one phase of a node.
 * Dynamic nodes: recursively load the node's library tree via load_lib();
 * if the node turns out to be phase-split, record the loaded root into the
 * per-phase lib slot. Overlay nodes: delegate to load_ovly(). */
801 int nldr_load(struct nldr_nodeobject *nldr_node_obj,
802 enum nldr_phase phase)
804 struct nldr_object *nldr_obj;
805 struct dsp_uuid lib_uuid;
808 DBC_REQUIRE(refs > 0);
809 DBC_REQUIRE(nldr_node_obj);
811 nldr_obj = nldr_node_obj->nldr_obj;
813 if (nldr_node_obj->dynamic) {
814 nldr_node_obj->phase = phase;
816 lib_uuid = nldr_node_obj->uuid;
818 /* At this point, we may not know if node is split into
819 * different libraries. So we'll go ahead and load the
820 * library, and then save the pointer to the appropriate
821 * location after we know. */
824 load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
825 false, nldr_node_obj->lib_path, phase, 0);
828 if (*nldr_node_obj->pf_phase_split) {
831 nldr_node_obj->create_lib =
836 nldr_node_obj->execute_lib =
841 nldr_node_obj->delete_lib =
852 if (nldr_node_obj->overlay)
853 status = load_ovly(nldr_node_obj, phase);
861 * ======== nldr_unload ========
/* Unload one phase of a node: pick the per-phase root library (or the
 * common root for non-split nodes), unload persistent libraries when the
 * phase dictates it, then unload the root and any overlay sections. */
863 int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
864 enum nldr_phase phase)
867 struct lib_node *root_lib = NULL;
870 DBC_REQUIRE(refs > 0);
871 DBC_REQUIRE(nldr_node_obj);
873 if (nldr_node_obj != NULL) {
874 if (nldr_node_obj->dynamic) {
875 if (*nldr_node_obj->pf_phase_split) {
878 root_lib = &nldr_node_obj->create_lib;
881 root_lib = &nldr_node_obj->execute_lib;
884 root_lib = &nldr_node_obj->delete_lib;
885 /* Unload persistent libraries */
887 i < nldr_node_obj->pers_libs;
889 unload_lib(nldr_node_obj,
893 nldr_node_obj->pers_libs = 0;
900 /* Unload main library */
901 root_lib = &nldr_node_obj->root;
904 unload_lib(nldr_node_obj, root_lib);
906 if (nldr_node_obj->overlay)
907 unload_ovly(nldr_node_obj, phase);
915 * ======== add_ovly_info ========
/* dbll log_write callback invoked during the fake base-image reload.
 * Skips non-overlay sections (load addr == run addr), matches the section
 * name ".<node_name>:<phase>..." against the overlay-node table, then files
 * the section into the matching node's create/delete/execute/other list.
 * FIX: the address-of operator in the add_ovly_sect calls had been
 * corrupted into '§' ("&sect" eaten as an HTML entity); restored
 * '&sect_exists' — add_ovly_sect takes a 'bool *exists' out-parameter. */
917 static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
921 char *sect_name = (char *)sect_info->name;
922 bool sect_exists = false;
926 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
929 /* Is this an overlay section (load address != run address)? */
930 if (sect_info->sect_load_addr == sect_info->sect_run_addr)
933 /* Find the node it belongs to */
934 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
935 node_name = nldr_obj->ovly_table[i].node_name;
936 DBC_REQUIRE(node_name);
937 if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
942 if (!(i < nldr_obj->ovly_nodes))
945 /* Determine which phase this section belongs to */
946 for (pch = sect_name + 1; *pch && *pch != seps; pch++)
950 pch++; /* Skip over the ':' */
951 if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
953 add_ovly_sect(nldr_obj,
955 ovly_table[i].create_sects_list,
956 sect_info, &sect_exists, addr, bytes);
957 if (!status && !sect_exists)
958 nldr_obj->ovly_table[i].create_sects++;
960 } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
962 add_ovly_sect(nldr_obj,
964 ovly_table[i].delete_sects_list,
965 sect_info, &sect_exists, addr, bytes);
966 if (!status && !sect_exists)
967 nldr_obj->ovly_table[i].delete_sects++;
969 } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
971 add_ovly_sect(nldr_obj,
973 ovly_table[i].execute_sects_list,
974 sect_info, &sect_exists, addr, bytes);
975 if (!status && !sect_exists)
976 nldr_obj->ovly_table[i].execute_sects++;
979 /* Put in "other" sections */
981 add_ovly_sect(nldr_obj,
983 ovly_table[i].other_sects_list,
984 sect_info, &sect_exists, addr, bytes);
985 if (!status && !sect_exists)
986 nldr_obj->ovly_table[i].other_sects++;
995 * ======== add_ovly_node =========
996 * Callback function passed to dcd_get_objects.
/* Two-pass callback: when nldr_obj->ovly_table is NULL it only counts
 * overlay nodes; on the second pass it records each node's UUID and a
 * kzalloc'd copy of its name at index ovly_nid. Always frees the phase
 * strings that dcd_get_object_def allocated. */
998 static int add_ovly_node(struct dsp_uuid *uuid_obj,
999 enum dsp_dcdobjtype obj_type, void *handle)
1001 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1002 char *node_name = NULL;
1005 struct dcd_genericobj obj_def;
1008 if (obj_type != DSP_DCDNODETYPE)
1012 dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
1017 /* If overlay node, add to the list */
1018 if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
1019 if (nldr_obj->ovly_table == NULL) {
1020 nldr_obj->ovly_nodes++;
1022 /* Add node to table */
1023 nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
1025 DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
1028 strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
1029 node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
1030 pbuf = kzalloc(len + 1, GFP_KERNEL);
/* pbuf was zeroed by kzalloc, so copying 'len' bytes leaves it
 * NUL-terminated. */
1034 strncpy(pbuf, node_name, len);
1035 nldr_obj->ovly_table[nldr_obj->ovly_nid].
1037 nldr_obj->ovly_nid++;
1041 /* These were allocated in dcd_get_object_def */
1042 kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);
1044 kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);
1046 kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);
1048 kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);
1055 * ======== add_ovly_sect ========
/* Append a new ovly_sect to the list *lst unless a section with the same
 * load address is already present (*exists reports which case occurred).
 * The run address is offset by how far 'addr' sits into the source
 * section, so partial writes map to the right run-time location. */
1057 static int add_ovly_sect(struct nldr_object *nldr_obj,
1058 struct ovly_sect **lst,
1059 struct dbll_sect_info *sect_inf,
1060 bool *exists, u32 addr, u32 bytes)
1062 struct ovly_sect *new_sect = NULL;
1063 struct ovly_sect *last_sect;
1064 struct ovly_sect *ovly_section;
1067 ovly_section = last_sect = *lst;
1069 while (ovly_section) {
1071 * Make sure section has not already been added. Multiple
1072 * 'write' calls may be made to load the section.
1074 if (ovly_section->sect_load_addr == addr) {
1079 last_sect = ovly_section;
1080 ovly_section = ovly_section->next_sect;
1083 if (!ovly_section) {
1085 new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1086 if (new_sect == NULL) {
1089 new_sect->sect_load_addr = addr;
1090 new_sect->sect_run_addr = sect_inf->sect_run_addr +
1091 (addr - sect_inf->sect_load_addr);
1092 new_sect->size = bytes;
1093 new_sect->page = sect_inf->type;
1096 /* Add to the list */
1099 /* First in the list */
1102 last_sect->next_sect = new_sect;
1111 * ======== fake_ovly_write ========
/* NOTE(review): installed as save_attrs.write during the fake reload in
 * nldr_create(); performs no real write. Body elided in this extraction —
 * presumably it just reports 'bytes' as written; confirm upstream. */
1113 static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
1120 * ======== free_sects ========
/* Release the RMM reservations for up to 'alloc_num' sections of a
 * phase's overlay list (only that many were successfully allocated). */
1122 static void free_sects(struct nldr_object *nldr_obj,
1123 struct ovly_sect *phase_sects, u16 alloc_num)
1125 struct ovly_sect *ovly_section = phase_sects;
1129 while (ovly_section && i < alloc_num) {
1131 /* segid - page not supported yet */
1132 /* Reserved memory */
1134 rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
1135 ovly_section->size, true);
1137 ovly_section = ovly_section->next_sect;
1143 * ======== get_symbol_value ========
1144 * Find symbol in library's base image. If not there, check dependent
/* dbll sym_lookup callback. Search order: base image, then the root
 * library itself, then the root's direct dependents (not their
 * dependents), then the node's persistent libraries. Each step tries
 * the plain symbol first and falls back to the C symbol. Returns true
 * once found, with *sym pointing at the symbol value. */
1147 static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
1148 char *sym_name, struct dbll_sym_val **sym)
1150 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1151 struct nldr_nodeobject *nldr_node_obj =
1152 (struct nldr_nodeobject *)rmm_handle;
1153 struct lib_node *root = (struct lib_node *)parg;
1155 bool status = false;
1157 /* check the base image */
1158 status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
1162 nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
1166 * Check in root lib itself. If the library consists of
1167 * multiple object files linked together, some symbols in the
1168 * library may need to be resolved.
1171 status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
1175 nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
1181 * Check in root lib's dependent libraries, but not dependent
1182 * libraries' dependents.
1185 for (i = 0; i < root->dep_libs; i++) {
1187 nldr_obj->ldr_fxns.get_addr_fxn(root->
1194 get_c_addr_fxn(root->dep_libs_tree[i].lib,
1204 * Check in persistent libraries
1207 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1210 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
1213 status = nldr_obj->ldr_fxns.get_c_addr_fxn
1214 (nldr_node_obj->pers_lib_table[i].lib,
1228 * ======== load_lib ========
1229 * Recursively load library and all its dependent libraries. The library
1230 * we're loading is specified by a uuid.
/* Steps: bound recursion at MAXDEPTH; resolve the library file name via
 * the DCD (phase-qualified for the root, NLDR_NOPHASE for dependents);
 * open without loading symbols; skip persistent libs already loaded;
 * reject circular dependencies against lib_path[]; recurse over
 * dependents (persistent ones go into the node's pers_lib_table); then
 * load the root with remote alloc/free/symbol-lookup attrs. On failure,
 * unload whatever dependents were loaded and close the root library. */
1232 static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1233 struct lib_node *root, struct dsp_uuid uuid,
1235 struct dbll_library_obj **lib_path,
1236 enum nldr_phase phase, u16 depth)
1238 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1239 u16 nd_libs = 0; /* Number of dependent libraries */
1240 u16 np_libs = 0; /* Number of persistent libraries */
1241 u16 nd_libs_loaded = 0; /* Number of dep. libraries loaded */
1244 u32 dw_buf_size = NLDR_MAXPATHLENGTH;
1245 dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
1246 struct dbll_attrs new_attrs;
1247 char *psz_file_name = NULL;
1248 struct dsp_uuid *dep_lib_uui_ds = NULL;
1249 bool *persistent_dep_libs = NULL;
1251 bool lib_status = false;
1252 struct lib_node *dep_lib;
1254 if (depth > MAXDEPTH) {
1259 /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
1260 psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
1261 if (psz_file_name == NULL)
1265 /* Get the name of the library */
1268 dcd_get_library_name(nldr_node_obj->nldr_obj->
1269 hdcd_mgr, &uuid, psz_file_name,
1270 &dw_buf_size, phase,
1271 nldr_node_obj->pf_phase_split);
1273 /* Dependent libraries are registered with a phase */
1275 dcd_get_library_name(nldr_node_obj->nldr_obj->
1276 hdcd_mgr, &uuid, psz_file_name,
1277 &dw_buf_size, NLDR_NOPHASE,
1282 /* Open the library, don't load symbols */
1284 nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
1285 DBLL_NOLOAD, &root->lib);
1287 /* Done with file name */
1288 kfree(psz_file_name);
1290 /* Check to see if library not already loaded */
1291 if (!status && root_prstnt) {
1293 find_in_persistent_lib_array(nldr_node_obj, root->lib);
/* Already loaded as persistent: close the duplicate handle. */
1296 nldr_obj->ldr_fxns.close_fxn(root->lib);
1301 /* Check for circular dependencies. */
1302 for (i = 0; i < depth; i++) {
1303 if (root->lib == lib_path[i]) {
1304 /* This condition could be checked by a
1305 * tool at build time. */
1311 /* Add library to current path in dependency tree */
1312 lib_path[depth] = root->lib;
1314 /* Get number of dependent libraries */
1316 dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
1317 &uuid, &nd_libs, &np_libs, phase);
1319 DBC_ASSERT(nd_libs >= np_libs);
1321 if (!(*nldr_node_obj->pf_phase_split))
1324 /* nd_libs = #of dependent libraries */
1325 root->dep_libs = nd_libs - np_libs;
1327 dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
1328 nd_libs, GFP_KERNEL);
1329 persistent_dep_libs =
1330 kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
1331 if (!dep_lib_uui_ds || !persistent_dep_libs)
1334 if (root->dep_libs > 0) {
1335 /* Allocate arrays for dependent lib UUIDs,
1337 root->dep_libs_tree = kzalloc
1338 (sizeof(struct lib_node) *
1339 (root->dep_libs), GFP_KERNEL);
1340 if (!(root->dep_libs_tree))
1346 /* Get the dependent library UUIDs */
1348 dcd_get_dep_libs(nldr_node_obj->
1349 nldr_obj->hdcd_mgr, &uuid,
1350 nd_libs, dep_lib_uui_ds,
1351 persistent_dep_libs,
1358 * Recursively load dependent libraries.
1361 for (i = 0; i < nd_libs; i++) {
1362 /* If root library is NOT persistent, and dep library
1363 * is, then record it. If root library IS persistent,
1364 * the deplib is already included */
1365 if (!root_prstnt && persistent_dep_libs[i] &&
1366 *nldr_node_obj->pf_phase_split) {
1367 if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
1372 /* Allocate library outside of phase */
1374 &nldr_node_obj->pers_lib_table
1375 [nldr_node_obj->pers_libs];
1378 persistent_dep_libs[i] = true;
1380 /* Allocate library within phase */
1381 dep_lib = &root->dep_libs_tree[nd_libs_loaded];
1384 status = load_lib(nldr_node_obj, dep_lib,
1386 persistent_dep_libs[i], lib_path,
/* Account for the loaded dependent in the proper bucket so
 * failure cleanup below unloads exactly what was loaded. */
1390 if ((status != 0) &&
1391 !root_prstnt && persistent_dep_libs[i] &&
1392 *nldr_node_obj->pf_phase_split) {
1393 (nldr_node_obj->pers_libs)++;
1395 if (!persistent_dep_libs[i] ||
1396 !(*nldr_node_obj->pf_phase_split)) {
1406 /* Now we can load the root library */
1408 new_attrs = nldr_obj->ldr_attrs;
1409 new_attrs.sym_arg = root;
1410 new_attrs.rmm_handle = nldr_node_obj;
1411 new_attrs.input_params = nldr_node_obj->priv_ref;
1412 new_attrs.base_image = false;
1415 nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
1420 * In case of failure, unload any dependent libraries that
1421 * were loaded, and close the root library.
1422 * (Persistent libraries are unloaded from the very top)
1425 if (phase != NLDR_EXECUTE) {
1426 for (i = 0; i < nldr_node_obj->pers_libs; i++)
1427 unload_lib(nldr_node_obj,
1428 &nldr_node_obj->pers_lib_table[i]);
1430 nldr_node_obj->pers_libs = 0;
1432 for (i = 0; i < nd_libs_loaded; i++)
1433 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1436 nldr_obj->ldr_fxns.close_fxn(root->lib);
1440 /* Going up one node in the dependency tree */
1443 kfree(dep_lib_uui_ds);
1444 dep_lib_uui_ds = NULL;
1446 kfree(persistent_dep_libs);
1447 persistent_dep_libs = NULL;
1453 * ======== load_ovly ========
/*
 * Load the overlay sections belonging to one phase (create/execute/delete)
 * of an overlay node: find the node in nldr_obj->ovly_table by UUID,
 * reserve DSP memory for each section with rmm_alloc(), then copy the
 * sections to the DSP through nldr_obj->ovly_fxn.  Per-phase reference
 * counts ensure the copy happens only on the first load of a phase;
 * 'other' sections (shared between create and delete) piggyback on the
 * create phase.  On failure the reserved sections are released with
 * free_sects().
 * NOTE(review): some lines of this listing are elided, so parts of the
 * switch labels and status checks are not visible here.
 */
1455 static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
1456 enum nldr_phase phase)
1458 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1459 struct ovly_node *po_node = NULL;
1460 struct ovly_sect *phase_sects = NULL;
1461 struct ovly_sect *other_sects_list = NULL;
1464 u16 other_alloc = 0;
1465 u16 *ref_count = NULL;
1466 u16 *other_ref = NULL;
1468 struct ovly_sect *ovly_section;
1471 /* Find the node in the table */
1472 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1474 (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
1476 po_node = &(nldr_obj->ovly_table[i]);
1481 DBC_ASSERT(i < nldr_obj->ovly_nodes);
/* Select the refcount and section list(s) for the requested phase;
 * only the create phase also tracks the shared 'other' sections. */
1490 ref_count = &(po_node->create_ref);
1491 other_ref = &(po_node->other_ref);
1492 phase_sects = po_node->create_sects_list;
1493 other_sects_list = po_node->other_sects_list;
1497 ref_count = &(po_node->execute_ref);
1498 phase_sects = po_node->execute_sects_list;
1502 ref_count = &(po_node->delete_ref);
1503 phase_sects = po_node->delete_sects_list;
/* Already loaded (nonzero refcount): skip allocation and copy. */
1511 if (ref_count == NULL)
1514 if (*ref_count != 0)
1517 /* 'Allocate' memory for overlay sections of this phase */
1518 ovly_section = phase_sects;
1519 while (ovly_section) {
1520 /* allocate *//* page not supported yet */
1521 /* reserve *//* align */
1522 status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
1523 &(ovly_section->sect_run_addr), true);
1525 ovly_section = ovly_section->next_sect;
1531 if (other_ref && *other_ref == 0) {
1532 /* 'Allocate' memory for other overlay sections
1535 ovly_section = other_sects_list;
1536 while (ovly_section) {
1537 /* page not supported *//* align */
1540 rmm_alloc(nldr_obj->rmm, 0,
1541 ovly_section->size, 0,
1542 &(ovly_section->sect_run_addr),
1545 ovly_section = ovly_section->next_sect;
/* First load of this phase: copy each section down to the DSP. */
1553 if (*ref_count == 0) {
1555 /* Load sections for this phase */
1556 ovly_section = phase_sects;
1557 while (ovly_section && !status) {
1559 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1566 ovly_section->page);
/* A short transfer (bytes != size) is treated as failure. */
1567 if (bytes != ovly_section->size)
1570 ovly_section = ovly_section->next_sect;
1574 if (other_ref && *other_ref == 0) {
1576 /* Load other sections (create phase) */
1577 ovly_section = other_sects_list;
1578 while (ovly_section && !status) {
1580 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1587 ovly_section->page);
1588 if (bytes != ovly_section->size)
1591 ovly_section = ovly_section->next_sect;
/* Error path: release whatever was reserved above. */
1596 /* 'Deallocate' memory */
1597 free_sects(nldr_obj, phase_sects, alloc_num);
1598 free_sects(nldr_obj, other_sects_list, other_alloc);
/* Success: bump the phase refcount (body elided from this listing). */
1601 if (!status && (ref_count != NULL)) {
1612 * ======== remote_alloc ========
/*
 * RMM allocation callback handed to the dynamic loader: carve out DSP
 * memory for a code/data/bss section of a node.  Converts the byte size
 * to DSP words, widens the alignment to the DSP cache-line size, then
 * tries, in order: the caller-forced segment (segmnt_id != -1), the
 * node's preferred/required segment for the current phase (packed in
 * seg_id[]/code_data_flag_mask), any internal/external segment of the
 * right type, and finally - unless the segment was *required* - any
 * segment whose type matches.  The chosen segment id is stored in
 * rmm_addr_obj->segid so remote_free() can return the block later.
 * NOTE(review): several lines (case labels, assignments such as the
 * word_size computation's left-hand side) are elided from this listing.
 */
1614 static int remote_alloc(void **ref, u16 mem_sect, u32 size,
1615 u32 align, u32 *dsp_address,
1616 s32 segmnt_id, s32 req,
1619 struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
1620 struct nldr_object *nldr_obj;
1621 struct rmm_target_obj *rmm;
1622 u16 mem_phase_bit = MAXFLAGS;
/* dsp_address doubles as a struct rmm_addr out-parameter. */
1627 struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
1628 bool mem_load_req = false;
1629 int status = -ENOMEM; /* Set to fail */
1631 DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
1632 mem_sect == DBLL_BSS);
1633 nldr_obj = hnode->nldr_obj;
1634 rmm = nldr_obj->rmm;
1635 /* Convert size to DSP words */
1637 (size + nldr_obj->us_dsp_word_size -
1638 1) / nldr_obj->us_dsp_word_size;
1639 /* Modify memory 'align' to account for DSP cache line size */
1640 align = find_lcm(GEM_CACHE_LINE_SIZE, align);
1641 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
/* Caller forced a specific segment: record it and allocate from it. */
1642 if (segmnt_id != -1) {
1643 rmm_addr_obj->segid = segmnt_id;
/* Map the node's phase to the flag-bit index for the data mask
 * (see the packed-mask format documented at the top of this file). */
1647 switch (hnode->phase) {
1649 mem_phase_bit = CREATEDATAFLAGBIT;
1652 mem_phase_bit = DELETEDATAFLAGBIT;
1655 mem_phase_bit = EXECUTEDATAFLAGBIT;
/* Code sections use a different bit offset than data sections. */
1661 if (mem_sect == DBLL_CODE)
1664 if (mem_phase_bit < MAXFLAGS)
1665 segid = hnode->seg_id[mem_phase_bit];
1667 /* Determine if there is a memory loading requirement */
1668 if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
1669 mem_load_req = true;
1672 mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
1674 /* Find an appropriate segment based on mem_sect */
1675 if (segid == NULLID) {
1676 /* No memory requirements of preferences */
1677 DBC_ASSERT(!mem_load_req);
1680 if (segid <= MAXSEGID) {
1681 DBC_ASSERT(segid < nldr_obj->dload_segs);
1682 /* Attempt to allocate from segid first. */
1683 rmm_addr_obj->segid = segid;
1685 rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
1687 dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
1691 /* segid > MAXSEGID ==> Internal or external memory */
1692 DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
1693 /* Check for any internal or external memory segment,
1694 * depending on segid. */
1695 mem_sect_type |= segid == MEMINTERNALID ?
1696 DYNM_INTERNAL : DYNM_EXTERNAL;
/* Scan the segment table for one matching all required type bits. */
1697 for (i = 0; i < nldr_obj->dload_segs; i++) {
1698 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1702 status = rmm_alloc(rmm, i, word_size, align,
1703 dsp_address, false);
1705 /* Save segid for freeing later */
1706 rmm_addr_obj->segid = i;
1712 /* Haven't found memory yet, attempt to find any segment that works */
/* Only permitted when the segment was a preference, not a requirement. */
1713 if (status == -ENOMEM && !mem_load_req) {
1714 dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
1715 "another\n", __func__);
1716 for (i = 0; i < nldr_obj->dload_segs; i++) {
1717 /* All bits of mem_sect_type must be set */
1718 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1722 status = rmm_alloc(rmm, i, word_size, align,
1723 dsp_address, false);
1726 rmm_addr_obj->segid = i;
/*
 * ======== remote_free ========
 * RMM free callback for the dynamic loader: return a block previously
 * handed out by remote_alloc().  Converts the byte size back to DSP
 * words and releases it via rmm_free(); 'space' is the segment id that
 * remote_alloc() recorded.  Returns 0 on success (the success branch is
 * elided from this listing), -ENOMEM otherwise.
 */
1735 static int remote_free(void **ref, u16 space, u32 dsp_address,
1736 u32 size, bool reserve)
/* Note: 'ref' is the nldr_object here, not a node as in remote_alloc. */
1738 struct nldr_object *nldr_obj = (struct nldr_object *)ref;
1739 struct rmm_target_obj *rmm;
1741 int status = -ENOMEM; /* Set to fail */
1743 DBC_REQUIRE(nldr_obj);
1745 rmm = nldr_obj->rmm;
1747 /* Convert size to DSP words */
/* Round up so a partial trailing word is still covered. */
1749 (size + nldr_obj->us_dsp_word_size -
1750 1) / nldr_obj->us_dsp_word_size;
1752 if (rmm_free(rmm, space, dsp_address, word_size, reserve))
1759 * ======== unload_lib ========
/*
 * Unload one library node: first recursively unload its phase-local
 * dependent libraries, then unload and close the root library itself,
 * and finally free the dependent-library tree.  Mirrors the loading
 * order used by load_lib().
 */
1761 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1762 struct lib_node *root)
1764 struct dbll_attrs new_attrs;
1765 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1768 DBC_ASSERT(root != NULL);
1770 /* Unload dependent libraries */
1771 for (i = 0; i < root->dep_libs; i++)
1772 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
/* Rebuild loader attrs so the unload callbacks (rmm free, symbol
 * lookup) see the same context the load used. */
1776 new_attrs = nldr_obj->ldr_attrs;
1777 new_attrs.rmm_handle = nldr_obj->rmm;
1778 new_attrs.input_params = nldr_node_obj->priv_ref;
1779 new_attrs.base_image = false;
1780 new_attrs.sym_arg = root;
1783 /* Unload the root library */
1784 nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1785 nldr_obj->ldr_fxns.close_fxn(root->lib);
1788 /* Free dependent library list */
1789 kfree(root->dep_libs_tree);
1790 root->dep_libs_tree = NULL;
1794 * ======== unload_ovly ========
/*
 * Release the overlay sections of one phase of an overlay node: find the
 * node in nldr_obj->ovly_table by UUID, decrement the phase's reference
 * count (decrement itself is elided from this listing), and when it hits
 * zero 'deallocate' the phase's sections with free_sects().  The shared
 * 'other' sections are released only during the delete phase, mirroring
 * how load_ovly() charges them to the create phase.
 */
1796 static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
1797 enum nldr_phase phase)
1799 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1800 struct ovly_node *po_node = NULL;
1801 struct ovly_sect *phase_sects = NULL;
1802 struct ovly_sect *other_sects_list = NULL;
1805 u16 other_alloc = 0;
1806 u16 *ref_count = NULL;
1807 u16 *other_ref = NULL;
1809 /* Find the node in the table */
1810 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1812 (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
1814 po_node = &(nldr_obj->ovly_table[i]);
1819 DBC_ASSERT(i < nldr_obj->ovly_nodes);
1822 /* TODO: Should we print warning here? */
/* Select the refcount, section list and section count for the phase. */
1827 ref_count = &(po_node->create_ref);
1828 phase_sects = po_node->create_sects_list;
1829 alloc_num = po_node->create_sects;
1832 ref_count = &(po_node->execute_ref);
1833 phase_sects = po_node->execute_sects_list;
1834 alloc_num = po_node->execute_sects;
1837 ref_count = &(po_node->delete_ref);
1838 other_ref = &(po_node->other_ref);
1839 phase_sects = po_node->delete_sects_list;
1840 /* 'Other' overlay sections are unloaded in the delete phase */
1841 other_sects_list = po_node->other_sects_list;
1842 alloc_num = po_node->delete_sects;
1843 other_alloc = po_node->other_sects;
/* Unload without a matching load would underflow the refcount. */
1849 DBC_ASSERT(ref_count && (*ref_count > 0));
1850 if (ref_count && (*ref_count > 0)) {
1853 DBC_ASSERT(*other_ref > 0);
/* Last reference gone: give the DSP memory back. */
1858 if (ref_count && *ref_count == 0) {
1859 /* 'Deallocate' memory */
1860 free_sects(nldr_obj, phase_sects, alloc_num);
1862 if (other_ref && *other_ref == 0)
1863 free_sects(nldr_obj, other_sects_list, other_alloc);
1867 * ======== find_in_persistent_lib_array ========
/*
 * Linear search of the node's persistent-library table for an already
 * loaded library handle.  Presumably returns true on a match and false
 * otherwise (the return statements are elided from this listing) -
 * caller at load_lib() uses it to detect an already-loaded persistent
 * library.
 */
1869 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1870 struct dbll_library_obj *lib)
1874 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
/* Compare by handle identity, not by name or UUID. */
1875 if (lib == nldr_node_obj->pers_lib_table[i].lib)
1884 * ================ Find LCM (Least Common Multiplier ===
1886 static u32 find_lcm(u32 a, u32 b)
1890 ret = a * b / gcd(a, b);
1895 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1897 * nldr_find_addr() - Find the closest symbol to the given address based on
1898 * dynamic node object.
1900 * @nldr_node: Dynamic node object
1901 * @sym_addr: Given address to find the dsp symbol
1902 * @offset_range: offset range to look for dsp symbol
1903 * @offset_output: Symbol Output address
1904 * @sym_name: String with the dsp symbol
1906 * This function finds the node library for a given address and
1907 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
/*
 * NOTE(review): the tail of this function (return path and closing
 * brace) falls outside this listing; search order visible here is:
 * phase library, then its dependent libraries, then the persistent
 * library table, logging a debug message when nothing matches.
 */
1909 int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
1910 u32 offset_range, void *offset_output, char *sym_name)
1913 bool status1 = false;
1915 struct lib_node root = { NULL, 0, NULL };
1916 DBC_REQUIRE(refs > 0);
1917 DBC_REQUIRE(offset_output != NULL);
1918 DBC_REQUIRE(sym_name != NULL);
1919 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
1920 sym_addr, offset_range, (u32) offset_output, sym_name);
/* Phase-split dynamic nodes keep one library tree per phase; pick the
 * tree that matches the node's current phase. */
1922 if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
1923 switch (nldr_node->phase) {
1925 root = nldr_node->create_lib;
1928 root = nldr_node->execute_lib;
1931 root = nldr_node->delete_lib;
1938 /* for Overlay nodes or non-split Dynamic nodes */
1939 root = nldr_node->root;
/* First try the root library itself. */
1942 status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
1943 offset_range, offset_output, sym_name);
1945 /* If symbol not found, check dependent libraries */
1947 for (i = 0; i < root.dep_libs; i++) {
1948 status1 = dbll_find_dsp_symbol(
1949 root.dep_libs_tree[i].lib, sym_addr,
1950 offset_range, offset_output, sym_name);
1955 /* Check persistent libraries */
1957 for (i = 0; i < nldr_node->pers_libs; i++) {
1958 status1 = dbll_find_dsp_symbol(
1959 nldr_node->pers_lib_table[i].lib, sym_addr,
1960 offset_range, offset_output, sym_name);
/* Nothing matched anywhere: leave a trace for the backtrace user. */
1967 pr_debug("%s: Address 0x%x not found in range %d.\n",
1968 __func__, sym_addr, offset_range);