4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Copyright (C) 2005-2006 Texas Instruments, Inc.
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 * This memory manager provides general heap management and arbitrary
19 * alignment for any number of memory segments.
23 * Memory blocks are allocated from the end of the first free memory
24 * block large enough to satisfy the request. Alignment requirements
25 * are satisfied by "sliding" the block forward until its base satisfies
26 * the alignment specification; if this is not possible then the next
27 * free block large enough to hold the request is tried.
29 * Since alignment can cause the creation of a new free block - the
30 * unused memory formed between the start of the original free block
31 * and the start of the allocated block - the memory manager must free
32 * this memory to prevent a memory leak.
34 * Overlay memory is managed by reserving through rmm_alloc, and freeing
35 * it through rmm_free. The memory manager prevents DSP code/data that is
36 * overlayed from being overwritten as long as the memory it runs at has
37 * been allocated, and not yet freed.
40 #include <linux/types.h>
41 #include <linux/list.h>
43 /* ----------------------------------- Host OS */
44 #include <dspbridge/host_os.h>
46 /* ----------------------------------- DSP/BIOS Bridge */
47 #include <dspbridge/dbdefs.h>
49 /* ----------------------------------- This */
50 #include <dspbridge/rmm.h>
53 * ======== rmm_header ========
54 * This header is used to maintain a list of free memory blocks.
/*
 * NOTE(review): the `struct rmm_header {` opener and closing `};` are not
 * visible in this extract; the three fields below are its members. Each
 * node describes one free region of a DSP memory segment and links to the
 * next free region (singly linked, address-ordered — see free_block()).
 */
57 struct rmm_header *next; /* form a free memory link list */
58 u32 size; /* size of the free memory */
59 u32 addr; /* DSP address of memory block */
63 * ======== rmm_ovly_sect ========
64 * Keeps track of memory occupied by overlay section.
/*
 * One node per reserved overlay region; nodes live on
 * target->ovly_list, kept sorted by ascending addr (see rmm_alloc()).
 * NOTE(review): the closing `};` falls outside this extract.
 */
66 struct rmm_ovly_sect {
67 struct list_head list_elem;
68 u32 addr; /* Start of memory section */
69 u32 size; /* Length (target MAUs) of section */
70 s32 page; /* Memory page */
74 * ======== rmm_target_obj ========
/*
 * Top-level memory-manager instance: one segment table, one free-list
 * head per segment, and the list of in-use overlay sections.
 * NOTE(review): a num_segs member and the closing `};` are referenced by
 * the functions below but fall outside this extract.
 */
76 struct rmm_target_obj {
77 struct rmm_segment *seg_tab;
78 struct rmm_header **free_list;
80 struct list_head ovly_list; /* List of overlay memory in use */
83 static u32 refs; /* module reference count */
/* Internal helpers: low-level segment allocate/free (defined below). */
85 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
86 u32 align, u32 *dsp_address);
87 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
91 * ======== rmm_al ========
/*
 * rmm_alloc() - allocate a block from segment `segid`, or (when `reserve`
 * is set, presumably — the branch condition is missing from this extract)
 * record an overlay region so it cannot be double-booked.
 * On the plain-allocation path the DSP address is returned through
 * *dsp_address; on the overlay path an rmm_ovly_sect is inserted into
 * target->ovly_list in ascending-address order after an overlap check.
 * NOTE(review): many interior lines (returns, closing braces, the `addr`
 * initialisation) are missing from this extract; `§->` below is an
 * HTML-mangled `&sect->` — restore before compiling.
 */
93 int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
94 u32 align, u32 *dsp_address, bool reserve)
96 struct rmm_ovly_sect *sect, *prev_sect = NULL;
97 struct rmm_ovly_sect *new_sect;
/* Non-overlay path: delegate to the segment free-list allocator. */
102 if (!alloc_block(target, segid, size, align, dsp_address)) {
105 /* Increment the number of allocated blocks in this
107 target->seg_tab[segid].number++;
111 /* An overlay section - See if block is already in use. If not,
112 * insert into the list in ascending address size. */
114 /* Find place to insert new list element. List is sorted from
115 * smallest to largest address. */
116 list_for_each_entry(sect, &target->ovly_list, list_elem) {
117 if (addr <= sect->addr) {
118 /* Check for overlap with sect */
119 if ((addr + size > sect->addr) || (prev_sect &&
130 /* No overlap - allocate list element for new section. */
131 new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
132 if (new_sect == NULL) {
135 new_sect->addr = addr;
136 new_sect->size = size;
137 new_sect->page = segid;
/* Insert either at the tail (scanned whole list) or just before the
 * first section with a larger address, keeping the list sorted. */
138 if (list_is_last(§->list_elem, &target->ovly_list))
139 /* Put new section at the end of the list */
140 list_add_tail(&new_sect->list_elem,
143 /* Put new section just before sect */
144 list_add_tail(&new_sect->list_elem,
153 * ======== rmm_create ========
/*
 * rmm_create() - build a target object from a caller-supplied segment
 * table: allocates the object, a free-list head per segment, and a copy
 * of the segment table, seeding each free list with one header spanning
 * the whole segment. Result returned through *target_obj.
 * NOTE(review): the error-unwind branches after each failed kzalloc are
 * truncated out of this extract — verify cleanup frees partial state.
 * NOTE(review): the two `num_segs * sizeof(...)` kzalloc calls should be
 * kcalloc() for overflow-checked array allocation.
 */
155 int rmm_create(struct rmm_target_obj **target_obj,
156 struct rmm_segment seg_tab[], u32 num_segs)
158 struct rmm_header *hptr;
159 struct rmm_segment *sptr, *tmp;
160 struct rmm_target_obj *target;
164 /* Allocate DBL target object */
165 target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
173 target->num_segs = num_segs;
177 /* Allocate the memory for freelist from host's memory */
178 target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
180 if (target->free_list == NULL) {
183 /* Allocate headers for each element on the free list */
184 for (i = 0; i < (s32) num_segs; i++) {
185 target->free_list[i] =
186 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
187 if (target->free_list[i] == NULL) {
192 /* Allocate memory for initial segment table */
193 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
195 if (target->seg_tab == NULL) {
198 /* Initialize segment table and free list */
199 sptr = target->seg_tab;
/* Each segment starts as one free block covering [base, base+length). */
200 for (i = 0, tmp = seg_tab; num_segs > 0;
203 hptr = target->free_list[i];
204 hptr->addr = tmp->base;
205 hptr->size = tmp->length;
213 /* Initialize overlay memory list */
215 INIT_LIST_HEAD(&target->ovly_list);
218 *target_obj = target;
230 * ======== rmm_delete ========
/*
 * rmm_delete() - tear down a target object: free the segment table, every
 * overlay-section node, every free-list header chain, the free-list array,
 * and (presumably, outside this extract) the target object itself.
 * NOTE(review): `§->` is an HTML-mangled `&sect->`; the inner loop that
 * walks hptr/next to free chained headers is truncated from this extract.
 */
232 void rmm_delete(struct rmm_target_obj *target)
234 struct rmm_ovly_sect *sect, *tmp;
235 struct rmm_header *hptr;
236 struct rmm_header *next;
239 kfree(target->seg_tab);
/* _safe variant: nodes are freed while iterating. */
241 list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
242 list_del(§->list_elem);
246 if (target->free_list != NULL) {
247 /* Free elements on freelist */
248 for (i = 0; i < target->num_segs; i++) {
249 hptr = next = target->free_list[i];
256 kfree(target->free_list);
263 * ======== rmm_exit ========
271 * ======== rmm_free ========
/*
 * rmm_free() - release memory previously obtained via rmm_alloc().
 * Plain allocations are returned to the segment free list via
 * free_block() and the segment's allocated-block count is decremented;
 * reserved overlay sections are matched by start address and unlinked.
 * NOTE(review): the reserve/free branch condition, the sect kfree, and
 * the return are missing from this extract; `§->` is mangled `&sect->`.
 */
273 bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
276 struct rmm_ovly_sect *sect, *tmp;
280 * Free or unreserve memory.
283 ret = free_block(target, segid, dsp_addr, size);
285 target->seg_tab[segid].number--;
288 /* Unreserve memory */
289 list_for_each_entry_safe(sect, tmp, &target->ovly_list,
/* Overlay sections are keyed by their exact start address. */
291 if (dsp_addr == sect->addr) {
292 /* Remove from list */
293 list_del(§->list_elem);
303 * ======== rmm_init ========
313 * ======== rmm_stat ========
/*
 * rmm_stat() - report memory statistics for one segment: total size,
 * number and total size of free blocks, largest free block, and number
 * of allocated blocks. Returns true (presumably — return statements fall
 * outside this extract) only for a valid segid.
 * NOTE(review): free_blocks is counted in lines missing from this
 * extract (likely incremented inside the while loop).
 */
315 bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
316 struct dsp_memstat *mem_stat_buf)
318 struct rmm_header *head;
320 u32 max_free_size = 0;
321 u32 total_free_size = 0;
/* Bounds-check segid before indexing free_list/seg_tab. */
324 if ((u32) segid < target->num_segs) {
325 head = target->free_list[segid];
327 /* Collect data from free_list */
328 while (head != NULL) {
329 max_free_size = max(max_free_size, head->size);
330 total_free_size += head->size;
336 mem_stat_buf->size = target->seg_tab[segid].length;
338 /* num_free_blocks */
339 mem_stat_buf->num_free_blocks = free_blocks;
341 /* total_free_size */
342 mem_stat_buf->total_free_size = total_free_size;
344 /* len_max_free_block */
345 mem_stat_buf->len_max_free_block = max_free_size;
347 /* num_alloc_blocks */
348 mem_stat_buf->num_alloc_blocks =
349 target->seg_tab[segid].number;
358 * ======== balloc ========
359 * This allocation function allocates memory from the lowest addresses
/*
 * alloc_block() - first-fit allocator over a segment's free list.
 * Walks from the head; for each free block it computes the padding
 * (tmpalign) needed to align the block's base, and takes the first block
 * large enough for size + padding. A block consumed exactly is unlinked;
 * otherwise it is shrunk from the bottom. Any alignment hole is handed
 * back to free_block() so it is not leaked (see file header comment).
 * NOTE(review): hsize/next assignments, the tmpalign==0 guard, and the
 * loop opener matching `} while` are missing from this extract; line 385's
 * extra parentheses around the assignment look like a leftover artifact.
 */
362 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
363 u32 align, u32 *dsp_address)
365 struct rmm_header *head;
366 struct rmm_header *prevhead = NULL;
367 struct rmm_header *next;
/* Treat align==0 as byte alignment so the modulo below is safe. */
374 alignbytes = (align == 0) ? 1 : align;
376 head = target->free_list[segid];
382 addr = head->addr; /* alloc from the bottom */
384 /* align allocation */
385 (tmpalign = (u32) addr % alignbytes);
387 tmpalign = alignbytes - tmpalign;
389 allocsize = size + tmpalign;
391 if (hsize >= allocsize) { /* big enough */
/* Exact fit: unlink the whole free block from the list. */
392 if (hsize == allocsize && prevhead != NULL) {
393 prevhead->next = next;
/* Partial fit: shave the allocation off the block's bottom. */
396 head->size = hsize - allocsize;
397 head->addr += allocsize;
400 /* free up any hole created by alignment */
402 free_block(target, segid, addr, tmpalign);
404 *dsp_address = addr + tmpalign;
411 } while (head != NULL);
417 * ======== free_block ========
418 * TO DO: free_block() allocates memory, which could result in failure.
419 * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
420 * free_block() could use an rmm_header from the pool, freeing as blocks
/*
 * free_block() - return [addr, addr+size) to segment segid's free list.
 * Allocates a new header (rhead), finds the address-ordered insertion
 * point, then coalesces with the adjacent upper (thead) and lower (head)
 * neighbours when the regions are contiguous.
 * NOTE(review): this function continues past the end of this extract
 * (the remaining coalescing/kfree/return lines are not visible); the
 * insertion/linking lines between the search and the join steps are
 * also truncated.
 */
423 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
426 struct rmm_header *head;
427 struct rmm_header *thead;
428 struct rmm_header *rhead;
431 /* Create a memory header to hold the newly free'd block. */
432 rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
436 /* search down the free list to find the right place for addr */
437 head = target->free_list[segid];
/* Advance head to the last free block starting at or below addr. */
439 if (addr >= head->addr) {
440 while (head->next != NULL && addr > head->next->addr)
457 /* join with upper block, if possible */
458 if (thead != NULL && (rhead->addr + rhead->size) ==
460 head->next = rhead->next;
461 thead->size = size + thead->size;
467 /* join with the lower block, if possible */
468 if ((head->addr + head->size) == rhead->addr) {
469 head->next = rhead->next;
470 head->size = head->size + rhead->size;