55acfcd80a840fce5213117078371a45ce26efa4
[pandora-kernel.git] / drivers / staging / tidspbridge / rmgr / rmm.c
1 /*
2  * rmm.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Copyright (C) 2005-2006 Texas Instruments, Inc.
7  *
8  * This package is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 /*
18  *  This memory manager provides general heap management and arbitrary
19  *  alignment for any number of memory segments.
20  *
21  *  Notes:
22  *
23  *  Memory blocks are allocated from the end of the first free memory
24  *  block large enough to satisfy the request.  Alignment requirements
25  *  are satisfied by "sliding" the block forward until its base satisfies
26  *  the alignment specification; if this is not possible then the next
27  *  free block large enough to hold the request is tried.
28  *
29  *  Since alignment can cause the creation of a new free block - the
30  *  unused memory formed between the start of the original free block
31  *  and the start of the allocated block - the memory manager must free
32  *  this memory to prevent a memory leak.
33  *
34  *  Overlay memory is managed by reserving through rmm_alloc, and freeing
35  *  it through rmm_free. The memory manager prevents DSP code/data that is
36  *  overlayed from being overwritten as long as the memory it runs at has
37  *  been allocated, and not yet freed.
38  */
39
40 #include <linux/types.h>
41 #include <linux/list.h>
42
43 /*  ----------------------------------- Host OS */
44 #include <dspbridge/host_os.h>
45
46 /*  ----------------------------------- DSP/BIOS Bridge */
47 #include <dspbridge/dbdefs.h>
48
49 /*  ----------------------------------- This */
50 #include <dspbridge/rmm.h>
51
/*
 *  ======== rmm_header ========
 *  This header is used to maintain a list of free memory blocks.
 *  Each segment's free list is singly linked and kept sorted by
 *  ascending DSP address (ordering maintained by free_block()).
 */
struct rmm_header {
	struct rmm_header *next;	/* form a free memory link list */
	u32 size;		/* size of the free memory */
	u32 addr;		/* DSP address of memory block */
};
61
/*
 *  ======== rmm_ovly_sect ========
 *  Keeps track of memory occupied by overlay section.
 *  Linked into rmm_target_obj.ovly_list, sorted by ascending 'addr'
 *  (ordering maintained by rmm_alloc() with reserve == true).
 */
struct rmm_ovly_sect {
	struct list_head list_elem;
	u32 addr;		/* Start of memory section */
	u32 size;		/* Length (target MAUs) of section */
	s32 page;		/* Memory page; holds the segid passed to rmm_alloc() */
};
72
/*
 *  ======== rmm_target_obj ========
 *  Per-target state: a table of num_segs memory segments, one free list
 *  per segment, and the list of overlay sections currently reserved.
 */
struct rmm_target_obj {
	struct rmm_segment *seg_tab;	/* array of num_segs segment descriptors */
	struct rmm_header **free_list;	/* free_list[i]: head of segment i's free list */
	u32 num_segs;
	struct list_head ovly_list;	/* List of overlay memory in use */
};
82
static u32 refs;		/* module reference count */

/* Forward declarations for the per-segment free-list helpers below. */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
			u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size);
89
/*
 *  ======== rmm_alloc ========
 *  reserve == false: allocate 'size' target MAUs from segment 'segid'
 *  (honouring 'align') via alloc_block(); the address comes back through
 *  *dsp_address and the segment's allocated-block count is bumped.
 *  reserve == true: *dsp_address is an INPUT -- record the overlay range
 *  [*dsp_address, *dsp_address + size) in the address-sorted ovly_list.
 *  Returns 0, -ENOMEM on allocation failure, or -ENXIO if the requested
 *  overlay range overlaps a section already reserved.
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
		     u32 align, u32 *dsp_address, bool reserve)
{
	struct rmm_ovly_sect *sect, *prev_sect = NULL;
	struct rmm_ovly_sect *new_sect;
	u32 addr;
	int status = 0;

	if (!reserve) {
		if (!alloc_block(target, segid, size, align, dsp_address)) {
			status = -ENOMEM;
		} else {
			/* Increment the number of allocated blocks in this
			 * segment */
			target->seg_tab[segid].number++;
		}
		goto func_end;
	}
	/* An overlay section - See if block is already in use. If not,
	 * insert into the list in ascending address size. */
	addr = *dsp_address;
	/*  Find place to insert new list element. List is sorted from
	 *  smallest to largest address. */
	list_for_each_entry(sect, &target->ovly_list, list_elem) {
		if (addr <= sect->addr) {
			/* Check for overlap with sect (above) and with the
			 * previous section (below), if any */
			if ((addr + size > sect->addr) || (prev_sect &&
							   (prev_sect->addr +
							    prev_sect->size >
							    addr))) {
				status = -ENXIO;
			}
			break;
		}
		prev_sect = sect;
	}
	if (!status) {
		/* No overlap - allocate list element for new section. */
		new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
		if (new_sect == NULL) {
			status = -ENOMEM;
		} else {
			new_sect->addr = addr;
			new_sect->size = size;
			new_sect->page = segid;
			/*
			 * NOTE: if the loop above ran to completion without
			 * breaking (including the empty-list case), 'sect' is
			 * the list head disguised as an entry, i.e.
			 * &sect->list_elem == &target->ovly_list; either
			 * branch below then inserts at the tail, which is
			 * the correct position.
			 */
			if (list_is_last(&sect->list_elem, &target->ovly_list))
				/* Put new section at the end of the list */
				list_add_tail(&new_sect->list_elem,
						&target->ovly_list);
			else
				/* Put new section just before sect */
				list_add_tail(&new_sect->list_elem,
						&sect->list_elem);
		}
	}
func_end:
	return status;
}
151
152 /*
153  *  ======== rmm_create ========
154  */
155 int rmm_create(struct rmm_target_obj **target_obj,
156                       struct rmm_segment seg_tab[], u32 num_segs)
157 {
158         struct rmm_header *hptr;
159         struct rmm_segment *sptr, *tmp;
160         struct rmm_target_obj *target;
161         s32 i;
162         int status = 0;
163
164         /* Allocate DBL target object */
165         target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
166
167         if (target == NULL)
168                 status = -ENOMEM;
169
170         if (status)
171                 goto func_cont;
172
173         target->num_segs = num_segs;
174         if (!(num_segs > 0))
175                 goto func_cont;
176
177         /* Allocate the memory for freelist from host's memory */
178         target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
179                                                         GFP_KERNEL);
180         if (target->free_list == NULL) {
181                 status = -ENOMEM;
182         } else {
183                 /* Allocate headers for each element on the free list */
184                 for (i = 0; i < (s32) num_segs; i++) {
185                         target->free_list[i] =
186                                 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
187                         if (target->free_list[i] == NULL) {
188                                 status = -ENOMEM;
189                                 break;
190                         }
191                 }
192                 /* Allocate memory for initial segment table */
193                 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
194                                                                 GFP_KERNEL);
195                 if (target->seg_tab == NULL) {
196                         status = -ENOMEM;
197                 } else {
198                         /* Initialize segment table and free list */
199                         sptr = target->seg_tab;
200                         for (i = 0, tmp = seg_tab; num_segs > 0;
201                              num_segs--, i++) {
202                                 *sptr = *tmp;
203                                 hptr = target->free_list[i];
204                                 hptr->addr = tmp->base;
205                                 hptr->size = tmp->length;
206                                 hptr->next = NULL;
207                                 tmp++;
208                                 sptr++;
209                         }
210                 }
211         }
212 func_cont:
213         /* Initialize overlay memory list */
214         if (!status)
215                 INIT_LIST_HEAD(&target->ovly_list);
216
217         if (!status) {
218                 *target_obj = target;
219         } else {
220                 *target_obj = NULL;
221                 if (target)
222                         rmm_delete(target);
223
224         }
225
226         return status;
227 }
228
229 /*
230  *  ======== rmm_delete ========
231  */
232 void rmm_delete(struct rmm_target_obj *target)
233 {
234         struct rmm_ovly_sect *sect, *tmp;
235         struct rmm_header *hptr;
236         struct rmm_header *next;
237         u32 i;
238
239         kfree(target->seg_tab);
240
241         list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
242                 list_del(&sect->list_elem);
243                 kfree(sect);
244         }
245
246         if (target->free_list != NULL) {
247                 /* Free elements on freelist */
248                 for (i = 0; i < target->num_segs; i++) {
249                         hptr = next = target->free_list[i];
250                         while (next) {
251                                 hptr = next;
252                                 next = hptr->next;
253                                 kfree(hptr);
254                         }
255                 }
256                 kfree(target->free_list);
257         }
258
259         kfree(target);
260 }
261
262 /*
263  *  ======== rmm_exit ========
264  */
265 void rmm_exit(void)
266 {
267         refs--;
268 }
269
270 /*
271  *  ======== rmm_free ========
272  */
273 bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
274               bool reserved)
275 {
276         struct rmm_ovly_sect *sect, *tmp;
277         bool ret = false;
278
279         /*
280          *  Free or unreserve memory.
281          */
282         if (!reserved) {
283                 ret = free_block(target, segid, dsp_addr, size);
284                 if (ret)
285                         target->seg_tab[segid].number--;
286
287         } else {
288                 /* Unreserve memory */
289                 list_for_each_entry_safe(sect, tmp, &target->ovly_list,
290                                 list_elem) {
291                         if (dsp_addr == sect->addr) {
292                                 /* Remove from list */
293                                 list_del(&sect->list_elem);
294                                 kfree(sect);
295                                 return true;
296                         }
297                 }
298         }
299         return ret;
300 }
301
302 /*
303  *  ======== rmm_init ========
304  */
305 bool rmm_init(void)
306 {
307         refs++;
308
309         return true;
310 }
311
312 /*
313  *  ======== rmm_stat ========
314  */
315 bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
316               struct dsp_memstat *mem_stat_buf)
317 {
318         struct rmm_header *head;
319         bool ret = false;
320         u32 max_free_size = 0;
321         u32 total_free_size = 0;
322         u32 free_blocks = 0;
323
324         if ((u32) segid < target->num_segs) {
325                 head = target->free_list[segid];
326
327                 /* Collect data from free_list */
328                 while (head != NULL) {
329                         max_free_size = max(max_free_size, head->size);
330                         total_free_size += head->size;
331                         free_blocks++;
332                         head = head->next;
333                 }
334
335                 /* ul_size */
336                 mem_stat_buf->size = target->seg_tab[segid].length;
337
338                 /* num_free_blocks */
339                 mem_stat_buf->num_free_blocks = free_blocks;
340
341                 /* total_free_size */
342                 mem_stat_buf->total_free_size = total_free_size;
343
344                 /* len_max_free_block */
345                 mem_stat_buf->len_max_free_block = max_free_size;
346
347                 /* num_alloc_blocks */
348                 mem_stat_buf->num_alloc_blocks =
349                     target->seg_tab[segid].number;
350
351                 ret = true;
352         }
353
354         return ret;
355 }
356
357 /*
358  *  ======== balloc ========
359  *  This allocation function allocates memory from the lowest addresses
360  *  first.
361  */
362 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
363                         u32 align, u32 *dsp_address)
364 {
365         struct rmm_header *head;
366         struct rmm_header *prevhead = NULL;
367         struct rmm_header *next;
368         u32 tmpalign;
369         u32 alignbytes;
370         u32 hsize;
371         u32 allocsize;
372         u32 addr;
373
374         alignbytes = (align == 0) ? 1 : align;
375         prevhead = NULL;
376         head = target->free_list[segid];
377
378         do {
379                 hsize = head->size;
380                 next = head->next;
381
382                 addr = head->addr;      /* alloc from the bottom */
383
384                 /* align allocation */
385                 (tmpalign = (u32) addr % alignbytes);
386                 if (tmpalign != 0)
387                         tmpalign = alignbytes - tmpalign;
388
389                 allocsize = size + tmpalign;
390
391                 if (hsize >= allocsize) {       /* big enough */
392                         if (hsize == allocsize && prevhead != NULL) {
393                                 prevhead->next = next;
394                                 kfree(head);
395                         } else {
396                                 head->size = hsize - allocsize;
397                                 head->addr += allocsize;
398                         }
399
400                         /* free up any hole created by alignment */
401                         if (tmpalign)
402                                 free_block(target, segid, addr, tmpalign);
403
404                         *dsp_address = addr + tmpalign;
405                         return true;
406                 }
407
408                 prevhead = head;
409                 head = next;
410
411         } while (head != NULL);
412
413         return false;
414 }
415
/*
 *  ======== free_block ========
 *  Insert the block [addr, addr + size) back into segment segid's free
 *  list, which is kept sorted by ascending address, coalescing with the
 *  adjacent upper and/or lower neighbour when the ranges abut.  Returns
 *  false only if the new header cannot be allocated.
 *
 *  TO DO: free_block() allocates memory, which could result in failure.
 *  Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
 *  free_block() could use an rmm_header from the pool, freeing as blocks
 *  are coalesced.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size)
{
	struct rmm_header *head;
	struct rmm_header *thead;
	struct rmm_header *rhead;
	bool ret = true;

	/* Create a memory header to hold the newly free'd block. */
	rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
	if (rhead == NULL) {
		ret = false;
	} else {
		/* search down the free list to find the right place for addr */
		head = target->free_list[segid];

		if (addr >= head->addr) {
			/* Walk to the last node whose addr precedes the new
			 * block; 'thead' becomes its upper neighbour. */
			while (head->next != NULL && addr > head->next->addr)
				head = head->next;

			thead = head->next;

			/* Link rhead (holding the freed block) between head
			 * and thead. */
			head->next = rhead;
			rhead->next = thead;
			rhead->addr = addr;
			rhead->size = size;
		} else {
			/* New block precedes the first node.  Keep 'head' as
			 * the list entry point: copy its contents into rhead
			 * and reuse head for the new block.  After this,
			 * rhead holds the OLD first block, not the freed one. */
			*rhead = *head;
			head->next = rhead;
			head->addr = addr;
			head->size = size;
			thead = rhead->next;
		}

		/* join with upper block, if possible */
		/* NOTE(review): this merge uses 'size'/'addr' of the freed
		 * block, which matches rhead only in the first branch above.
		 * In the second branch it relies on the old first block
		 * (rhead) never abutting thead, i.e. on the invariant that
		 * the list never holds two adjacent uncoalesced blocks --
		 * verify before reworking this function. */
		if (thead != NULL && (rhead->addr + rhead->size) ==
		    thead->addr) {
			head->next = rhead->next;
			thead->size = size + thead->size;
			thead->addr = addr;
			kfree(rhead);
			rhead = thead;
		}

		/* join with the lower block, if possible */
		if ((head->addr + head->size) == rhead->addr) {
			head->next = rhead->next;
			head->size = head->size + rhead->size;
			kfree(rhead);
		}
	}

	return ret;
}