arch/blackfin/mm/sram-alloc.c

/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
        void *paddr;
        int size;
        pid_t pid;
        struct sram_piece *next;
};

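/*
 * Each SRAM region is tracked by two singly linked lists hanging off the
 * dummy heads below: a "free" list of unallocated pieces and a "used" list
 * of live allocations, each piece tagged with the owner's pid.  The heads
 * are per-CPU because every core has its own private L1 memory.
 */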
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

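/* every sram_piece node is allocated from this slab cache */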
static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
        unsigned int cpu;
        unsigned long reserve;

#ifdef CONFIG_SMP
        reserve = 0;
#else
        reserve = sizeof(struct l1_scratch_task_info);
#endif

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_ssram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_ssram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
                        return;
                }

                per_cpu(free_l1_ssram_head, cpu).next->paddr =
                        (void *)get_l1_scratch_start_cpu(cpu) + reserve;
                per_cpu(free_l1_ssram_head, cpu).next->size =
                        L1_SCRATCH_LENGTH - reserve;
                per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
                per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

                per_cpu(used_l1_ssram_head, cpu).next = NULL;

                spin_lock_init(&per_cpu(l1sram_lock, cpu));

                printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
                        L1_SCRATCH_LENGTH >> 10);
        }
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_A_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
                        return;
                }

                per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->size =
                        L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
                        L1_DATA_A_LENGTH >> 10,
                        per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
        }
#endif
#if L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_B_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
                        return;
                }

                per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->size =
                        L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
                        L1_DATA_B_LENGTH >> 10,
                        per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
        }
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
        unsigned int cpu;
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_inst_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
                        return;
                }

                per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
                        (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->size =
                        L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
                        L1_CODE_LENGTH >> 10,
                        per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

                spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
        }
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
        free_l2_sram_head.next =
                kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
        if (!free_l2_sram_head.next) {
                printk(KERN_INFO "Failed to initialize L2 SRAM\n");
                return;
        }

        free_l2_sram_head.next->paddr =
                (void *)L2_START + (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->size =
                L2_LENGTH - (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->pid = 0;
        free_l2_sram_head.next->next = NULL;

        used_l2_sram_head.next = NULL;

        printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
                L2_LENGTH >> 10,
                free_l2_sram_head.next->size >> 10);

        spin_lock_init(&l2_sram_lock);
#endif
}

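/*
 * pure_initcall runs at the earliest initcall level, so the SRAM free
 * lists below are set up before any core or driver initcall can allocate.
 */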
static int __init bfin_sram_init(void)
{
        sram_piece_cache = kmem_cache_create("sram_piece_cache",
                                sizeof(struct sram_piece),
                                0, SLAB_PANIC, NULL);

        l1sram_init();
        l1_data_sram_init();
        l1_inst_sram_init();
        l2_sram_init();

        return 0;
}
pure_initcall(bfin_sram_init);

/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (size == 0 || !pfree_head || !pused_head)
                return NULL;

        /* Align the size to a 4 byte boundary */
        size = (size + 3) & ~3;

        pslot = pfree_head->next;
        plast = pfree_head;

        /* search for the first free piece that is large enough */
        while (pslot != NULL && size > pslot->size) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return NULL;

        if (pslot->size == size) {
                /* exact fit: move the whole piece to the used list */
                plast->next = pslot->next;
                pavail = pslot;
        } else {
                /* split the piece; callers hold a spinlock, so don't sleep */
                pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

                if (!pavail)
                        return NULL;

                pavail->paddr = pslot->paddr;
                pavail->size = size;
                pslot->paddr += size;
                pslot->size -= size;
        }

        pavail->pid = current->pid;

        pslot = pused_head->next;
        plast = pused_head;

        /* insert the new piece into the used list, kept sorted by address */
        while (pslot != NULL && pavail->paddr < pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        pavail->next = pslot;
        plast->next = pavail;

        return pavail->paddr;
}
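
/*
 * Worked example: a request for 5 bytes is rounded up to 8 by
 * (5 + 3) & ~3, then satisfied by the first free piece of at least
 * 8 bytes; a larger piece is split and its tail stays on the free list.
 */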

/* Allocate the largest available block.  */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
                                struct sram_piece *pused_head,
                                unsigned long *psize)
{
        struct sram_piece *pslot, *pmax;

        if (!pfree_head || !pused_head)
                return NULL;

        pmax = pslot = pfree_head->next;

        /* scan the whole free list for the largest piece */
        while (pslot != NULL) {
                if (pslot->size > pmax->size)
                        pmax = pslot;
                pslot = pslot->next;
        }

        if (!pmax)
                return NULL;

        *psize = pmax->size;

        return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
                        struct sram_piece *pfree_head,
                        struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (!pfree_head || !pused_head)
                return -1;

        /* search the used list for the piece that owns this address */
        pslot = pused_head->next;
        plast = pused_head;

        while (pslot != NULL && pslot->paddr != addr) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return -1;

        plast->next = pslot->next;
        pavail = pslot;
        pavail->pid = 0;

        /* find the insertion point in the address-sorted free list */
        pslot = pfree_head->next;
        plast = pfree_head;

        while (pslot != NULL && addr > pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

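        /* coalesce with the preceding free piece when they are contiguous */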
        if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
                plast->size += pavail->size;
                kmem_cache_free(sram_piece_cache, pavail);
        } else {
                pavail->next = plast->next;
                plast->next = pavail;
                plast = pavail;
        }

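        /* likewise coalesce with the following free piece */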
        if (pslot && plast->paddr + plast->size == pslot->paddr) {
                plast->size += pslot->size;
                plast->next = pslot->next;
                kmem_cache_free(sram_piece_cache, pslot);
        }

        return 0;
}

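/*
 * Free a pointer back to whichever SRAM region contains it; the address
 * range checks below dispatch to the matching region-specific free.
 */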
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        if (addr >= (void *)get_l1_code_start()
                 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
                return l1_inst_sram_free(addr);
        else
#endif
#if L1_DATA_A_LENGTH != 0
        if (addr >= (void *)get_l1_data_a_start()
                 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
                return l1_data_A_sram_free(addr);
        else
#endif
#if L1_DATA_B_LENGTH != 0
        if (addr >= (void *)get_l1_data_b_start()
                 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
                return l1_data_B_sram_free(addr);
        else
#endif
#if L2_LENGTH != 0
        if (addr >= (void *)L2_START
                 && addr < (void *)(L2_START + L2_LENGTH))
                return l2_sram_free(addr);
        else
#endif
                return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
        put_cpu();

        pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%zx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
        put_cpu();

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
        put_cpu();

        pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%zx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
        put_cpu();

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
        void *addr = l1_data_A_sram_alloc(size);

        if (!addr)
                addr = l1_data_B_sram_alloc(size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
        void *addr = l1_data_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
        int ret;
        ret = l1_data_A_sram_free(addr);
        if (ret == -1)
                ret = l1_data_B_sram_free(addr);
        return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
        put_cpu();

        pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%zx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
        put_cpu();

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
        put_cpu();

        return addr;
}

/* Allocate the largest available block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu), psize);

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
        put_cpu();

        return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = get_cpu();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
        put_cpu();

        return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
        unsigned long flags;
        void *addr;

        spin_lock_irqsave(&l2_sram_lock, flags);

        addr = _sram_alloc(size, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%zx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
        void *addr = l2_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&l2_sram_lock, flags);

        ret = _sram_free(addr, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
        struct sram_list_struct *lsl, **tmp;
        struct mm_struct *mm = current->mm;

        for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
                if ((*tmp)->addr == addr)
                        goto found;
        return -1;
found:
        lsl = *tmp;
        sram_free(addr);
        *tmp = lsl->next;
        kfree(lsl);

        return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in the L1 SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace so that when
 * a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
        void *addr = NULL;
        struct sram_list_struct *lsl = NULL;
        struct mm_struct *mm = current->mm;

        lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
        if (!lsl)
                return NULL;

        if (flags & L1_INST_SRAM)
                addr = l1_inst_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_A_SRAM))
                addr = l1_data_A_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_B_SRAM))
                addr = l1_data_B_sram_alloc(size);

        if (addr == NULL && (flags & L2_SRAM))
                addr = l2_sram_alloc(size);

        if (addr == NULL) {
                kfree(lsl);
                return NULL;
        }
        lsl->addr = addr;
        lsl->length = size;
        lsl->next = mm->context.sram_list;
        mm->context.sram_list = lsl;
        return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
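
/*
 * A rough usage sketch (hypothetical caller, not part of this file):
 * request L1 data SRAM with an L2 fallback, then release it through the
 * same tracked interface:
 *
 *	void *buf = sram_alloc_with_lsl(len, L1_DATA_A_SRAM | L2_SRAM);
 *	if (buf) {
 *		...
 *		sram_free_with_lsl(buf);
 *	}
 */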

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep each line of output the same length.  Currently, that is
 * 44 bytes (including newline).
 */
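/*
 * For illustration, a chunk of the output looks roughly like this
 * (the addresses here are made up):
 *
 *	--- SRAM Scratchpad      Size   PID State
 *	ffb00000-ffb00100        256     0 FREE
 */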
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot;

        if (!pfree_head || !pused_head)
                return -1;

        *len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

        /* walk the used list first, then the free list */
        pslot = pused_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "ALLOCATED");

                pslot = pslot->next;
        }

        pslot = pfree_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "FREE");

                pslot = pslot->next;
        }

        return 0;
}

static int sram_proc_read(char *buf, char **start, off_t offset, int count,
                int *eof, void *data)
{
        int len = 0;
        unsigned int cpu;

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                if (_sram_proc_read(buf, &len, count, "Scratchpad",
                        &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu)))
                        goto not_done;
#if L1_DATA_A_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data A",
                        &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data B",
                        &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_CODE_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Instruction",
                        &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu)))
                        goto not_done;
#endif
        }
#if L2_LENGTH != 0
        if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
                &used_l2_sram_head))
                goto not_done;
#endif
        *eof = 1;
 not_done:
        return len;
}

static int __init sram_proc_init(void)
{
        struct proc_dir_entry *ptr;

        ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
        if (!ptr) {
                printk(KERN_WARNING "unable to create /proc/sram\n");
                return -1;
        }
        ptr->read_proc = sram_proc_read;
        return 0;
}
late_initcall(sram_proc_init);
#endif