/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* bookkeeping for one piece of L1 scratchpad, L1 data/instruction, or L2 SRAM */
struct sram_piece {
        void *paddr;
        int size;
        pid_t pid;
        struct sram_piece *next;
};

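/*
 * Each region is managed by two singly linked lists hanging off dummy
 * heads: a free list kept sorted by ascending address (allocation is
 * first-fit) and a used list keyed by each piece's start address, so a
 * freed piece can be looked up and coalesced with its neighbors.
 */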
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/*
 * L1 scratchpad SRAM initialization.  On UP builds the start of the
 * scratchpad holds the current task's l1_scratch_task_info, so that
 * space is reserved and only the remainder is handed to the allocator.
 */
static void __init l1sram_init(void)
{
        unsigned int cpu;
        unsigned long reserve;

#ifdef CONFIG_SMP
        reserve = 0;
#else
        reserve = sizeof(struct l1_scratch_task_info);
#endif

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_ssram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_ssram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
                        return;
                }

                per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
                per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
                per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
                per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

                per_cpu(used_l1_ssram_head, cpu).next = NULL;

                /* initialize the per-CPU lock */
                spin_lock_init(&per_cpu(l1sram_lock, cpu));
                printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
                        L1_SCRATCH_LENGTH >> 10);
        }
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_A_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->size =
                        L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
                        L1_DATA_A_LENGTH >> 10,
                        per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
        }
#endif
#if L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_B_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->size =
                        L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
                        L1_DATA_B_LENGTH >> 10,
                        per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
        }
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        /* initialize the per-CPU locks */
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
        unsigned int cpu;
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_inst_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
                        return;
                }

                per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
                        (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->size =
                        L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
                        L1_CODE_LENGTH >> 10,
                        per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

                /* initialize the per-CPU lock */
                spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
        }
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
        free_l2_sram_head.next =
                kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
        if (!free_l2_sram_head.next) {
                printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
                return;
        }

        free_l2_sram_head.next->paddr =
                (void *)L2_START + (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->size =
                L2_LENGTH - (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->pid = 0;
        free_l2_sram_head.next->next = NULL;

        used_l2_sram_head.next = NULL;

        printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
                L2_LENGTH >> 10,
                free_l2_sram_head.next->size >> 10);

        /* initialize the lock */
        spin_lock_init(&l2_sram_lock);
#endif
}

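/*
 * Runs as a pure_initcall (the earliest initcall level), so the SRAM
 * lists are set up before core and driver initcalls can use the
 * allocators below.
 */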
static int __init bfin_sram_init(void)
{
        sram_piece_cache = kmem_cache_create("sram_piece_cache",
                                sizeof(struct sram_piece),
                                0, SLAB_PANIC, NULL);

        l1sram_init();
        l1_data_sram_init();
        l1_inst_sram_init();
        l2_sram_init();

        return 0;
}
pure_initcall(bfin_sram_init);

/* first-fit SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (size <= 0 || !pfree_head || !pused_head)
                return NULL;

        /* round the size up to a 4-byte multiple */
        size = (size + 3) & ~3;

        pslot = pfree_head->next;
        plast = pfree_head;

        /* find the first free piece large enough */
        while (pslot != NULL && size > pslot->size) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return NULL;

        if (pslot->size == size) {
                plast->next = pslot->next;
                pavail = pslot;
        } else {
                /* split the piece; the remainder stays on the free list */
                pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

                if (!pavail)
                        return NULL;

                pavail->paddr = pslot->paddr;
                pavail->size = size;
                pslot->paddr += size;
                pslot->size -= size;
        }

        pavail->pid = current->pid;

        pslot = pused_head->next;
        plast = pused_head;

        /* insert the new piece into the used list */
        while (pslot != NULL && pavail->paddr < pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        pavail->next = pslot;
        plast->next = pavail;

        return pavail->paddr;
}
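
/*
 * Example (hypothetical addresses): a request for 6 bytes rounds up to
 * 8; a 32-byte free piece at 0xff800000 is split into an 8-byte used
 * piece at 0xff800000, leaving a 24-byte free piece at 0xff800008.
 */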

/* Allocate the largest available block.  */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
                                struct sram_piece *pused_head,
                                unsigned long *psize)
{
        struct sram_piece *pslot, *pmax;

        if (!pfree_head || !pused_head)
                return NULL;

        pmax = pslot = pfree_head->next;

        /* find the largest free piece */
        while (pslot != NULL) {
                if (pslot->size > pmax->size)
                        pmax = pslot;
                pslot = pslot->next;
        }

        if (!pmax)
                return NULL;

        *psize = pmax->size;

        return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
                        struct sram_piece *pfree_head,
                        struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (!pfree_head || !pused_head)
                return -1;

        /* find the used piece that starts at addr */
        pslot = pused_head->next;
        plast = pused_head;

        while (pslot != NULL && pslot->paddr != addr) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return -1;

        plast->next = pslot->next;
        pavail = pslot;
        pavail->pid = 0;

        /* insert the freed piece back into the free list */
        pslot = pfree_head->next;
        plast = pfree_head;

        while (pslot != NULL && addr > pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        /* merge with the lower neighbor if they are contiguous */
        if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
                plast->size += pavail->size;
                kmem_cache_free(sram_piece_cache, pavail);
        } else {
                pavail->next = plast->next;
                plast->next = pavail;
                plast = pavail;
        }

        /* merge with the upper neighbor if they are contiguous */
        if (pslot && plast->paddr + plast->size == pslot->paddr) {
                plast->size += pslot->size;
                plast->next = pslot->next;
                kmem_cache_free(sram_piece_cache, pslot);
        }

        return 0;
}
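
/*
 * Note: addr must be the exact start address returned by _sram_alloc;
 * an interior pointer will not match any used piece and the free fails
 * with -1.
 */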

/* dispatch a free to the right region based on the address */
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        if (addr >= (void *)get_l1_code_start()
                 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
                return l1_inst_sram_free(addr);
        else
#endif
#if L1_DATA_A_LENGTH != 0
        if (addr >= (void *)get_l1_data_a_start()
                 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
                return l1_data_A_sram_free(addr);
        else
#endif
#if L1_DATA_B_LENGTH != 0
        if (addr >= (void *)get_l1_data_b_start()
                 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
                return l1_data_B_sram_free(addr);
        else
#endif
#if L2_LENGTH != 0
        if (addr >= (void *)L2_START
                 && addr < (void *)(L2_START + L2_LENGTH))
                return l2_sram_free(addr);
        else
#endif
                return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

/* try bank A first, then fall back to bank B */
void *l1_data_sram_alloc(size_t size)
{
        void *addr = l1_data_A_sram_alloc(size);

        if (!addr)
                addr = l1_data_B_sram_alloc(size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
        void *addr = l1_data_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
        int ret;
        ret = l1_data_A_sram_free(addr);
        if (ret == -1)
                ret = l1_data_B_sram_free(addr);
        return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
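
/*
 * Usage sketch (illustrative only; struct my_desc is a made-up type).
 * L1 data SRAM is small, so callers typically fall back to normal
 * kernel memory and use the free return value to pick the right
 * release path:
 *
 *      struct my_desc *d = l1_data_sram_zalloc(sizeof(*d));
 *      if (!d)
 *              d = kzalloc(sizeof(*d), GFP_KERNEL);
 *      ...
 *      if (l1_data_sram_free(d) == -1)
 *              kfree(d);
 */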

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}

/* allocate the largest available block of L1 scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu), psize);

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}

/* L1 scratchpad memory free function */
int l1sram_free(const void *addr)
{
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
        unsigned long flags;
        void *addr;

        spin_lock_irqsave(&l2_sram_lock, flags);

        addr = _sram_alloc(size, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
                 (unsigned long)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
        void *addr = l2_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&l2_sram_lock, flags);

        ret = _sram_free(addr, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
        struct sram_list_struct *lsl, **tmp;
        struct mm_struct *mm = current->mm;

        for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
                if ((*tmp)->addr == addr)
                        goto found;
        return -1;
found:
        lsl = *tmp;
        sram_free(addr);
        *tmp = lsl->next;
        kfree(lsl);

        return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in an SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace use, so that
 * when a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
        void *addr = NULL;
        struct sram_list_struct *lsl = NULL;
        struct mm_struct *mm = current->mm;

        lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
        if (!lsl)
                return NULL;

        if (flags & L1_INST_SRAM)
                addr = l1_inst_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_A_SRAM))
                addr = l1_data_A_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_B_SRAM))
                addr = l1_data_B_sram_alloc(size);

        if (addr == NULL && (flags & L2_SRAM))
                addr = l2_sram_alloc(size);

        if (addr == NULL) {
                kfree(lsl);
                return NULL;
        }
        lsl->addr = addr;
        lsl->length = size;
        lsl->next = mm->context.sram_list;
        mm->context.sram_list = lsl;
        return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
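
/*
 * Usage sketch (illustrative only): flags may be OR'd together, and the
 * regions are tried in the fixed order L1 inst, L1 data A, L1 data B,
 * then L2:
 *
 *      void *p = sram_alloc_with_lsl(len, L1_DATA_A_SRAM | L1_DATA_B_SRAM);
 *      if (!p)
 *              return -ENOMEM;
 *      ...
 *      sram_free_with_lsl(p);
 */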

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output must be the same length.  Currently, that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot;

        if (!pfree_head || !pused_head)
                return -1;

        *len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

        /* list the allocated pieces first */
        pslot = pused_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "ALLOCATED");

                pslot = pslot->next;
        }

        /* then the free pieces */
        pslot = pfree_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "FREE");

                pslot = pslot->next;
        }

        return 0;
}

static int sram_proc_read(char *buf, char **start, off_t offset, int count,
                int *eof, void *data)
{
        int len = 0;
        unsigned int cpu;

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                if (_sram_proc_read(buf, &len, count, "Scratchpad",
                        &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
                        goto not_done;
#if L1_DATA_A_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data A",
                        &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data B",
                        &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_CODE_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Instruction",
                        &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu)))
                        goto not_done;
#endif
        }
#if L2_LENGTH != 0
        if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
                &used_l2_sram_head))
                goto not_done;
#endif
        *eof = 1;
 not_done:
        return len;
}

static int __init sram_proc_init(void)
{
        struct proc_dir_entry *ptr;

        ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
        if (!ptr) {
                printk(KERN_WARNING "unable to create /proc/sram\n");
                return -1;
        }
        ptr->read_proc = sram_proc_read;
        return 0;
}
late_initcall(sram_proc_init);
#endif