/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
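
/*
 * Example (editor's sketch, not part of the original file): the typical
 * life cycle of an allocation with this manager. The drm_mm_get_block()
 * wrapper is assumed to come from drm_mm.h; the sizes and the 'mm'
 * instance are hypothetical, and error handling is elided.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	hole = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block(hole, 4096, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */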

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
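
/*
 * Example (editor's sketch, not part of the original file):
 * drm_mm_pre_get() exists so that callers who must carve out a block while
 * holding a spinlock can fill the unused-node cache beforehand and then
 * allocate atomically. drm_mm_get_block_atomic() is assumed to be the
 * atomic wrapper from drm_mm.h; 'dev_lock' and 'hole' are hypothetical.
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	spin_lock(&dev_lock);
 *	node = drm_mm_get_block_atomic(hole, size, alignment);
 *	spin_unlock(&dev_lock);
 */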

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	BUG_ON(cur->scanned_block || cur->scanned_prev_free
				  || cur->scanned_next_free);

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
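
/*
 * Worked example (editor's note, not part of the original file): with
 * nodes [0,100) free, [100,200) used and [200,300) free, freeing the
 * middle block first grows the previous free node to [0,200) and then
 * absorbs the next one, leaving a single free node [0,300); the two
 * now-redundant node structures are recycled via the unused_nodes cache.
 */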

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}
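
/*
 * Worked example (editor's note, not part of the original file): for a
 * hole [0x1003, 0x2000) with size = 0x800 and alignment = 0x100, the
 * aligned start is 0x1100, so wasted = 0xfd and the check
 * 0x2000 >= 0x1003 + 0x800 + 0xfd (= 0x1900) succeeds; the hole fits.
 */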

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_hole(entry->start, entry->start + entry->size,
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
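
/*
 * Example (editor's note, not part of the original file): with free holes
 * of 64 KiB and 8 KiB on the stack, in that order, a 4 KiB request with
 * best_match = 0 returns the first fitting hole (64 KiB), while
 * best_match = 1 walks the whole stack and returns the tightest fit
 * (8 KiB), trading search time for less fragmentation.
 */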

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		unsigned long adj_start = entry->start < start ?
			start : entry->start;
		unsigned long adj_end = entry->start + entry->size > end ?
			end : entry->start + entry->size;

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	prev_free = next_free = NULL;

	BUG_ON(node->free);
	node->scanned_block = 1;
	node->free = 1;

	if (node->node_list.prev != &mm->node_list) {
		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
				       node_list);

		if (prev_node->free) {
			list_del(&prev_node->node_list);

			node->start = prev_node->start;
			node->size += prev_node->size;

			prev_node->scanned_prev_free = 1;

			prev_free = &prev_node->free_stack;
		}
	}

	if (node->node_list.next != &mm->node_list) {
		next_node = list_entry(node->node_list.next, struct drm_mm_node,
				       node_list);

		if (next_node->free) {
			list_del(&next_node->node_list);

			node->size += next_node->size;

			next_node->scanned_next_free = 1;

			next_free = &next_node->free_stack;
		}
	}

	/* The free_stack list is not used for allocated objects, so these two
	 * pointers can be abused (as long as no allocations in this memory
	 * manager happen). */
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

	if (mm->scan_check_range) {
		adj_start = node->start < mm->scan_start ?
			mm->scan_start : node->start;
		adj_end = node->start + node->size > mm->scan_end ?
			mm->scan_end : node->start + node->size;
	} else {
		adj_start = node->start;
		adj_end = node->start + node->size;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in the exact reverse order in
 * which they were added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;
	node->free = 0;

	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
			       free_stack);
	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
			       free_stack);

	if (prev_node) {
		BUG_ON(!prev_node->scanned_prev_free);
		prev_node->scanned_prev_free = 0;

		list_add_tail(&prev_node->node_list, &node->node_list);

		node->start = prev_node->start + prev_node->size;
		node->size -= prev_node->size;
	}

	if (next_node) {
		BUG_ON(!next_node->scanned_next_free);
		next_node->scanned_next_free = 0;

		list_add(&next_node->node_list, &node->node_list);

		node->size -= next_node->size;
	}

	INIT_LIST_HEAD(&node->free_stack);

	/* Only need to check for containment because start and size for the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
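
/*
 * Example (editor's sketch, not part of the original file): the eviction
 * loop the scan API is designed for, modelled on LRU eviction in a driver.
 * 'obj', 'lru', 'unwind' and evict() are hypothetical. Iterating the
 * prepend-built 'unwind' list visits blocks in the reverse order of their
 * addition, as drm_mm_scan_remove_block() requires.
 *
 *	drm_mm_init_scan(&mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->unwind_link, &unwind);
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			goto found;
 *	}
 *	list_for_each_entry_safe(obj, tmp, &unwind, unwind_link)
 *		drm_mm_scan_remove_block(obj->mm_node);
 *	return -ENOSPC;
 * found:
 *	list_for_each_entry_safe(obj, tmp, &unwind, unwind_link)
 *		if (drm_mm_scan_remove_block(obj->mm_node))
 *			evict(obj);
 */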

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif