/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple; there could be substantial
 * performance gains if a smarter free list were implemented. Currently the
 * free list is just an unordered stack of free regions, which an RB-tree
 * could easily replace if heavy fragmentation is expected.
 *
 * Aligned allocations could also be made more efficient.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

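/*
 * Typical usage, as a minimal illustrative sketch (error handling and the
 * caller's own locking are omitted, and the range/size values are made up;
 * drm_mm_get_block() is the non-atomic wrapper from drm_mm.h around
 * drm_mm_get_block_generic() below):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block(node, 4096, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */
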
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

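/*
 * Allocate a struct drm_mm_node, using GFP_ATOMIC when called from atomic
 * context and falling back to the pre-allocated unused_nodes pool (filled
 * by drm_mm_pre_get()) when the allocation fails.
 */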
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, free_stack);
                        list_del(&child->free_stack);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Tops up the pool of unused nodes that atomic allocations fall back to.
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->free_stack, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

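/*
 * A hedged usage sketch: a driver that must carve out blocks while holding
 * a spinlock would typically top up the node pool first and then use the
 * atomic variants from drm_mm.h (the lock, size and alignment here are
 * illustrative):
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	spin_lock(&lock);
 *	node = drm_mm_get_block_atomic(node, size, alignment);
 *	spin_unlock(&lock);
 */

/*
 * Create the free node covering [start, start + size) and append it to both
 * the node list and the free stack; used to seed a newly initialized
 * manager.
 */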
static int drm_mm_create_tail_node(struct drm_mm *mm,
                                   unsigned long start,
                                   unsigned long size, int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(mm, atomic);
        if (unlikely(child == NULL))
                return -ENOMEM;

        child->free = 1;
        child->size = size;
        child->start = start;
        child->mm = mm;

        list_add_tail(&child->node_list, &mm->node_list);
        list_add_tail(&child->free_stack, &mm->free_stack);

        return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
                                                 unsigned long size,
                                                 int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(parent->mm, atomic);
        if (unlikely(child == NULL))
                return NULL;

        INIT_LIST_HEAD(&child->free_stack);

        child->size = size;
        child->start = parent->start;
        child->mm = parent->mm;

        list_add_tail(&child->node_list, &parent->node_list);

        parent->size -= size;
        parent->start += size;
        return child;
}

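/*
 * Carve an allocation out of the free node @node. If @alignment requires it,
 * the misaligned head of @node is split off first and handed straight back
 * to the free list.
 */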
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;

        if (alignment)
                tmp = node->start % alignment;

        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(node, alignment - tmp, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->free_stack);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;
        unsigned wasted = 0;

        if (node->start < start)
                wasted += start - node->start;
        if (alignment)
                tmp = ((node->start + wasted) % alignment);

        if (tmp)
                wasted += alignment - tmp;
        if (wasted) {
                align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->free_stack);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
        struct drm_mm *mm = cur->mm;
        struct list_head *cur_head = &cur->node_list;
        struct list_head *root_head = &mm->node_list;
        struct drm_mm_node *prev_node = NULL;
        struct drm_mm_node *next_node;

        int merged = 0;

        BUG_ON(cur->scanned_block || cur->scanned_prev_free
                                  || cur->scanned_next_free);

        if (cur_head->prev != root_head) {
                prev_node =
                    list_entry(cur_head->prev, struct drm_mm_node, node_list);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }
        if (cur_head->next != root_head) {
                next_node =
                    list_entry(cur_head->next, struct drm_mm_node, node_list);
                if (next_node->free) {
                        if (merged) {
                                prev_node->size += next_node->size;
                                list_del(&next_node->node_list);
                                list_del(&next_node->free_stack);
                                spin_lock(&mm->unused_lock);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->free_stack,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
                                spin_unlock(&mm->unused_lock);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
                                merged = 1;
                        }
                }
        }
        if (!merged) {
                cur->free = 1;
                list_add(&cur->free_stack, &mm->free_stack);
        } else {
                list_del(&cur->node_list);
                spin_lock(&mm->unused_lock);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->free_stack, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
                spin_unlock(&mm->unused_lock);
        }
}
EXPORT_SYMBOL(drm_mm_put_block);

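/* Check whether the free node @entry can hold @size bytes at @alignment. */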
static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
                              unsigned alignment)
{
        unsigned wasted = 0;

        if (entry->size < size)
                return 0;

        if (alignment) {
                unsigned tmp = entry->start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        if (entry->size >= size + wasted)
                return 1;

        return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->free_stack, free_stack) {
                if (!check_free_mm_node(entry, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->free_stack, free_stack) {
                if (entry->start > end || (entry->start+entry->size) < start)
                        continue;

                if (!check_free_mm_node(entry, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
                      unsigned alignment)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct list_head *prev_free, *next_free;
        struct drm_mm_node *prev_node, *next_node;

        mm->scanned_blocks++;

        prev_free = next_free = NULL;

        BUG_ON(node->free);
        node->scanned_block = 1;
        node->free = 1;

        if (node->node_list.prev != &mm->node_list) {
                prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                                       node_list);

                if (prev_node->free) {
                        list_del(&prev_node->node_list);

                        node->start = prev_node->start;
                        node->size += prev_node->size;

                        prev_node->scanned_prev_free = 1;

                        prev_free = &prev_node->free_stack;
                }
        }

        if (node->node_list.next != &mm->node_list) {
                next_node = list_entry(node->node_list.next, struct drm_mm_node,
                                       node_list);

                if (next_node->free) {
                        list_del(&next_node->node_list);

                        node->size += next_node->size;

                        next_node->scanned_next_free = 1;

                        next_free = &next_node->free_stack;
                }
        }

        /* The free_stack list is not used for allocated objects, so these two
         * pointers can be abused (as long as no allocations in this memory
         * manager happen). */
        node->free_stack.prev = prev_free;
        node->free_stack.next = next_free;

        if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = node->start;
                mm->scan_hit_size = node->size;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node, *next_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;
        node->free = 0;

        prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
                               free_stack);
        next_node = list_entry(node->free_stack.next, struct drm_mm_node,
                               free_stack);

        if (prev_node) {
                BUG_ON(!prev_node->scanned_prev_free);
                prev_node->scanned_prev_free = 0;

                list_add_tail(&prev_node->node_list, &node->node_list);

                node->start = prev_node->start + prev_node->size;
                node->size -= prev_node->size;
        }

        if (next_node) {
                BUG_ON(!next_node->scanned_next_free);
                next_node->scanned_next_free = 0;

                list_add(&next_node->node_list, &node->node_list);

                node->size -= next_node->size;
        }

        INIT_LIST_HEAD(&node->free_stack);

        /* Only need to check for containment because start & size for the
         * complete resulting free block (not just the desired part) are
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                        <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

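/*
 * A hedged usage sketch for the scan API above (the object, list and
 * evict() names are illustrative, not part of this file): candidates are
 * added from an LRU until a hole is found, then removed in the exact
 * reverse order (list_add() prepends, so walking scan_list forward gives
 * that order); those for which remove returns 1 must be evicted to
 * actually produce the hole.
 *
 *	drm_mm_init_scan(&mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			goto found;
 *	}
 *	return -ENOSPC;
 * found:
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(obj->mm_node))
 *			evict(obj);
 *	}
 */

/* Return non-zero if nothing but the initial free block is left. */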
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->node_list);
        INIT_LIST_HEAD(&mm->free_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

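/*
 * Tear down a manager: refuse (with an error message) if any allocations
 * are still outstanding, otherwise free the initial node and drain the
 * unused_nodes pool.
 */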
void drm_mm_takedown(struct drm_mm *mm)
{
        struct list_head *bnode = mm->free_stack.next;
        struct drm_mm_node *entry;
        struct drm_mm_node *next;

        entry = list_entry(bnode, struct drm_mm_node, free_stack);

        if (entry->node_list.next != &mm->node_list ||
            entry->free_stack.next != &mm->free_stack) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        list_del(&entry->free_stack);
        list_del(&entry->node_list);
        kfree(entry);

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
                list_del(&entry->free_stack);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

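/* Dump all blocks to the kernel log, one line per block, plus totals. */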
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        int total_used = 0, total_free = 0, total = 0;

        list_for_each_entry(entry, &mm->node_list, node_list) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size, entry->free ? "free" : "used");
                total += entry->size;
                if (entry->free)
                        total_free += entry->size;
                else
                        total_used += entry->size;
        }
        printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        int total_used = 0, total_free = 0, total = 0;

        list_for_each_entry(entry, &mm->node_list, node_list) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
                           entry->start, entry->start + entry->size,
                           entry->size, entry->free ? "free" : "used");
                total += entry->size;
                if (entry->free)
                        total_free += entry->size;
                else
                        total_used += entry->size;
        }
        seq_printf(m, "total: %d, used %d free %d\n",
                   total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif