drm/ttm: Use set_pages_array_wc instead of set_memory_wc.
drivers/gpu/drm/ttm/ttm_page_alloc.c
/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track pages that are currently in use
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/atomic.h>
#include <asm/agp.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"


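/*
 * Pool tuning constants.  NUM_PAGES_TO_ALLOC is the batch size used for pool
 * refills and frees: as many struct page pointers as fit in a single page.
 * SMALL_ALLOCATION is the request size below which a pool refill is triggered
 * when the pool is short of pages, and FREE_ALL_PAGES is the sentinel passed
 * to ttm_page_pool_free() to drain a pool completely.
 */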
#define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION                16
#define FREE_ALL_PAGES                  (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL              1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass to alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name shown in debugfs.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
        int                     gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};
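/*
 * Tunables shared by all pools: the refill batch size, the maximum number of
 * pages a pool may hold before the excess is freed, and the threshold below
 * which a request counts as a "small" allocation.  All values are in pages.
 */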
struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @mm_shrink: Shrinker used to reduce the number of pages held when the
 * system is under memory pressure.
 * @page_alloc_inited: reference counting for pool allocation.
 * @options: limits for the pools: refill size, maximum pool size and the
 * number of pages below which an allocation counts as small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        struct shrinker         mm_shrink;
        atomic_t                page_alloc_inited;
        struct ttm_pool_opts    options;

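        /*
         * Pools are laid out to match the index computed by ttm_get_pool():
         * bit 0 selects uncached vs. write-combined, bit 1 selects the
         * GFP_DMA32 variant.
         */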
        union {
                struct ttm_page_pool    pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool    wc_pool;
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
                } ;
        };
};

static struct ttm_pool_manager _manager = {
        .page_alloc_inited      = ATOMIC_INIT(0)
};

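/*
 * On architectures without the x86 set_pages_array_*() helpers, fall back to
 * the AGP map/unmap hooks for caching changes (or to no-ops when TTM_HAS_AGP
 * is not defined).
 */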
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager.pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;
        if (set_pages_array_wb(pages, npages))
                printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
                                npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool.
 *
 * Returns the number of pages that still need to be freed to satisfy the
 * request.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* more pages to free, restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager.pools[i].npages;

        return total;
}

/**
 * Callback for mm to request that the pool reduce the number of pages held.
 *
 * The shrinker is asked to free shrink_pages pages and returns an estimate
 * of how many reclaimable pages remain.
 */
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
                                        cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
                                        cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change caching state. If there are any pages
 * that have already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        printk(KERN_ERR "[ttm] unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, new pages
         * are allocated from outside of the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool first */
        if (count < _manager.options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager.options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool.
                         * Count only the newly allocated pages so the pool
                         * page count stays accurate. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut count number of pages from the pool and put them on the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                struct list_head *pages, int ttm_flags,
                enum ttm_caching_state cstate, unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* Find the last page to include for the requested number of pages.
         * Walk from whichever end of the list is closer to halve the
         * search space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut count number of pages from pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}

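/*
 * ttm_get_pages()/ttm_put_pages() below are the entry points used by TTM's
 * page allocation code.  Roughly (illustrative only): the caller builds an
 * empty list, requests count pages with the desired caching state, and later
 * returns the whole list to the matching pool:
 *
 *      LIST_HEAD(plist);
 *      r = ttm_get_pages(&plist, flags, cstate, count);
 *      ...
 *      ttm_put_pages(&plist, count, flags, cstate);
 */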
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
                enum ttm_caching_state cstate, unsigned count)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
        int gfp_flags = 0;
        int r;

        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= __GFP_HIGHMEM;

                for (r = 0; r < count; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {

                                printk(KERN_ERR "[ttm] unable to allocate page.");
                                return -ENOMEM;
                        }

                        list_add(&p->lru, pages);
                }
                return 0;
        }


        /* combine zero flag with the pool's gfp flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, pages, lru) {
                        clear_page(page_address(p));
                }
        }

        /* If the pool didn't have enough pages, allocate new ones. */
        if (count > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
                if (r) {
                        /* If there are any pages in the list, put them back
                         * in the pool. */
                        printk(KERN_ERR "[ttm] Failed to allocate extra pages "
                                        "for large request.");
                        ttm_put_pages(pages, 0, flags, cstate);
                        return r;
                }
        }


        return 0;
}

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
                enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */

                list_for_each_entry_safe(p, tmp, pages, lru) {
                        __free_page(p);
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);
                return;
        }
        if (page_count == 0) {
                list_for_each_entry_safe(p, tmp, pages, lru) {
                        ++page_count;
                }
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        list_splice_init(pages, &pool->list);
        pool->npages += page_count;
        /* Check that we don't go over the pool limit */
        page_count = 0;
        if (pool->npages > _manager.options.max_size) {
                page_count = pool->npages - _manager.options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (page_count < NUM_PAGES_TO_ALLOC)
                        page_count = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (page_count)
                ttm_page_pool_free(pool, page_count);
}

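/*
 * Initialise a single pool; called from ttm_page_alloc_init() for each of
 * the four pools before the shrinker is registered.
 */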
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(unsigned max_pages)
{
        if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
                return 0;

        printk(KERN_INFO "[ttm] Initializing pool allocator.\n");

        ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
                        "wc dma");

        ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
                        "uc dma");

        _manager.options.max_size = max_pages;
        _manager.options.small = SMALL_ALLOCATION;
        _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;

        ttm_pool_mm_shrink_init(&_manager);

        return 0;
}

void ttm_page_alloc_fini(void)
{
        int i;

        if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
                return;

        printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
        ttm_pool_mm_shrink_fini(&_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
}

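/*
 * Dump per-pool statistics.  Exported so drivers can hook this up as a
 * debugfs seq_file show callback.
 */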
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (atomic_read(&_manager.page_alloc_inited) == 0) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager.pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);