/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page, KM_USER0);

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);

        kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
                                       unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; ++i)
                ttm_tt_clflush_page(*pages++);
        mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
        ;
}
#endif
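
/*
 * Make sure the CPU caches are clean for an array of pages backing a ttm:
 * clflush each page on x86 where available, flush the dcache range on
 * powerpc, and fall back to an empty IPI round-trip on all CPUs elsewhere.
 */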
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                ttm_tt_cache_flush_clflush(pages, num_pages);
                return;
        }
#elif defined(__powerpc__)
        unsigned long i;

        for (i = 0; i < num_pages; ++i) {
                unsigned long start = (unsigned long)page_address(pages[i]);

                flush_dcache_range(start, start + PAGE_SIZE);
        }
#else
        if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR TTM_PFX
                       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        ttm->pages = NULL;

        if (size <= PAGE_SIZE)
                ttm->pages = kzalloc(size, GFP_KERNEL);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
        }
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
        if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
        } else {
                kfree(ttm->pages);
        }
        ttm->pages = NULL;
}
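
/*
 * Allocate a single page for the ttm, honoring the zero-fill and DMA32
 * restrictions requested in the page flags.
 */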
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
        gfp_t gfp_flags = GFP_HIGHUSER;

        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (page_flags & TTM_PAGE_FLAG_DMA32)
                gfp_flags |= __GFP_DMA32;

        return alloc_page(gfp_flags);
}
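
/*
 * Unpin the user pages backing the ttm, marking them dirty first when the
 * buffer was mapped for write and has been written to, and return the
 * accounted memory.
 */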
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
        int write;
        int dirty;
        struct page *page;
        int i;
        struct ttm_backend *be = ttm->be;

        BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
        write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
        dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

        if (be)
                be->func->clear(be);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
                put_page(page);
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}
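
/*
 * Look up the page at @index, allocating and accounting it on demand.
 * Highmem pages are inserted from the top of the page array, lowmem pages
 * from the bottom, so each range stays contiguous.
 */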
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        struct page *p;
        struct ttm_bo_device *bdev = ttm->bdev;
        struct ttm_mem_global *mem_glob = bdev->mem_glob;
        int ret;

        while (NULL == (p = ttm->pages[index])) {
                p = ttm_tt_alloc_page(ttm->page_flags);
                if (!p)
                        return NULL;

                if (PageHighMem(p)) {
                        ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                   false, false, true);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[--ttm->first_himem_page] = p;
                } else {
                        ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                   false, false, false);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[++ttm->last_lomem_page] = p;
                }
        }
        return p;
out_err:
        put_page(p);
        return NULL;
}
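
/*
 * As __ttm_tt_get_page(), but first swap the ttm back in if its contents
 * have been swapped out.
 */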
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        int ret;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return NULL;
        }
        return __ttm_tt_get_page(ttm, index);
}
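
/*
 * Allocate all pages backing the ttm, swapping in first if needed, and hand
 * the page array to the backend. Moves the ttm to the tt_unbound state.
 */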
int ttm_tt_populate(struct ttm_tt *ttm)
{
        struct page *page;
        unsigned long i;
        struct ttm_backend *be;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = __ttm_tt_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages,
                           ttm->dummy_read_page);
        ttm->state = tt_unbound;
        return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        if (PageHighMem(p))
                return 0;

        switch (c_state) {
        case tt_cached:
                return set_pages_wb(p, 1);
        case tt_wc:
                return set_memory_wc((unsigned long) page_address(p), 1);
        default:
                return set_pages_uc(p, 1);
        }
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (c_state != tt_cached) {
                ret = ttm_tt_populate(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (ttm->caching_state == tt_cached)
                ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page, c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        /* Roll back the pages already changed to the previous state. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state);
                }
        }

        return ret;
}
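
/*
 * Map the TTM_PL_FLAG_* caching flags of a placement onto a
 * ttm_caching_state and apply it to the ttm.
 */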
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
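
/*
 * Free all kernel-allocated pages backing the ttm: switch them back to
 * cached, warn about unexpected extra references, and return the accounted
 * memory before freeing each page.
 */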
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
        int i;
        struct page *cur_page;
        struct ttm_backend *be = ttm->be;

        if (be)
                be->func->clear(be);
        (void)ttm_tt_set_caching(ttm, tt_cached);
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                ttm->pages[i] = NULL;
                if (cur_page) {
                        if (page_count(cur_page) != 1)
                                printk(KERN_ERR TTM_PFX
                                       "Erroneous page count. "
                                       "Leaking pages.\n");
                        ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
                                            PageHighMem(cur_page));
                        __free_page(cur_page);
                }
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}
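
/*
 * Destroy a ttm: tear down the backend, release user or kernel pages, free
 * the page directory, and drop non-persistent swap storage.
 */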
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct ttm_backend *be;

        if (unlikely(ttm == NULL))
                return;

        be = ttm->be;
        if (likely(be != NULL)) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (likely(ttm->pages != NULL)) {
                if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                        ttm_tt_free_user_pages(ttm);
                else
                        ttm_tt_free_alloced_pages(ttm);

                ttm_tt_free_page_directory(ttm);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        kfree(ttm);
}
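
/*
 * Back the ttm with pinned user pages from the range starting at @start
 * in @tsk's address space.
 */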
int ttm_tt_set_user(struct ttm_tt *ttm,
                    struct task_struct *tsk,
                    unsigned long start, unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
        struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

        /**
         * Account user pages as lowmem pages for now.
         */

        ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                                   false, false, false);
        if (unlikely(ret != 0))
                return ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                ttm_tt_free_user_pages(ttm);
                ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
                return -ENOMEM;
        }

        ttm->tsk = tsk;
        ttm->start = start;
        ttm->state = tt_unbound;

        return 0;
}
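
/*
 * Allocate and initialize a ttm_tt sized to back @size bytes, together
 * with its page directory and driver backend.
 */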
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                             uint32_t page_flags, struct page *dummy_read_page)
{
        struct ttm_bo_driver *bo_driver = bdev->driver;
        struct ttm_tt *ttm;

        if (!bo_driver)
                return NULL;

        ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
        if (!ttm)
                return NULL;

        ttm->bdev = bdev;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;

        ttm_tt_alloc_page_directory(ttm);
        if (ttm->pages == NULL) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(bdev);
        if (ttm->be == NULL) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = tt_unpopulated;
        return ttm;
}
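
/*
 * Unbind the ttm from the GPU aperture, if it is currently bound.
 */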
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;
        struct ttm_backend *be = ttm->be;

        if (ttm->state == tt_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}
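
/*
 * Populate the ttm if necessary and bind it to the location described by
 * @bo_mem. User ttms are marked dirty, since the GPU may write to them
 * while bound.
 */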
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;
        struct ttm_backend *be;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        be = ttm->be;

        ret = ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = tt_bound;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
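
/*
 * Swap the ttm back in: for user ttms, simply re-pin the user pages;
 * otherwise copy every page back from the shmem swap storage and drop the
 * storage unless it is persistent.
 */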
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
                                      ttm->num_pages);
                if (unlikely(ret != 0))
                        return ret;

                ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
                return 0;
        }

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = read_mapping_page(swap_space, i, NULL);
                if (IS_ERR(from_page))
                        goto out_err;
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        ttm_tt_free_alloced_pages(ttm);
        return -ENOMEM;
}
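
/*
 * Swap the ttm out: user ttms are just unpinned, while kernel ttms have
 * every page copied into shmem-backed (or caller-provided persistent) swap
 * storage before the backing pages are freed.
 */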
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        /*
         * For user buffers, just unpin the pages, as there should be
         * vma references.
         */

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ttm_tt_free_user_pages(ttm);
                ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
                ttm->swap_storage = NULL;
                return 0;
        }

        if (!persistant_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR "Failed allocating swap storage.\n");
                        return -ENOMEM;
                }
        } else
                swap_storage = persistant_swap_storage;

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = read_mapping_page(swap_space, i, NULL);
                if (unlikely(IS_ERR(to_page)))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_free_alloced_pages(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistant_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

        return 0;
out_err:
        if (!persistant_swap_storage)
                fput(swap_storage);

        return -ENOMEM;
}