/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"
#if SHMLBA > 16384
#error FIX ME
#endif

#define from_address    (0xffff8000)
#define to_address      (0xffffc000)

static DEFINE_SPINLOCK(v6_lock);
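
/*
 * from_address and to_address are two fixed kernel virtual windows into
 * which the source and destination pages are temporarily mapped at a
 * colour-matched offset.  With 4 KiB pages and a 16 KiB aliasing
 * boundary there are four cache colours; assuming the usual definition
 * CACHE_COLOUR(vaddr) = (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT, a user
 * address of 0x4000b000 gives colour (0x3000 >> 12) = 3, so the
 * destination page would be mapped at to_address + (3 << PAGE_SHIFT)
 * = 0xfffff000, which hits the same cache lines as the user mapping.
 * Both aliasing paths below share these two windows, so v6_lock
 * serialises all aliasing copies and clears system-wide.
 */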

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
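        /*
         * The flush below pushes the just-copied data out of the
         * D-cache.  Presumably this keeps other observers of the page
         * (e.g. the I-cache, which typically does not snoop the
         * D-cache on these cores) coherent with the new contents; the
         * exact motivation is not stated here.
         */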
        __cpuc_flush_dcache_area(kto, PAGE_SIZE);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: this relies on the MCRR cache range operations being
 * supported.
 */
static void discard_old_kernel_data(void *kto)
{
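        /*
         * mcrr p15, 0, <end>, <start>, c6 is the ARMv6 "invalidate
         * data cache range" operation: every line in [start, end] is
         * dropped without being written back, so stale data in the
         * destination page never reaches memory.  The end operand
         * points at the last cache line of the page, hence the
         * "+ PAGE_SIZE - L1_CACHE_BYTES" below.
         */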
        __asm__("mcrr   p15, 0, %1, %0, c6      @ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
           : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;

        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(to));

        /*
         * Now copy the page using the same cache colour as the
         * page's ultimate destination.
         */
        spin_lock(&v6_lock);
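
        /*
         * Map both pages into their colour-matched slots in the two
         * PTE windows defined above.
         */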
        set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
        set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

        kfrom = from_address + (offset << PAGE_SHIFT);
        kto   = to_address + (offset << PAGE_SHIFT);
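
        /*
         * Make sure no stale TLB entries from a previous use of these
         * windows are still live before touching the new mappings.
         */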
        flush_tlb_kernel_page(kfrom);
        flush_tlb_kernel_page(kto);

        copy_page((void *)kto, (void *)kfrom);

        spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long to = to_address + (offset << PAGE_SHIFT);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(page));

        /*
         * Now clear the page using the same cache colour as
         * the page's ultimate destination.
         */
        spin_lock(&v6_lock);
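
        /*
         * As in the copy path: install the colour-matched mapping and
         * kill any stale TLB entry before writing through it.
         */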
        set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
        flush_tlb_kernel_page(to);
        clear_page((void *)to);

        spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
        .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
        .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
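
/*
 * The non-aliasing handlers above are the defaults; if the CPU's
 * cache turns out to be VIPT aliasing, swap in the colour-aware
 * versions at boot.
 */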
static int __init v6_userpage_init(void)
{
        if (cache_is_vipt_aliasing()) {
                cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
                cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
        }

        return 0;
}

core_initcall(v6_userpage_init);
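
/*
 * These hooks are not called directly: generic mm code reaches them
 * through copy_user_highpage()/clear_user_highpage(), which on ARM
 * dispatch via the cpu_user vector patched above.  (Assumption: the
 * exact dispatch macros live in <asm/page.h> and vary by kernel
 * version.)
 */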