#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/* No trailing semicolon here: it would expand to a double statement
 * and break an if/else around the macro. */
#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
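
/* Illustrative sketch only (not part of this header, hypothetical
 * names): a driver that fills a kernel buffer for a consumer that does
 * not snoop the CPU cache could push the data out like this. */
#if 0
static void example_publish_buffer(void *buf, size_t len)
{
	memset(buf, 0xff, len);		/* CPU stores land in the dcache */
	flush_kernel_dcache_range((unsigned long)buf, len);
}
#endif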

extern void flush_cache_all_local(void);

/* Wrapper with the (void *) argument that on_each_cpu() expects. */
static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);

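/* Below parisc_cache_flush_threshold it is cheaper to flush the range
 * line by line; beyond it, flushing the whole cache wins.  The
 * threshold is calibrated at boot by parisc_setup_cache_timing(). */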
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

/* The icache does not snoop the dcache, so write the data back
 * before invalidating the instruction side. */
#define flush_icache_page(vma, page) \
do { \
	flush_kernel_dcache_page(page); \
	flush_kernel_icache_page(page_address(page)); \
} while (0)

#define flush_icache_range(s, e) \
do { \
	flush_kernel_dcache_range_asm(s, e); \
	flush_kernel_icache_range_asm(s, e); \
} while (0)
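
/* Illustrative sketch only (hypothetical names): any code that writes
 * instructions into memory must flush before they are executed. */
#if 0
static void example_install_code(void *dst, const void *insns, size_t len)
{
	memcpy(dst, insns, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
#endif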

/* Used when the kernel accesses a user page on the user's behalf
 * (e.g. ptrace): flush the user view first, then keep the kernel
 * alias coherent after the write. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
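
/* Illustrative sketch only (hypothetical names), loosely modelled on a
 * ptrace-style write into a user page through its kernel mapping. */
#if 0
static void example_poke_user_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vaddr,
				   const void *src, int len)
{
	void *dst = page_address(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, dst, src, len);
}
#endif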

static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long sr3;	/* mm contexts are unsigned long; don't truncate */

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}

static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
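
/* Illustrative sketch only (hypothetical names): a driver that has
 * written a page through its kernel mapping flushes that mapping so a
 * user-space alias of the same page sees the new data. */
#if 0
static void example_complete_pio(struct page *page)
{
	/* ... kernel stores into page_address(page) ... */
	flush_kernel_dcache_page(page);
}
#endif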

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */
#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

#define kmap_atomic(page, idx)		page_address(page)

#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif
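
/* Illustrative sketch only (hypothetical names): kmap_atomic() and
 * kunmap_atomic() must stay paired so kunmap_parisc() can flush the
 * kernel alias on pa8800/pa8900.  Uses this era's two-argument form. */
#if 0
static void example_zero_page(struct page *page)
{
	void *vto = kmap_atomic(page, KM_USER0);

	memset(vto, 0, PAGE_SIZE);
	kunmap_atomic(vto, KM_USER0);
}
#endif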

#endif /* _PARISC_CACHEFLUSH_H */