/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
/*
 * MIPS can't do page protection for execute, and considers it the same
 * as read. Also, write permissions imply read permissions. This is the
 * closest we can get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
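/*
 * For illustration only: the runtime fill-in (setup_protection_map() in
 * arch/mips/mm/cache.c) overwrites these dummies once the boot-probed
 * cacheability in _page_cachable_default is known, roughly:
 *
 *	protection_map[0]  = PAGE_NONE;		// __P000: no access
 *	protection_map[1]  = PAGE_READONLY;	// __P001: read only
 *	protection_map[3]  = PAGE_COPY;		// __P011: private write -> COW
 *	protection_map[11] = PAGE_SHARED;	// __S011: shared read/write
 *	...
 */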
extern unsigned long _page_cachable_default;
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
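/*
 * Illustrative note: to dodge virtual aliasing in the data cache, MIPS
 * backs ZERO_PAGE() with a power-of-two block of zeroed pages and picks
 * one by the cache colour of the faulting address, so is_zero_pfn() has
 * to accept that whole pfn range.  Because offset_from_zero_pfn is
 * unsigned, the single comparison also rejects pfn < zero_pfn: with
 * zero_pfn == 0x1000 and an 8-page zero area (mask >> PAGE_SHIFT == 7),
 * pfn 0xfff wraps around to a huge value and the test fails as
 * intended.
 */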
extern void paging_init(void);
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_vaddr(pmd)	pmd_val(pmd)
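/*
 * Illustrative note: a pmd entry on MIPS holds the kernel virtual
 * address of its page-table page, so the accessors above chain
 * naturally:
 *
 *	pmd_page_vaddr(pmd)	// virtual address of the pte page
 *	pmd_phys(pmd)		// ...translated to a physical address
 *	pmd_page(pmd)		// ...and from there to its struct page
 */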
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();	/* publish the high word before the low word that carries _PAGE_PRESENT */
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
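/*
 * Illustrative note: a MIPS TLB entry maps two adjacent virtual pages
 * through EntryLo0/EntryLo1 but is only treated as global when the G
 * bit is set in both halves.  ptep_buddy() returns the other pte of
 * that hardware pair, which is why set_pte() propagates _PAGE_GLOBAL to
 * a none buddy and why pte_clear() above keeps _PAGE_GLOBAL in the
 * "null" pte it writes: clearing one half to plain zero would strip
 * global status from the surviving half's TLB entry.
 */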
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif
/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
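/*
 * Illustrative note: page-table entry sizes are powers of two, so
 * __builtin_ffs(sizeof(pte_t)) - 1 is simply log2 of the entry size
 * (4-byte entries give PTE_T_LOG2 == 2).  That lets index arithmetic
 * use shifts instead of multiplies, e.g.:
 *
 *	byte_offset = index << PTE_T_LOG2;
 *
 * which is the form the generated TLB-refill assembler wants.
 */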
/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
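/*
 * Illustrative note on the "silent" bits: MIPS has no hardware-managed
 * accessed/dirty pte bits, so Linux tracks them itself in _PAGE_ACCESSED
 * and _PAGE_MODIFIED, and only sets the bits the TLB really honours
 * (_PAGE_SILENT_READ aliases the hardware valid bit, _PAGE_SILENT_WRITE
 * the hardware dirty bit) once permission and software state agree.  A
 * write fault on a writable page thus ends up doing roughly:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));	// record the access
 *	set_pte_at(mm, addr, ptep, pte);	// now hardware-writable
 *
 * after which further writes no longer trap.
 */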
/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
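/*
 * Illustrative note: only the cache-attribute field changes; permission
 * bits are preserved.  A driver mmap() handler mapping device registers
 * would typically do (a sketch, not part of this header):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */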
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;	/* clear the six flag bits kept in pte_high */
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;	/* and copy them from the new protection */
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
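/*
 * Illustrative note: pte_modify() keeps the page frame and the bits in
 * _PAGE_CHG_MASK while substituting the new protection, which is how
 * the generic mprotect() path retargets an existing mapping, roughly:
 *
 *	set_pte_at(mm, addr, ptep, pte_modify(*ptep, newprot));
 */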
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
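/*
 * Illustrative note: the core MM calls update_mmu_cache() after
 * installing a pte.  MIPS refills its TLB in software, so
 * __update_tlb() preloads the new translation, while __update_cache()
 * handles the virtually-indexed data cache, flushing aliases of the
 * page where the cache geometry requires it.
 */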
#define kern_addr_valid(addr)	(1)
#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
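/*
 * Illustrative note: with CONFIG_64BIT_PHYS_ADDR a MIPS32 CPU can have
 * physical addresses wider than an unsigned long, so io_remap_pfn_range()
 * first runs the target through the platform's fixup_bigphys_addr()
 * hook, which may relocate the region above 4GB, and only then derives
 * the pfn handed to remap_pfn_range(); hence the 64-bit phys_t
 * intermediate.
 */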
#include <asm-generic/pgtable.h>
/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif
/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif /* _ASM_PGTABLE_H */