/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/pgtable-bits.h>

struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;
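
/*
 * Rough background on the table above (a sketch, not part of the original
 * comment): __Pxxx covers private mappings and __Sxxx shared ones, the three
 * digits standing for exec/write/read.  The slots are rewritten at boot once
 * _page_cachable_default and the CPU's RIXI support have been probed, so
 * that e.g. the __P011 slot ends up holding a PAGE_COPY-style protection and
 * __S011 a PAGE_SHARED-style one.
 */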

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
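
/*
 * On MIPS the zero page is replicated so that a copy exists for every
 * possible cache colour of an aliasing, virtually indexed D-cache (hence
 * __HAVE_COLOR_ZERO_PAGE above).  zero_page_mask selects the copy whose
 * colour matches the faulting virtual address, roughly:
 *
 *	struct page *zp = ZERO_PAGE(vaddr);	/- colour-matched zero page -/
 *
 * This is a sketch of the intent; the mask itself is set up at boot from
 * the detected cache geometry.
 */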

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	if (cpu_has_htw)						\
		write_c0_pwctl(read_c0_pwctl() &			\
			       ~(1 << MIPS_PWCTL_PWEN_SHIFT));		\
} while (0)

#define htw_start()							\
do {									\
	if (cpu_has_htw)						\
		write_c0_pwctl(read_c0_pwctl() |			\
			       (1 << MIPS_PWCTL_PWEN_SHIFT));		\
} while (0)

#define htw_reset()							\
do {									\
	htw_stop();							\
	back_to_back_c0_hazard();					\
	htw_start();							\
	back_to_back_c0_hazard();					\
} while (0)
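
/*
 * Rough usage sketch (assuming a hardware page table walker is present):
 * code that rewrites live page tables brackets the update so the walker
 * cannot fetch a half-written entry, e.g.
 *
 *	htw_stop();
 *	... modify PTEs / PMDs ...
 *	htw_start();
 *
 * htw_reset() simply cycles the walker off and on again, with the required
 * CP0 hazard barriers in between.
 */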

extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	pte_t pteval);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
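
/*
 * Rationale for the "buddy" handling above: a MIPS TLB entry maps an
 * even/odd pair of virtual pages (EntryLo0/EntryLo1), and the entry's
 * global bit is the logical AND of the two G bits.  If only one half of
 * the pair were marked global, the combined entry would lose its global
 * status, so an empty buddy slot is tagged global as well.
 */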

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
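
/*
 * These log2 sizes let low-level page-table walkers turn a table index into
 * a byte offset with a single shift; the generated TLB refill code, for
 * instance, computes something like (index << PTE_T_LOG2) to step through a
 * PTE page.  (Illustrative use, not an exhaustive list of consumers.)
 */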

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}
#endif

#if defined(_PAGE_HUGE)
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */

static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
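
/*
 * Typical use (a sketch): a driver mapping device registers into userspace
 * strips the cacheable attribute from the VMA's protection before remapping,
 * roughly:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */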

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif
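
/*
 * Intent of the override above: with 36-bit physical addresses on a 32-bit
 * kernel, the pfn handed in by a driver may describe memory or MMIO above
 * 4GB.  fixup_bigphys_addr() is expected to rewrite it into the real high
 * physical address before the ordinary remap_pfn_range() is invoked; when
 * CONFIG_64BIT_PHYS_ADDR is not set the generic io_remap_pfn_range() is
 * used instead.
 */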

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;
	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;
	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}
	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
	return pmd;
}

/*
 * The generic version of pmdp_get_and_clear uses a version of pmd_clear()
 * with a different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
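
/*
 * Background on the aliasing constraint: with a virtually indexed D-cache,
 * two mappings of the same page that differ in the colour bits of their
 * virtual address can land in different cache lines.  The arch-specific
 * arch_get_unmapped_area() therefore colour-aligns shared mappings (at
 * SHMLBA granularity) so that every view of a page uses the same lines.
 */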

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */