/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	22
# define PUD_SHIFT	22
# define PGDIR_SHIFT	22
#else /* __s390x__ */
# define PMD_SHIFT	21
# define PUD_SHIFT	31
# define PGDIR_SHIFT	31
#endif /* __s390x__ */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#ifndef __s390x__
# define PTRS_PER_PTE	1024
# define PTRS_PER_PMD	1
# define PTRS_PER_PUD	1
# define PTRS_PER_PGD	512
#else /* __s390x__ */
# define PTRS_PER_PTE	512
# define PTRS_PER_PMD	1024
# define PTRS_PER_PUD	1
# define PTRS_PER_PGD	2048
#endif /* __s390x__ */
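
/*
 * Worked example (editorial sketch, using the values above): on 31 bit
 * a pte maps a 4KB page and one pgd entry covers
 * PGDIR_SIZE = 1UL << 22 = 4MB (1024 ptes x 4KB); the 512 pgd entries
 * then cover the full 2GB of a 31 bit address space. On 64 bit a pmd
 * entry covers 2MB (PMD_SHIFT = 21, 512 ptes x 4KB) and a pgd entry
 * 2GB (PGDIR_SHIFT = 31, 1024 pmd entries x 2MB), so 2048 pgd entries
 * cover 4TB (1UL << 42).
 */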
#define FIRST_USER_ADDRESS  0
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
#ifndef __s390x__
#define VMALLOC_START	0x78000000UL
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START	0x3e000000000UL
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */
/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define VMEM_MAP	((struct page *) VMALLOC_END)
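
/*
 * Worked example (editorial sketch; sizeof(struct page) == 56 is an
 * illustrative assumption, the real value depends on the config): on
 * 64 bit the vmemmap window is VMEM_MAP_END - VMALLOC_END
 * = 0x40000000000 - 0x3e040000000 = 0x1fc0000000 bytes (127GB), room
 * for roughly 2.4 billion struct pages. VMEM_MAX_PFN takes the smaller
 * of that and VMALLOC_START >> PAGE_SHIFT, and VMEM_MAX_PHYS rounds
 * the resulting byte limit down to a 16MB boundary via the
 * "& ~((16 << 20) - 1)" term.
 */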
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                        PFRA                        |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush, on the other hand, uses ptep_invalidate to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * (i = _PAGE_INVALID, r = _PAGE_RO, x = _PAGE_SWX, t = _PAGE_SWT)
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
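
/*
 * Worked example of the table above (editorial, values from the
 * defines): a valid read-only pte is _PAGE_TYPE_RO = 0x200, i.e. 0100
 * reading the bits 0x400/0x200/0x002/0x001 left to right. While ipte
 * runs, the same pte is observed as 0x600 (1100); pte_none treats that
 * as none and pte_present/pte_file reject it, so a racing
 * handle_pte_fault can never mistake it for a swap or file pte.
 */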
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event       */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin     */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control            */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin        */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit               */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length                */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin             */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control            */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event               */
#define _ASCE_REAL_SPACE	0x20	/* real space control               */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask             */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type          */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type         */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type          */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type               */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin      */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type          */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type         */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type          */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length              */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin             */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit              */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry      */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#endif /* __s390x__ */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/* Bits in the storage key */
#define _PAGE_CHANGED	0x02		/* HW changed bit                   */
#define _PAGE_REFERENCED 0x04		/* HW referenced bit                */
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
/*
 * Depending on the EXEC_PROTECT option, s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
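
/*
 * Editorial note on the tables above: the __P table is used for
 * private (copy-on-write) mappings, the __S table for shared ones. A
 * PROT_READ|PROT_WRITE private mapping maps to __P011 = PAGE_RO, so
 * the first store faults and the fault handler can copy the page; the
 * same protection on a shared mapping maps to __S011 = PAGE_RW and
 * stores go straight through.
 */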
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
static inline struct page *get_shadow_page(struct page *page)
{
	if (s390_noexec && page->index)
		return virt_to_page((void *)(addr_t) page->index);
	return NULL;
}
static inline void *get_shadow_pte(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & (PAGE_SIZE - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
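
/*
 * Editorial note on the helpers above: the shadow table address is
 * kept in page->index of the page that holds the primary table. Both
 * helpers mask off the offset within the table, look up the page, and
 * re-apply the offset to the shadow address, so a primary entry and
 * its shadow always sit at the same offset within their tables.
 */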
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *pteptr, pte_t pteval)
{
	pte_t *shadow_pte = get_shadow_pte(pteptr);

	*pteptr = pteval;
	if (shadow_pte) {
		if (!(pte_val(pteval) & _PAGE_INVALID) &&
		    (pte_val(pteval) & _PAGE_SWX))
			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
		else
			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
	}
}
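
/*
 * Illustrative sketch of the logic above (assuming an execute-protect
 * kernel with s390_noexec set; the scenario is editorial): storing a
 * valid pte whose type has _PAGE_SWX set, e.g.
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_EX_RW));
 *
 * places a read-only copy (pte_val | _PAGE_RO) into the shadow table
 * entry, while a non-executable or invalid pte leaves the shadow
 * entry as _PAGE_TYPE_EMPTY.
 */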
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }
#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
	return (pud_val(pud) & mask) != _REGION3_ENTRY;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))
/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}
#else /* __s390x__ */

#define pgd_clear(pgd)		do { } while (0)

static inline void pud_clear_kernel(pud_t *pud)
{
	pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t * pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

#endif /* __s390x__ */
static inline void pmd_clear(pmd_t * pmdp)
{
	pmd_t *shadow_pmd = get_shadow_table(pmdp);

	pmd_clear_kernel(pmdp);
	if (shadow_pmd)
		pmd_clear_kernel(shadow_pmd);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t *shadow_pte = get_shadow_pte(ptep);

	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (shadow_pte)
		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return 0;
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* S390 has 1mb segments, we are emulating 4MB segments */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
{
	__ptep_ipte(address, ptep);
	ptep = get_shadow_pte(ptep);
	if (ptep)
		__ptep_ipte(address, ptep);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * must be a no-op.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__address, __ptep);			\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
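
/*
 * Illustrative sketch of the sequence described above, roughly as
 * common code like change_pte_range uses it (editorial, no locking
 * shown):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);	becomes a no-op here
 */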
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(address, ptep);
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate(__addr, __ptep);			\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})
/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
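
/*
 * Illustrative usage sketch (editorial): callers such as the rmap code
 * pair the two helpers above as a test-then-clear sequence, subject to
 * the non-atomicity caveat in the comment above:
 *
 *	if (page_test_dirty(page)) {
 *		page_clear_dirty(page);
 *		set_page_dirty(page);
 *	}
 */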
/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
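
/*
 * Illustrative sketch of a full table walk with the accessors above
 * (editorial; mm, addr and page are assumed locals, no locking shown):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte = pte_offset_map(pmd, addr);
 *		if (pte_present(*pte))
 *			page = pte_page(*pte);
 *		pte_unmap(pte);
 *	}
 */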
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
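
/*
 * Worked example of the encoding above (editorial, illustrative
 * values, bits counted from the least significant bit):
 * __swp_entry(3, 0x123) yields a pte with _PAGE_TYPE_SWAP (0x403) set,
 * the type in bits 2-6 (3 << 2), the low offset bit in bit 7 and the
 * remaining offset bits shifted left by 11; __swp_type() and
 * __swp_offset() undo exactly this split.
 */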
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */