/*
 *  include/asm-s390/pgtable.h
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
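/*
 * Note (illustrative): zero_page_mask allows for more than one shared zero
 * page. When the machine provides multiple zero pages (e.g. to avoid cache
 * aliasing on read-mostly zero mappings), ZERO_PAGE() selects one of them
 * from the low bits of the faulting virtual address; with a single zero
 * page the mask is simply zero.
 */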
#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 PTEs per PGD.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
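/*
 * Worked sizes (illustrative): a page table has PTRS_PER_PTE = 256 entries
 * and thus maps 256 * 4KB = 1MB, exactly one hardware segment
 * (PMD_SHIFT = 20). On 64 bit a region-third table entry covers
 * 2^PUD_SHIFT = 2GB and a region-second table entry 2^PGDIR_SHIFT = 4TB,
 * so the 2048-entry pgd spans 2048 * 4TB = 8PB of virtual address space.
 */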
#define FIRST_USER_ADDRESS	0
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */
extern unsigned long VMALLOC_START;
#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */
/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
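/*
 * Example (illustrative, assuming a 64-byte struct page): on 64 bit the
 * vmemmap window between VMALLOC_END and VMEM_MAP_END is 128GB, which holds
 * 2^31 page structs and therefore describes up to 8TB of physical memory.
 * VMEM_MAX_PFN additionally caps this at VMALLOC_START, and the result is
 * rounded down to a 16MB boundary.
 */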
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:      Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 *  |        P-table origin                              |      TT
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 *  |        S-table origin                             |   TF  TTTL
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit regiontable origin of S390 has the following format:
 *  |      region table origin                          |       DTTL
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)
/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002
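/*
 * Decoding the values above (illustrative): each type is a combination of
 * the bits defined earlier, e.g. _PAGE_TYPE_NONE = _PAGE_INVALID | _PAGE_SWT,
 * _PAGE_TYPE_SWAP = _PAGE_INVALID | _PAGE_SWX | _PAGE_SWT, and
 * _PAGE_TYPE_FILE = _PAGE_INVALID | _PAGE_RO | _PAGE_SWT. The readable
 * types RO/RW/EX_RO/EX_RW leave _PAGE_INVALID clear.
 */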
/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ptep_invalidate to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
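/*
 * In other words (illustrative): the right-hand column is what a valid pte
 * looks like while ipte is in flight. All four of those intermediate values
 * (1000, 1010, 1100, 1110) read as pte_none and can never be confused with
 * the none, swap or file types, which is exactly what the lockless
 * handle_pte_fault path relies on.
 */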
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL	/* region/segment table origin */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* segment table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */

#endif /* __s390x__ */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/* Bits in the storage key */
#define _PAGE_CHANGED	0x02		/* HW changed bit */
#define _PAGE_REFERENCED 0x04		/* HW referenced bit */
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented, but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
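/*
 * How to read these maps (illustrative): the three digits are the xwr
 * permission bits of the mapping; __Pxxx is used for private
 * (copy-on-write) mappings and __Sxxx for shared ones. A private
 * read/write mapping (__P011) therefore gets the read-only type so the
 * first write faults and triggers copy-on-write, while the shared variant
 * (__S011) is mapped read/write directly.
 */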
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
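/*
 * Shadow tables (illustrative note): when a mm runs with the noexec
 * emulation, every page table has a second, "shadow" copy. The struct page
 * of a table stores the address of its shadow in page->index;
 * get_shadow_table() masks off the offset within the (possibly multi-page)
 * table, looks up that struct page and rebuilds the corresponding entry
 * address inside the shadow, or returns NULL if no shadow exists.
 */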
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
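/*
 * Note (illustrative): the shadow pte lives at ptep + PTRS_PER_PTE, i.e.
 * directly behind the primary page table in the same allocation. For a
 * noexec mm only executable ptes (_PAGE_SWX set) are copied into the
 * shadow table, and even those are downgraded to read-only; everything
 * else becomes empty there, so the address space backed by the shadow
 * table only maps pages that were granted execute permission.
 */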
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}
static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a, b)	(pte_val(a) == pte_val(b))
static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}
/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * the relevant functions.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * the relevant functions.
	 */
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */
static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * on s390 the reference bits are in the storage key and never in the
	 * TLB. With virtualization we handle the reference bit, without we
	 * can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})
/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"		/* reset reference bit extended */
		"	ipm	%0\n"		/* condition code -> ccode */
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */
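/*
 * Note on dynamic page table levels (illustrative): a task may run with a
 * page table that starts at the region-second, region-third or segment
 * level. pud_offset/pmd_offset therefore check the type field of the entry:
 * if the upper level actually exists (type R2 resp. R3) the entry is
 * dereferenced, otherwise the "entry" is already the lower-level table and
 * the cast alone folds the level away.
 */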
#define pfn_pte(pfn, pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                  offset                            |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
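/*
 * Worked example (illustrative): for type = 3, offset = 5 the encoding
 * gives _PAGE_TYPE_SWAP | (3 << 2) | ((5 & 1) << 7) | ((5 & ~1UL) << 11)
 * = 0x403 | 0xc | 0x80 | 0x2000 = 0x248f. Decoding: __swp_type reads
 * (0x248f >> 2) & 0x1f = 3 and __swp_offset reads
 * (0x248f >> 11) | ((0x248f >> 7) & 1) = 4 | 1 = 5, as expected. The low
 * offset bit is parked in bit 0x80 so that the hardware-defined bits
 * around it stay zero.
 */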
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
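/*
 * Worked example (illustrative): pgoff 0x80 becomes
 * (((0x80 & 0x7f) << 1) + ((0x80 >> 7) << 12)) | _PAGE_TYPE_FILE
 * = 0x1000 | 0x601 = 0x1601, and pte_to_pgoff maps 0x1601 back to
 * ((0x1601 >> 12) << 7) + ((0x1601 >> 1) & 0x7f) = 0x80. The low seven
 * offset bits occupy pte bits 0x0fe (which is why bit 0x002 of
 * _PAGE_TYPE_FILE is shown as '?' in the type table above); the remaining
 * offset bits sit above the hardware bits, leaving the type bits intact.
 */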
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */