/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
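
/*
 * Background note (summary, not from the original source): region and
 * segment (crst) tables are four pages long, hence ALLOC_ORDER above.
 * A pte table only needs 1K (31-bit) or 2K (64-bit) of a 4K page, so
 * such pages are handed out in fragments; FRAG_MASK covers the
 * per-fragment allocation bits kept in page->_mapcount by
 * page_table_alloc() and page_table_free().
 */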

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}
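
/*
 * Usage note (illustrative, not part of the original file): the upgrade is
 * normally driven from the mmap path when a mapping above the current
 * context.asce_limit is requested, roughly:
 *
 *	if (addr + len > mm->context.asce_limit)
 *		rc = crst_table_upgrade(mm, 1UL << 53);
 *
 * Each successful round adds one region-table level (2G -> 4T -> 8P) and
 * __crst_table_upgrade() then reloads the new ASCE on all CPUs.
 */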

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		set_user_asce(mm);
}

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		return NULL;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page) {
		kfree(gmap);
		return NULL;
	}
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
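
/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * such as KVM typically creates one gmap per virtual machine and wires
 * guest memory into it, roughly:
 *
 *	gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, userspace_addr, guest_addr, size);
 *	gmap_enable(gmap);	followed by SIE entry on this cpu
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */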

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
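
/*
 * Note (summary, not from the original source): gmap_enable/gmap_disable
 * only record the active guest address space in the lowcore; the SIE
 * entry code and the fault handler read S390_lowcore.gmap to attribute
 * guest faults to the right gmap while the CPU runs in SIE mode.
 */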

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
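
/*
 * Example (illustrative, not part of the original file): mapping 1G of the
 * parent address space at guest address 0 requires both addresses and the
 * length to be segment (PMD_SIZE, 1M) aligned:
 *
 *	rc = gmap_map_segment(gmap, userspace_addr, 0UL, 1UL << 30);
 *
 * The segment entries are created invalid and merely remember the 'from'
 * address; they are connected to real page tables on the first fault.
 */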

static unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *table;

	/*
	 * Walk the 4-level guest table: the shifts 53/42/31/20 select the
	 * region-1, region-2, region-3 and segment table index, each table
	 * having 2048 (0x7ff + 1) entries.
	 */
	table = gmap->table + ((gaddr >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((gaddr >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((gaddr >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((gaddr >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = gaddr;
	segment_ptr = gmap_table_walk(gmap, gaddr);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (gaddr & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (gaddr & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(struct gmap *gmap, unsigned long gaddr,
				unsigned long segment,
				unsigned long *segment_ptr)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = gaddr & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = gaddr;
	segment_ptr = gmap_table_walk(gmap, gaddr);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (gaddr & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(gmap, gaddr, segment, segment_ptr);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
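
/*
 * Note (summary, not from the original source): gmap_fault() is the slow
 * path taken when a guest access hits an invalid segment entry. It connects
 * the gmap segment entry to the parent mm's page table and returns the
 * corresponding user space address, which the caller can then fault in,
 * e.g. with fixup_user_fault().
 */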

static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	}
	free_swap_and_cache(entry);
}

/*
 * The mm->mmap_sem lock must be held
 */
static void gmap_zap_unused(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long ptev, pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep, pte;

	ptep = get_locked_pte(mm, vmaddr, &ptl);
	if (unlikely(!ptep))
		return;
	pte = *ptep;
	if (!pte_swap(pte))
		goto out_pte;
	/* Zap unused and logically-zero pages */
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	ptev = pte_val(pte);
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
		pte_clear(mm, vmaddr, ptep);
	}
	pgste_set_unlock(ptep, pgste);
out_pte:
	pte_unmap_unlock(*ptep, ptl);
}

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *table, *segment_ptr;
	unsigned long segment, vmaddr, pgstev, ptev;
	struct gmap_pgtable *mp;
	struct page *page;

	segment_ptr = gmap_table_walk(gmap, gaddr);
	if (IS_ERR(segment_ptr))
		return;
	segment = *segment_ptr;
	if (segment & _SEGMENT_ENTRY_INVALID)
		return;
	page = pfn_to_page(segment >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	vmaddr = mp->vmaddr | (gaddr & ~PMD_MASK);
	/* Page table is present */
	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((vmaddr >> 12) & 0xff);
	pgstev = table[PTRS_PER_PTE];
	ptev = table[0];
	/* quick check, checked again with locks held */
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
		gmap_zap_unused(gmap->mm, vmaddr);
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long *table, gaddr, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	gaddr = from;
	while (gaddr < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((gaddr >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((gaddr >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((gaddr >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((gaddr >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (gaddr & ~PMD_MASK),
			       size, NULL);
		gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of the area to mark
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(gmap, gaddr);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			gaddr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);

static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	if (!pgtable_page_ctor(page)) {
		kfree(mp);
		__free_page(page);
		return NULL;
	}
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
	kfree(mp);
}

static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, bool init_skey)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	pgste_t pgste;

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	do {
		pgste = pgste_get_lock(pte);
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
		if (init_skey) {
			unsigned long address;

			pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
					      PGSTE_GR_BIT | PGSTE_GC_BIT);

			/* skip invalid and not writable pages */
			if (pte_val(*pte) & _PAGE_INVALID ||
			    !(pte_val(*pte) & _PAGE_WRITE)) {
				pgste_set_unlock(pte, pgste);
				continue;
			}

			address = pte_val(*pte) & PAGE_MASK;
			page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
		}
		pgste_set_unlock(pte, pgste);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(start_pte, ptl);

	return addr;
}

static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, bool init_skey)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, bool init_skey)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
	} while (pud++, addr = next, addr != end);

	return addr;
}

void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
			    unsigned long end, bool init_skey)
{
	unsigned long addr, next;
	pgd_t *pgd;

	down_write(&mm->mmap_sem);
	if (init_skey && mm_use_skey(mm))
		goto out_up;
	addr = start;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
	} while (pgd++, addr = next, addr != end);
	if (init_skey)
		current->mm->context.use_skey = 1;
out_up:
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL(page_table_reset_pgste);

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
retry:
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
	     (pte_val(*ptep) & _PAGE_PROTECT)) {
		pte_unmap_unlock(*ptep, ptl);
		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		goto retry;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
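
/*
 * Note (summary, not from the original source): the PGSTE of a pte keeps
 * the guest view of the storage key (access-control, fetch-protection,
 * reference and change bits) while the page is unmapped or protected;
 * set_guest_storage_key() merges the new key into both the real storage
 * key and the PGSTE so that no state is lost across host paging.
 */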

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
			    unsigned long end, bool init_skey)
{
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
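
/*
 * Note (summary, not from the original source): the low bits of
 * page->_mapcount record which 1K/2K fragments of a 4K page are in use;
 * the upper nibble marks fragments freed with page_table_free_rcu() that
 * are still waiting for the RCU grace period. Pages that still have free
 * fragments stay on mm->context.pgtable_list.
 */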

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	spinlock_t *ptl;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		ptl = pmd_lock(mm, pmd);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(ptl);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
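
/*
 * Note (illustrative, not part of the original file): s390_enable_sie() is
 * called once per process before the first SIE entry, typically from the
 * KVM VM creation path, so that all further page tables are allocated
 * with pgstes via page_table_alloc_pgste().
 */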

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
void s390_enable_skey(void)
{
	page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
	spinlock_t *ptl;
	pte_t *pte;
	bool dirty = false;

	pte = get_locked_pte(gmap->mm, address, &ptl);
	if (unlikely(!pte))
		return false;
	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
		dirty = true;
	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */