/*
 *    Copyright IBM Corp. 2007,2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
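/*
 * Example: booting with "vmalloc=512M" on the kernel command line lowers
 * VMALLOC_START so that 512MB of address space below VMALLOC_END is
 * reserved for vmalloc mappings; memparse() accepts the usual K/M/G
 * size suffixes.
 */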
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
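/*
 * On 64-bit a new address space starts out with a segment table that
 * covers 2GB (asce_limit == 1UL << 31). crst_table_upgrade() stacks a
 * region-third table (4TB, 1UL << 42) and then a region-second table
 * (8PB, 1UL << 53) on top as the limit grows; crst_table_downgrade()
 * peels them off again.
 */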
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
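/*
 * Illustrative life cycle of a guest address space (sketch only, the
 * exact call sites live in the hypervisor, e.g. KVM):
 *
 *	gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, from, to, len);
 *	gmap_enable(gmap);	<- before entering SIE
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */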
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}
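/*
 * If the machine has IDTE, TLB entries derived from a single table
 * origin can be invalidated selectively; otherwise the only option is
 * a global flush on all CPUs.
 */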
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}
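/*
 * The gmap uses a full four-level table: the region-first table is
 * indexed by guest address bits 53-63, region-second by bits 42-52,
 * region-third by bits 31-41 and the segment table by bits 20-30.
 * Every table has 2048 entries, hence the "& 0x7ff" after each shift
 * in the walks below.
 */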
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
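/*
 * A gmap segment table entry is in one of three states: valid, in which
 * case it points to a page table shared with the parent mm; invalid with
 * _SEGMENT_ENTRY_RO and the parent address in the origin bits, meaning
 * "mapped but not yet connected" (resolved lazily by __gmap_fault());
 * or plain _SEGMENT_ENTRY_INV for an unmapped range.
 */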
/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO |
			       mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}
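/*
 * A pgste page table occupies a full 4K page: 256 pte entries in the
 * lower 2K and the matching guest storage key entries (pgstes) in the
 * upper 2K. Setting _mapcount to 3 (== FRAG_MASK on 64-bit) marks the
 * page as fully used, so it never enters the fragment allocator below.
 */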
static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}
#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
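/*
 * Worked example for 64-bit (FRAG_MASK == 0x03, two 2K fragments per 4K
 * page): a fresh page has _mapcount 0x01 after the first allocation;
 * allocating the second half XORs in 0x02, giving 0x03 == FRAG_MASK, and
 * the page drops off pgtable_list. The "mask | (mask >> 4)" above also
 * treats fragments tagged for a pending RCU free (bits 4-5) as busy.
 */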
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}
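/*
 * The pointer queued via tlb_remove_table() carries a tag in its low
 * bits, which are zero for any table aligned to at least 1K: FRAG_MASK
 * marks a pgste page, "bit << 4" records which 1K/2K fragment is
 * pending, and an untagged pointer is a full crst table.
 * __tlb_remove_table() decodes the tag once no CPU can still be walking
 * the old tables and frees the memory accordingly.
 */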
void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif
/*
 * Switch on pgstes for the userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
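/*
 * s390_enable_sie() is meant to be called while the process is still
 * single-threaded, typically by a hypervisor module such as KVM before
 * any guest code runs; replacing tsk->mm is only safe under exactly the
 * conditions checked above (sole mm user, no outstanding AIO contexts,
 * the task running on its own mm).
 */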
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */