/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);

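/*
 * Get the per-cpu RCU freelist batch, allocating one with GFP_ATOMIC
 * if necessary. The table[] array is filled from both ends: 2K page
 * table fragments grow upwards from pgt_index, crst tables grow
 * downwards from crst_index. The batch is full once the two indices
 * meet. Returns NULL if no memory is available.
 */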
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}

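/*
 * RCU callback: actually free everything queued in the batch, then
 * the batch page itself. Runs after a grace period, so no cpu can
 * still hold a reference to one of the tables.
 */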
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}

void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}

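/*
 * Dummy smp_call_function target. Calling it synchronously on all
 * other cpus ensures they have completed any in-flight operations,
 * which serves as a fallback synchronization when no batch page can
 * be allocated for call_rcu.
 */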
static void smp_sync(void *arg)
{
}

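/*
 * A pte table for 256 entries needs 1K in 31-bit mode and 2K in
 * 64-bit mode, so several tables are carved out of each 4K page.
 * The low bits of page->flags (FRAG_MASK) track which fragments are
 * in use. For mms with pgstes (KVM) each pte table is followed by an
 * equally sized pgste area, leaving half as many usable fragments
 * per page; SECOND_HALVES marks the pgste fragments.
 */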
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

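/*
 * Allocate a crst (region or segment) table of ALLOC_ORDER pages.
 * page_to_phys() is returned as the table address; the kernel
 * identity mapping makes physical and virtual addresses
 * interchangeable here.
 */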
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

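/*
 * Free a crst table, deferred by an RCU grace period when other cpus
 * might still walk it: the single-user, single-cpu case frees
 * immediately, otherwise the table is queued on the per-cpu batch,
 * with a synchronous smp_sync round as last resort when no batch
 * memory is available.
 */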
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

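/*
 * Grow the user address space by stacking additional region tables
 * on top of the current pgd, one level per iteration: 2G (segment
 * table only) -> 4T (region-third table) -> 8P (region-second table).
 */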
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

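/*
 * Remove page table levels again until the address space limit fits,
 * e.g. when a 31-bit compat binary is exec'd by a 64-bit task.
 */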
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
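/*
 * Hand out a pte table fragment: reuse a partially filled page from
 * the front of mm->context.pgtable_list if one exists, otherwise
 * allocate and initialize a fresh page. The bits set in page->flags
 * record which fragments are taken; fully used pages move to the
 * tail of the list.
 */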
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

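/*
 * Free a fragment whose in-use bits were encoded into the low bits
 * of the table pointer by page_table_free_rcu. Runs from the RCU
 * callback without the list lock; the page was already removed from
 * the pgtable_list.
 */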
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

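/*
 * Return a pte table fragment to its page immediately. The page goes
 * back on the pgtable_list while fragments remain in use and is
 * released once the last one is freed.
 */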
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

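/*
 * RCU variant of page_table_free: the fragment bits are stashed in
 * the low bits of the table pointer for __page_table_free, and the
 * page is unlinked from the pgtable_list so no fragment of it can be
 * handed out again before the grace period has expired.
 */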
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

/*
 * Switch on pgstes for the current userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have the switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened meanwhile */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

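/*
 * Probe the page with LRA (load real address): condition code 0
 * means the address translated successfully, i.e. the page is
 * present in the kernel mapping.
 */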
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */