arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"

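/*
 * A host_vm_change batches up the mmap/munmap/mprotect calls that must
 * be issued against the host address space.  With a single-entry ops[]
 * array, the batching comes mostly from the coalescing of adjacent
 * requests done in the add_* helpers below.
 */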
struct host_vm_change {
        struct host_vm_op {
                enum { NONE, MMAP, MUNMAP, MPROTECT } type;
                union {
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                                int fd;
                                __u64 offset;
                        } mmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                        } munmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                        } mprotect;
                } u;
        } ops[1];
        int index;
        struct mm_id *id;
        void *data;
        int force;
};

#define INIT_HVC(mm, force) \
        ((struct host_vm_change) \
         { .ops         = { { .type = NONE } }, \
           .id          = &mm->context.id, \
           .data        = NULL, \
           .index       = 0, \
           .force       = force })

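/*
 * Issue the first 'end' queued operations to the host.  'finished' is
 * passed through to the os layer to mark the last batch of a flush.
 */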
static int do_ops(struct host_vm_change *hvc, int end,
                  int finished)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for (i = 0; i < end && !ret; i++) {
                op = &hvc->ops[i];
                switch (op->type) {
                case MMAP:
                        ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
                                  op->u.mmap.prot, op->u.mmap.fd,
                                  op->u.mmap.offset, finished, &hvc->data);
                        break;
                case MUNMAP:
                        ret = unmap(hvc->id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, &hvc->data);
                        break;
                case MPROTECT:
                        ret = protect(hvc->id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, &hvc->data);
                        break;
                default:
                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
                               op->type);
                        break;
                }
        }

        return ret;
}

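/*
 * Queue a host mmap of len bytes at virt, backed by the physical page
 * at phys.  If it simply extends the previously queued mmap (same fd
 * and protection, contiguous addresses and offsets), grow that entry
 * instead; flush the queue first if it is full.
 */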
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_change *hvc)
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)) {
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MMAP,
                                    .u = { .mmap = { .addr      = virt,
                                                     .len       = len,
                                                     .prot      = prot,
                                                     .fd        = fd,
                                                     .offset    = offset }
                           } });
        return ret;
}

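/* Queue a host munmap, merging with an adjacent preceding one if possible. */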
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)) {
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MUNMAP,
                                    .u = { .munmap = { .addr    = addr,
                                                       .len     = len } } });
        return ret;
}

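/* Queue a host mprotect, merging with a compatible preceding one if possible. */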
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.prot == prot)) {
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MPROTECT,
                                    .u = { .mprotect = { .addr  = addr,
                                                         .len   = len,
                                                         .prot  = prot } } });
        return ret;
}

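/* Advance n to the next inc-aligned boundary; inc must be a power of two. */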
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

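/*
 * Walk the ptes covering [addr, end) under one pmd, queueing a host
 * operation for every page that is new, has changed protection, or
 * (when hvc->force is set) is simply present.  Addresses in the stub
 * range are skipped.
 */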
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pte_t *pte;
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                if ((addr >= STUB_START) && (addr < STUB_END))
                        continue;

                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte))
                        w = 0;

                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if (hvc->force || pte_newpage(*pte)) {
                        if (pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, prot, hvc);
                        else
                                ret = add_munmap(addr, PAGE_SIZE, hvc);
                } else if (pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
        return ret;
}

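/*
 * Walk the pmds covering [addr, end) under one pud: absent entries that
 * are new (or when forcing) get their range unmapped on the host,
 * present ones are descended into.
 */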
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd)) {
                        if (hvc->force || pmd_newpage(*pmd)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, hvc);
        } while (pmd++, addr = next, ((addr < end) && !ret));
        return ret;
}

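/* The same walk as update_pmd_range, one level up. */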
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_present(*pud)) {
                        if (hvc->force || pud_newpage(*pud)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, hvc);
        } while (pud++, addr = next, ((addr < end) && !ret));
        return ret;
}

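/*
 * Bring the host's mappings for [start_addr, end_addr) in line with the
 * page tables of 'mm'.  If anything fails, the current process is
 * killed, since its host address space is no longer consistent.
 */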
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *pgd;
        struct host_vm_change hvc;
        unsigned long addr = start_addr, next;
        int ret = 0;

        hvc = INIT_HVC(mm, force);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if (!pgd_present(*pgd)) {
                        if (force || pgd_newpage(*pgd)) {
                                ret = add_munmap(addr, next - addr, &hvc);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, &hvc);
        } while (pgd++, addr = next, ((addr < end_addr) && !ret));

        if (!ret)
                ret = do_ops(&hvc, hvc.index, 1);

        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
                       "process\n");
                force_sig(SIGKILL, current);
        }
}

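/*
 * Sync the host's view of a kernel address range with init_mm's page
 * tables, unmapping, remapping, or reprotecting pages directly through
 * the os layer.  Returns nonzero if anything was changed.
 */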
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for (addr = start; addr < end;) {
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd)) {
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if (last > end)
                                last = end;
                        if (pgd_newpage(*pgd)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if (!pud_present(*pud)) {
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if (last > end)
                                last = end;
                        if (pud_newpage(*pud)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (!pmd_present(*pmd)) {
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if (last > end)
                                last = end;
                        if (pmd_newpage(*pmd)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if (!pte_present(*pte) || pte_newpage(*pte)) {
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if (err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if (pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if (pte_newprot(*pte)) {
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return updated;
}

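/*
 * Sync the host mapping of a single page in a process address space,
 * killing the process if the page table walk or the host calls fail.
 */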
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        void *flush = NULL;
        int r, w, x, prot, err = 0;
        struct mm_id *mm_id;

        address &= PAGE_MASK;
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto kill;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if (pte_newpage(*pte)) {
                if (pte_present(*pte)) {
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if (pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if (err)
                goto kill;

        *pte = pte_mkuptodate(*pte);

        return;

kill:
        printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
}

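/* Out-of-line wrappers around the page table lookup macros. */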
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return pte_offset_kernel(pmd, address);
}

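/* Look up the pte mapping 'addr' in a task's address space. */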
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pte_offset_map(pmd, addr);
}

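/*
 * The entry points below implement the generic TLB flush API in terms
 * of the helpers above.
 */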
void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
        flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        if (vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        fix_range(mm, start, end, 0);
}

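/* Update the host mappings for every vma in 'mm'. */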
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 0);
                vma = vma->vm_next;
        }
}

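/*
 * Unconditionally re-push every mapping in the current address space
 * to the host (force = 1).
 */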
void force_flush_all(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}