/******************************************************************************
 * privcmd.c - interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

/* State threaded through apply_to_page_range() while building the mapping. */
struct remap_data {
	unsigned long mfn;
	unsigned domid;
	pgprot_t prot;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

	xen_set_domain_pte(ptep, pte, rmd->domid);

	return 0;
}

/* Map a run of foreign machine frames into @vma at @addr on behalf of @domid. */
int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long mfn, unsigned long size,
			   pgprot_t prot, unsigned domid)
{
	struct remap_data rmd;
	int err;

	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	rmd.mfn = mfn;
	rmd.domid = domid;
	rmd.prot = prot;

	err = apply_to_page_range(vma->vm_mm, addr, size,
				  remap_area_mfn_pte_fn, &rmd);
	return err;
}

static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	return ret;
}
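
/*
 * Usage sketch (illustrative, not part of this file): userspace, e.g. a
 * libxc-style toolstack, issues hypercalls by filling a struct
 * privcmd_hypercall and passing it to the privcmd device node.  The device
 * path and the particular hypercall below are assumptions for the example:
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	int fd  = open("/proc/xen/privcmd", O_RDWR);
 *	long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * The ioctl's return value is the hypercall's own return value (here the
 * hypervisor version), or a negative errno if the copy from userspace failed.
 */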

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;
			pagedata = page_address(page);
			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}
	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = remap_domain_mfn_range(vma,
				    msg->va & PAGE_MASK,
				    msg->mfn,
				    msg->npages << PAGE_SHIFT,
				    vma->vm_page_prot,
				    st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);
	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
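
/*
 * Usage sketch (illustrative, not part of this file): IOCTL_PRIVCMD_MMAP
 * takes an array of struct privcmd_mmap_entry describing chunks that are
 * contiguous in virtual address space inside a window previously mmap()ed
 * from the privcmd device; the first chunk must start at the VMA's start,
 * and the mapping can only be established once per VMA (single-shot).
 * The device fd, frame number and page count are assumptions for the example:
 *
 *	void *win = mmap(NULL, npages << PAGE_SHIFT, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va     = (unsigned long)win,
 *		.mfn    = first_mfn,		// first foreign machine frame
 *		.npages = npages,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */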

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};

static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
				   *mfnp, PAGE_SIZE,
				   st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}

static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	put_user(*mfnp, st->user++);

	return 0;
}

static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);
	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		ret = 0;

		state.user = m.arr;
		traverse_pages(m.num, sizeof(xen_pfn_t),
			       &pagelist,
			       mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}
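
/*
 * Usage sketch (illustrative, not part of this file): IOCTL_PRIVCMD_MMAPBATCH
 * maps one foreign frame per page of an mmap()ed privcmd window and reports
 * per-frame failures in place by setting the top nibble (0xf0000000) of the
 * corresponding entry in the caller's array.  The device fd, NPAGES and
 * domid below are assumptions for the example:
 *
 *	xen_pfn_t frames[NPAGES];	// filled with the foreign domain's MFNs
 *	void *win = mmap(NULL, NPAGES << PAGE_SHIFT, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch batch = {
 *		.num  = NPAGES,
 *		.dom  = domid,
 *		.addr = (unsigned long)win,
 *		.arr  = frames,
 *	};
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch);
 *	// afterwards, frames[i] & 0xf0000000 marks pages that failed to map
 */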

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translate guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/* Only allow the VMA to be populated once: vm_private_data doubles as the flag. */
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};