/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
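
/*
 * For PV-on-HVM guests this holds the base address of the region into
 * which the grant-table frames are mapped.  It is expected to be set
 * before gnttab_init()/gnttab_resume() run (normally by the Xen
 * platform PCI driver); PV guests do not use it.
 */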
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);

static struct grant_entry *shared;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
        return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
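
/*
 * Free grant references are kept in a singly linked list threaded through
 * gnttab_list itself: gnttab_entry(ref) holds the next free reference,
 * gnttab_free_head points at the first one, and the chain is terminated
 * by GNTTAB_LIST_END.  gnttab_list is an array of page-sized chunks of
 * grant_ref_t so that it can grow when the grant table is expanded.
 */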

static int get_free_entries(unsigned count)
{
        unsigned long flags;
        int ref, rc;
        grant_ref_t head;

        spin_lock_irqsave(&gnttab_list_lock, flags);

        if ((gnttab_free_count < count) &&
            ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                return rc;
        }

        ref = head = gnttab_free_head;
        gnttab_free_count -= count;
        while (count-- > 1)
                head = gnttab_entry(head);
        gnttab_free_head = gnttab_entry(head);
        gnttab_entry(head) = GNTTAB_LIST_END;

        spin_unlock_irqrestore(&gnttab_list_lock, flags);

        return ref;
}

static void do_free_callbacks(void)
{
        struct gnttab_free_callback *callback, *next;

        callback = gnttab_free_callback_list;
        gnttab_free_callback_list = NULL;

        while (callback != NULL) {
                next = callback->next;
                if (gnttab_free_count >= callback->count) {
                        callback->next = NULL;
                        callback->fn(callback->arg);
                } else {
                        callback->next = gnttab_free_callback_list;
                        gnttab_free_callback_list = callback;
                }
                callback = next;
        }
}

static inline void check_free_callbacks(void)
{
        if (unlikely(gnttab_free_callback_list))
                do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = ref;
        gnttab_free_count++;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void update_grant_entry(grant_ref_t ref, domid_t domid,
                               unsigned long frame, unsigned flags)
{
        /*
         * Introducing a valid entry into the grant table:
         *  1. Write ent->domid.
         *  2. Write ent->frame:
         *      GTF_permit_access:   Frame to which access is permitted.
         *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
         *                           frame, or zero if none.
         *  3. Write memory barrier (WMB).
         *  4. Write ent->flags, inc. valid type.
         */
        shared[ref].frame = frame;
        shared[ref].domid = domid;
        wmb();
        shared[ref].flags = flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly)
{
        update_grant_entry(ref, domid, frame,
                           GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly)
{
        int ref;

        ref = get_free_entries(1);
        if (unlikely(ref < 0))
                return -ENOSPC;

        gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

        return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

int gnttab_query_foreign_access(grant_ref_t ref)
{
        u16 nflags;

        nflags = shared[ref].flags;

        return (nflags & (GTF_reading|GTF_writing));
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
        u16 flags, nflags;

        nflags = shared[ref].flags;
        do {
                flags = nflags;
                if (flags & (GTF_reading|GTF_writing)) {
                        printk(KERN_ALERT "WARNING: g.e. still in use!\n");
                        return 0;
                }
        } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);

        return 1;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page)
{
        if (gnttab_end_foreign_access_ref(ref, readonly)) {
                put_free_entry(ref);
                if (page != 0)
                        free_page(page);
        } else {
                /* XXX This needs to be fixed so that the ref and page are
                   placed on a list to be freed up later. */
                printk(KERN_WARNING
                       "WARNING: leaking g.e. and page still in use!\n");
        }
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
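
/*
 * Illustrative usage sketch, not part of this file (backend_id and addr
 * are hypothetical; addr is assumed to come from __get_free_page()):
 *
 *	int ref = gnttab_grant_foreign_access(backend_id,
 *					      virt_to_mfn((void *)addr), 0);
 *	if (ref < 0)
 *		return ref;
 *	... advertise ref to the backend, e.g. via xenstore ...
 *	gnttab_end_foreign_access(ref, 0, addr);
 *
 * Passing addr as the last argument also frees the page once the grant
 * has been revoked; passing 0 leaves the page to the caller.
 */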

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
        int ref;

        ref = get_free_entries(1);
        if (unlikely(ref < 0))
                return -ENOSPC;
        gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

        return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
                                       unsigned long pfn)
{
        update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
        unsigned long frame;
        u16 flags;

        /*
         * If a transfer is not even yet started, try to reclaim the grant
         * reference and return failure (== 0).
         */
        while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
                if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
                        return 0;
                cpu_relax();
        }

        /* If a transfer is in progress then wait until it is completed. */
        while (!(flags & GTF_transfer_completed)) {
                flags = shared[ref].flags;
                cpu_relax();
        }

        rmb();  /* Read the frame number /after/ reading completion status. */
        frame = shared[ref].frame;
        BUG_ON(frame == 0);

        return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
        unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
        put_free_entry(ref);
        return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
        put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
        grant_ref_t ref;
        unsigned long flags;
        int count = 1;

        if (head == GNTTAB_LIST_END)
                return;
        spin_lock_irqsave(&gnttab_list_lock, flags);
        ref = head;
        while (gnttab_entry(ref) != GNTTAB_LIST_END) {
                ref = gnttab_entry(ref);
                count++;
        }
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = head;
        gnttab_free_count += count;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
        int h = get_free_entries(count);

        if (h < 0)
                return -ENOSPC;

        *head = h;

        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
        return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
        grant_ref_t g = *private_head;
        if (unlikely(g == GNTTAB_LIST_END))
                return -ENOSPC;
        *private_head = gnttab_entry(g);
        return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release)
{
        gnttab_entry(release) = *private_head;
        *private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
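
/*
 * Illustrative sketch of the private free-list helpers above (the batch
 * size of 16 is arbitrary and my_grant_one_page() is hypothetical):
 *
 *	grant_ref_t gref_head, ref;
 *
 *	if (gnttab_alloc_grant_references(16, &gref_head) < 0)
 *		return -ENOSPC;
 *	while (!gnttab_empty_grant_references(&gref_head)) {
 *		ref = gnttab_claim_grant_reference(&gref_head);
 *		my_grant_one_page(ref);
 *	}
 *
 * A reference that turns out not to be needed can be put back on the
 * private list with gnttab_release_grant_reference(&gref_head, ref),
 * and the whole remaining list handed back to the common pool with
 * gnttab_free_grant_references(gref_head).
 */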

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
                                  void (*fn)(void *), void *arg, u16 count)
{
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        if (callback->next)
                goto out;
        callback->fn = fn;
        callback->arg = arg;
        callback->count = count;
        callback->next = gnttab_free_callback_list;
        gnttab_free_callback_list = callback;
        check_free_callbacks();
out:
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
        struct gnttab_free_callback **pcb;
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
                if (*pcb == callback) {
                        *pcb = callback->next;
                        break;
                }
        }
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
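
/*
 * Illustrative sketch of the free callback (my_notify() and dev are
 * hypothetical): a driver that could not get enough references may ask
 * to be notified once at least @count of them have been freed.  The
 * callback is invoked with gnttab_list_lock held and interrupts off,
 * so it must not sleep; deferring to a tasklet or workqueue is typical.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	static void my_notify(void *arg)
 *	{
 *		... kick a tasklet / work item for arg ...
 *	}
 *	...
 *	gnttab_request_free_callback(&cb, my_notify, dev, 16);
 */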

static int grow_gnttab_list(unsigned int more_frames)
{
        unsigned int new_nr_grant_frames, extra_entries, i;
        unsigned int nr_glist_frames, new_nr_glist_frames;

        new_nr_grant_frames = nr_grant_frames + more_frames;
        extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;

        nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
        new_nr_glist_frames =
                (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                if (!gnttab_list[i])
                        goto grow_nomem;
        }

        for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
             i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
                gnttab_entry(i) = i + 1;

        gnttab_entry(i) = gnttab_free_head;
        gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
        gnttab_free_count += extra_entries;

        nr_grant_frames = new_nr_grant_frames;

        check_free_callbacks();

        return 0;

grow_nomem:
        for ( ; i >= nr_glist_frames; i--)
                free_page((unsigned long) gnttab_list[i]);
        return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
        struct gnttab_query_size query;
        int rc;

        query.dom = DOMID_SELF;

        rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
        if ((rc < 0) || (query.status != GNTST_okay))
                return 4; /* Legacy max supported number of frames */

        return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
        unsigned int xen_max = __max_nr_grant_frames();

        if (xen_max > boot_max_nr_grant_frames)
                return boot_max_nr_grant_frames;
        return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
        int i, ret;
        pte_t *pte;
        unsigned long mfn;

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
        if (ret)
                return ret;

        /* Auto-translated guests have no M2P override to maintain. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;

        for (i = 0; i < count; i++) {
                /* Do not add to override if the map failed. */
                if (map_ops[i].status)
                        continue;

                if (map_ops[i].flags & GNTMAP_contains_pte) {
                        pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                                (map_ops[i].host_addr & ~PAGE_MASK));
                        mfn = pte_mfn(*pte);
                } else {
                        /* If you really wanted to do this:
                         * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
                         *
                         * The reason we do not implement it is that on the
                         * unmap path (gnttab_unmap_refs) we have no means of
                         * checking whether the page is !GNTMAP_contains_pte.
                         *
                         * That is, not without some extra data structure to
                         * carry the struct page, bool clear_pte, and list_head
                         * next tuples and deal with allocation/deallocation,
                         * etc.
                         *
                         * The users of this API set the GNTMAP_contains_pte
                         * flag, so let's just return "not supported" until it
                         * becomes necessary to implement.
                         */
                        return -EOPNOTSUPP;
                }
                ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
                if (ret)
                        return ret;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct page **pages, unsigned int count)
{
        int i, ret;

        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
                return ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;

        for (i = 0; i < count; i++) {
                ret = m2p_remove_override(pages[i], true /* clear the PTE */);
                if (ret)
                        return ret;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
        struct gnttab_setup_table setup;
        unsigned long *frames;
        unsigned int nr_gframes = end_idx + 1;
        int rc;

        if (xen_hvm_domain()) {
                struct xen_add_to_physmap xatp;
                unsigned int i = end_idx;
                rc = 0;
                /*
                 * Loop backwards, so that the first hypercall has the largest
                 * index, ensuring that the table will grow only once.
                 */
                do {
                        xatp.domid = DOMID_SELF;
                        xatp.idx = i;
                        xatp.space = XENMAPSPACE_grant_table;
                        xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
                        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
                        if (rc != 0) {
                                printk(KERN_WARNING
                                       "grant table add_to_physmap failed, err=%d\n", rc);
                                break;
                        }
                } while (i-- > start_idx);

                return rc;
        }

        frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
        if (!frames)
                return -ENOMEM;

        setup.dom = DOMID_SELF;
        setup.nr_frames = nr_gframes;
        set_xen_guest_handle(setup.frame_list, frames);

        rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        if (rc == -ENOSYS) {
                kfree(frames);
                return -ENOSYS;
        }

        BUG_ON(rc || setup.status);

        rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
                                    &shared);
        BUG_ON(rc);

        kfree(frames);

        return 0;
}

int gnttab_resume(void)
{
        unsigned int max_nr_gframes;

        max_nr_gframes = gnttab_max_grant_frames();
        if (max_nr_gframes < nr_grant_frames)
                return -ENOSYS;

        if (xen_pv_domain())
                return gnttab_map(0, nr_grant_frames - 1);

        if (!shared) {
                shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
                if (shared == NULL) {
                        printk(KERN_WARNING
                               "Failed to ioremap gnttab share frames!");
                        return -ENOMEM;
                }
        }

        gnttab_map(0, nr_grant_frames - 1);

        return 0;
}

int gnttab_suspend(void)
{
        arch_gnttab_unmap_shared(shared, nr_grant_frames);
        return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
        int rc;
        unsigned int cur, extra;

        cur = nr_grant_frames;
        extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
                 GREFS_PER_GRANT_FRAME);
        if (cur + extra > gnttab_max_grant_frames())
                return -ENOSPC;

        rc = gnttab_map(cur, cur + extra - 1);
        if (rc == 0)
                rc = grow_gnttab_list(extra);

        return rc;
}

int gnttab_init(void)
{
        int i;
        unsigned int max_nr_glist_frames, nr_glist_frames;
        unsigned int nr_init_grefs;

        nr_grant_frames = 1;
        boot_max_nr_grant_frames = __max_nr_grant_frames();

        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
        max_nr_glist_frames = (boot_max_nr_grant_frames *
                               GREFS_PER_GRANT_FRAME / RPP);

        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
                              GFP_KERNEL);
        if (gnttab_list == NULL)
                return -ENOMEM;

        nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
        for (i = 0; i < nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
                if (gnttab_list[i] == NULL)
                        goto ini_nomem;
        }

        if (gnttab_resume() < 0)
                return -ENODEV;

        nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;

        for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
                gnttab_entry(i) = i + 1;

        gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
        gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
        gnttab_free_head  = NR_RESERVED_ENTRIES;

        printk("Grant table initialized\n");
        return 0;

 ini_nomem:
        for (i--; i >= 0; i--)
                free_page((unsigned long)gnttab_list[i]);
        kfree(gnttab_list);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __devinit __gnttab_init(void)
{
        /* Delay grant-table initialization in the PV on HVM case */
        if (xen_hvm_domain())
                return 0;

        if (!xen_pv_domain())
                return -ENODEV;

        return gnttab_init();
}

core_initcall(__gnttab_init);