/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
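
/*
 * Sizing example: each request reserves BLKIF_MAX_SEGMENTS_PER_REQUEST
 * pages of mapping space up front, so with the default reqs=64 and the
 * interface's 11 segments per request, blkif_init() pre-allocates
 * 64 * 11 = 704 pages (~2.75 MiB with 4 KiB pages). Doubling 'reqs'
 * doubles that footprint. The parameter can be set at load time, e.g.
 * (assuming the module is built as 'blkback'):
 *
 *   modprobe blkback reqs=128
 */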

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t		*blkif;
	u64		id;
	int		nr_pages;
	atomic_t	pendcnt;
	unsigned short	operation;
	int		status;
	struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)
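
/*
 * Lifecycle of a pending_req: dispatch_rw_block_io() starts pendcnt at 1
 * and takes an extra reference for every additional bio it submits; each
 * bio completion drops one reference in __end_block_io_op(), and whichever
 * completion brings pendcnt to zero unmaps the grants and queues the
 * response back to the frontend.
 */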

struct xen_blkbk {
	pending_req_t		*pending_reqs;
	struct list_head	pending_free;
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	struct page		**pending_pages;
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;
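
/*
 * Every pending_req owns a fixed, contiguous slice of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST entries in pending_pages[] and
 * pending_grant_handles[]; vaddr_pagenr() below derives the global index
 * of a request's seg'th page from the request's slot in pending_reqs[],
 * so no per-request allocation or lookup table is needed.
 */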

static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, pending_req_t,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}
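
/*
 * blkif->plug caches a reference to the request queue currently being
 * fed, so that consecutive segments can batch up before the queue is
 * kicked; blkif_schedule() calls unplug_queue() once per pass over the
 * ring, releasing the reference taken here.
 */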

static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/* Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i). */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}
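
/*
 * The two wait_event_interruptible() calls above gate on different
 * resources: the first sleeps until the frontend posts work
 * (blkif->waiting_reqs is set from the interrupt path), the second until
 * at least one pending_req is free. Clearing waiting_reqs before the
 * smp_mb() pairs with blkif_notify_work() setting it, so a notification
 * arriving while do_block_io_op() runs is never lost.
 */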

/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */

static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
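
/*
 * blkif_be_int() is the handler for the inter-domain event channel that
 * the frontend kicks after producing requests; it is bound elsewhere,
 * when the shared ring is mapped. All real work is deferred to the
 * per-device xenblkd kthread running blkif_schedule().
 */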

/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

static int do_block_io_op(blkif_t *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
			       sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req,
				RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req,
				RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}
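
/*
 * Ring protocol: req_cons is the backend's private consumer index,
 * sring->req_prod the shared producer index written by the frontend. The
 * rmb() above the loop orders the read of req_prod before the reads of
 * the request slots themselves, and copying each request into a private
 * 'req' before validating it stops the frontend from mutating fields
 * after they have been checked (a shared-ring time-of-check/time-of-use
 * hazard).
 */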

static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;

		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		/* The guest's buffer is only read for writes, so it can be
		 * mapped read-only; reads must write into it. */
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number | (int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		/* A barrier with no data: submit an empty bio. */
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}
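
/*
 * The data path above is zero-copy: the frontend's pages are grant-mapped
 * into this domain (with an m2p override so they pass pfn/mfn translation
 * checks), wrapped in one or more bios, and submitted directly to the
 * underlying block device. fast_flush_area() unmaps everything once the
 * final bio completes.
 */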

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
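
/*
 * make_response() runs both from the xenblkd thread and from bio
 * completion context (via end_block_io_op()), hence the irqsave form of
 * blk_ring_lock. The notify decision is computed while the lock is held,
 * but the event channel is only kicked after it is dropped.
 */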

static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					       blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");