/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};
/* FIXME: can we shrink this ? */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

static struct kmem_cache *_dm_io_cache;
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
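
/*
 * Illustrative sketch (not part of this driver): the typical client
 * lifecycle as a dm target might use it.  The dm_io_client_* calls above
 * are real; the function name and the choice of 32 pages are made up
 * purely for the example.
 */
static int __maybe_unused example_client_lifecycle(void)
{
	struct dm_io_client *client;

	/* Reserve mempool ios and bios sized for roughly 32 pages of io. */
	client = dm_io_client_create(32);
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* ... issue io against this client with dm_io() ... */

	dm_io_client_destroy(client);
	return 0;
}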
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	/* Restore the extra bvec hidden in do_region() before freeing. */
	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
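
/*
 * Illustrative sketch (not part of this driver): how a consumer drives
 * the dpages interface above.  do_region() below does exactly this while
 * also packing the fragments into bios; the function name here is made
 * up for the example.
 */
static void __maybe_unused example_walk_dpages(struct dpages *dp,
					       sector_t remaining)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (remaining) {
		/* Get the current page fragment... */
		dp->get_page(dp, &page, &len, &offset);

		/* ...clamp it to what is left of the io... */
		len = min(len, to_bytes(remaining));

		/* ...account for it and advance the iterator. */
		remaining -= to_sector(len);
		dp->next_page(dp);
	}
}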
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
		if (unlikely(num_bvecs > BIO_MAX_PAGES))
			num_bvecs = BIO_MAX_PAGES;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io.error_bits = 0;
	io.eopnotsupp_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO bit
 * in io_req->bi_rw. If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
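
/*
 * Illustrative sketch (not part of this driver): a synchronous read of
 * one region into a kernel buffer via dm_io().  The request and region
 * layouts come from include/linux/dm-io.h; the function name, the sector
 * range and the buffer size are made up for the example.  Leaving
 * notify.fn NULL selects the synchronous path; supplying it would make
 * the call asynchronous, subject to the unplug rules noted above.
 */
static int __maybe_unused example_sync_read(struct dm_io_client *client,
					    struct block_device *bdev,
					    void *buffer)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,			/* 8 sectors = 4KiB */
	};
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,		/* kernel-virtual buffer */
		.mem.ptr.addr = buffer,		/* must hold 4KiB */
		.notify.fn = NULL,		/* NULL => synchronous */
		.client = client,
	};

	return dm_io(&io_req, 1, &where, &error_bits);
}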
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
}