#define DM_MSG_PREFIX "core"
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
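With the shared dm_ratelimit_state removed, the DM*_LIMIT macros presumably fall back on the generic ratelimited printk helpers instead of an exported state. A minimal sketch of what such a definition could look like (hypothetical, not part of this hunk; only DM_NAME, DM_MSG_PREFIX and pr_err_ratelimited are assumed to exist):

/* Hypothetical replacement for the removed shared ratelimit state. */
#define DMERR_LIMIT(fmt, ...) \
	pr_err_ratelimited(DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n", \
			   ##__VA_ARGS__)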
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
/* forced geometry settings */
struct hd_geometry geometry;
- /* sysfs handle */
- struct kobject kobj;
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
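For reference, the dm_kobject_holder type used above is not shown in this hunk; it is expected to live in drivers/md/dm.h. A sketch of its shape, with the accessor name treated as an assumption:

/* Sketch: pair the sysfs kobject with a completion for teardown. */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * Assumed helper: map a kobject back to the completion embedded
 * alongside it, for use by the kobject release path.
 */
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}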
if (!md_in_flight(md))
wake_up(&md->wait);
+ /*
+ * Run the queue off this call path: drivers could invoke end_io while
+ * inside their request_fn (and holding the queue lock), so calling
+ * back into ->request_fn() here could deadlock attempting to grab the
+ * queue lock again.
+ */
if (run_queue)
- blk_run_queue(md->queue);
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
- if (mapped && rq_end_io)
- r = rq_end_io(tio->ti, clone, error, &tio->info);
+ if (tio->ti) {
+ rq_end_io = tio->ti->type->rq_end_io;
+
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
+ }
if (r <= 0)
/* The target wants to complete the I/O */
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
return requeued;
}
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ struct request *clone;
+
+ blk_start_request(orig);
+ clone = orig->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count held by the device opener,
+ * because the device may be closed during request completion,
+ * once all bios have completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ return clone;
+}
+
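dm_start_request() now takes both the per-direction in-flight count and the md reference before the request is mapped, so the error path added below in dm_request_fn() gets the same accounting as normal dispatch. The complementary teardown happens at completion time; roughly (a sketch modelled on the existing rq_completed(), whose exact signature here is an assumption):

/* Sketch: completion-side counterpart of dm_start_request(). */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);	/* drop the in-flight count */

	/* nudge anyone waiting on the suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue_async(md->queue);

	/* dm_put() pairs with the dm_get() taken in dm_start_request(). */
	dm_put(md);
}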
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform the setup that dm_done() requires
+ * before calling dm_kill_unmapped_request().
+ */
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ clone = dm_start_request(md, rq);
+ dm_kill_unmapped_request(clone, -EIO);
+ continue;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
- blk_start_request(rq);
- clone = rq->special;
- atomic_inc(&md->pending[rq_data_dir(clone)]);
+ clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);
md->disk->major = _major;
md->disk->first_minor = minor;
return 0;
}
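The completion initialized above is what lets teardown wait until the last sysfs reference to the embedded kobject is dropped. One plausible shape of the release/wait pair on the dm-sysfs.c side (names and placement are assumptions, not shown in this patch):

/* Assumed kobject release callback: signal that the kobject is gone. */
static void dm_kobject_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}

/*
 * Assumed teardown: drop the sysfs kobject, then wait for its release
 * so the module cannot be unloaded while a reader still holds it.
 */
void dm_sysfs_exit(struct mapped_device *md)
{
	struct kobject *kobj = dm_kobject(md);

	kobject_put(kobj);
	wait_for_completion(dm_get_completion_from_kobject(kobj));
}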
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md;
unsigned minor = MINOR(dev);
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
- if (md && (md == MINOR_ALLOCED ||
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
- dm_deleting_md(md) ||
- test_bit(DMF_FREEING, &md->flags))) {
- md = NULL;
- goto out;
+ if (md) {
+ if ((md == MINOR_ALLOCED ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
+ dm_deleting_md(md) ||
+ test_bit(DMF_FREEING, &md->flags))) {
+ md = NULL;
+ goto out;
+ }
+ dm_get(md);
}
out:
return md;
}
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
- struct mapped_device *md = dm_find_md(dev);
-
- if (md)
- dm_get(md);
-
- return md;
-}
EXPORT_SYMBOL_GPL(dm_get_md);
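With the lookup and the reference acquisition folded into one function, every successful dm_get_md() call must still be paired with dm_put(). A minimal caller sketch (example_probe_md is a hypothetical caller, not from this patch):

/* Hypothetical caller: look up a device and drop the reference when done. */
static int example_probe_md(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	/* ... use md while holding the reference ... */

	dm_put(md);
	return 0;
}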
void *dm_get_mdptr(struct mapped_device *md)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ /*
+ * Take suspend_lock so that presuspend and postsuspend methods
+ * do not race with internal suspend.
+ */
+ mutex_lock(&md->suspend_lock);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
+ mutex_unlock(&md->suspend_lock);
/*
* Rare, but there may be I/O requests still going to complete,
struct kobject *dm_kobject(struct mapped_device *md)
{
- return &md->kobj;
+ return &md->kobj_holder.kobj;
}
/*
{
struct mapped_device *md;
- md = container_of(kobj, struct mapped_device, kobj);
- if (&md->kobj != kobj)
- return NULL;
-
- if (test_bit(DMF_FREEING, &md->flags) ||
- dm_deleting_md(md))
- return NULL;
+ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
+ }
dm_get(md);
+out:
+ spin_unlock(&_minor_lock);
+
return md;
}
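Callers of dm_get_from_kobject() now get either NULL or an md with a reference held, taken under _minor_lock so the lookup cannot race with DMF_FREEING or deletion. A sketch of how the sysfs show path is expected to use it (modelled on dm-sysfs.c; the dm_sysfs_attr plumbing here is an assumption):

/* Sketch of a sysfs ->show() handler built on dm_get_from_kobject(). */
static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *page)
{
	struct dm_sysfs_attr *dm_attr;
	struct mapped_device *md;
	ssize_t ret;

	dm_attr = container_of(attr, struct dm_sysfs_attr, attr);
	if (!dm_attr->show)
		return -EIO;

	md = dm_get_from_kobject(kobj);
	if (!md)
		return -EINVAL;

	ret = dm_attr->show(md, page);
	dm_put(md);	/* drop the reference taken by dm_get_from_kobject() */

	return ret;
}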