sector_t *highs;
struct dm_target *targets;
+ struct target_type *immutable_target_type;
unsigned integrity_supported:1;
+ unsigned singleton:1;
/*
* Indicates the rw permissions for the new logical
num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+ if (!num_targets) {
+ kfree(t);
+ return -ENOMEM;
+ }
+
if (alloc_targets(t, num_targets)) {
kfree(t);
t = NULL;
}
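For context (an editor's illustration, not part of the patch): dm_round_up() rounds num_targets up to a multiple of KEYS_PER_NODE, and a sufficiently large value supplied from userspace can wrap the rounded result to 0, after which alloc_targets(t, 0) would "succeed" with no room for any target. A rough sketch of the failure mode, assuming KEYS_PER_NODE is 8 and a 32-bit unsigned num_targets:

	unsigned num_targets = UINT_MAX - 3;		/* 0xfffffffc, e.g. garbage from an ioctl */
	num_targets = dm_round_up(num_targets, 8);	/* rounds to 0x100000000, wraps to 0 */
	/* without the new check, alloc_targets(t, 0) would not report an error */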
/*
- * Add a device to the list, or just increment the usage count if
- * it's already present.
+ * Convert the path to a device
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- struct dm_dev **result)
+dev_t dm_get_dev_t(const char *path)
{
- int r;
dev_t uninitialized_var(dev);
- struct dm_dev_internal *dd;
unsigned int major, minor;
- struct dm_table *t = ti->table;
-
- BUG_ON(!t);
if (sscanf(path, "%u:%u", &major, &minor) == 2) {
/* Extract the major/minor numbers */
dev = MKDEV(major, minor);
if (MAJOR(dev) != major || MINOR(dev) != minor)
- return -EOVERFLOW;
+ return 0;
} else {
/* convert the path to a device */
struct block_device *bdev = lookup_bdev(path);
if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ return 0;
dev = bdev->bd_dev;
bdput(bdev);
}
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
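A possible use of the newly exported helper (example_ctr() and its error strings are illustrative only, not part of this patch): a target constructor can resolve a "major:minor" string or a path to a dev_t before deciding how to open the device.

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		dev_t dev;

		if (argc < 1) {
			ti->error = "Device argument missing";
			return -EINVAL;
		}

		dev = dm_get_dev_t(argv[0]);	/* accepts "major:minor" or a path */
		if (!dev) {
			ti->error = "Device lookup failed";
			return -ENODEV;
		}

		/* ... typically followed by dm_get_device() to actually open it ... */
		return 0;
	}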
+/*
+ * Add a device to the list, or just increment the usage count if
+ * it's already present.
+ */
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
+{
+ int r;
+ dev_t dev;
+ struct dm_dev_internal *dd;
+ struct dm_table *t = ti->table;
+
+ BUG_ON(!t);
+
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+
dd = find_device(&t->devices, dev);
if (!dd) {
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
/*
* Used to dynamically allocate the arg array.
+ *
+ * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
*/
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
char **argv;
unsigned new_size;
+ gfp_t gfp;
- new_size = *array_size ? *array_size * 2 : 64;
- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+ if (*array_size) {
+ new_size = *array_size * 2;
+ gfp = GFP_KERNEL;
+ } else {
+ new_size = 8;
+ gfp = GFP_NOIO;
+ }
+ argv = kmalloc(new_size * sizeof(*argv), gfp);
if (argv) {
memcpy(argv, old_argv, *array_size * sizeof(*argv));
*array_size = new_size;
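realloc_argv() is used by dm_split_args() to grow the argument array; the loose sketch below (have_more_args() and next_arg() are placeholders, cleanup omitted) shows the intended growth pattern: the first call sees *array_size == 0 and allocates 8 pointers with GFP_NOIO, and later calls double the array (16, 32, ...) with GFP_KERNEL.

	unsigned array_size = 0, argc = 0;
	char **argv = realloc_argv(&array_size, NULL);	/* 8 slots, GFP_NOIO */

	while (argv && have_more_args()) {
		if (argc == array_size)
			argv = realloc_argv(&array_size, argv);	/* doubled, GFP_KERNEL */
		if (!argv)
			break;
		argv[argc++] = next_arg();
	}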
char **argv;
struct dm_target *tgt;
+ if (t->singleton) {
+ DMERR("%s: target type %s must appear alone in table",
+ dm_device_name(t->md), t->targets->type->name);
+ return -EINVAL;
+ }
+
if ((r = check_space(t)))
return r;
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
+ if (dm_target_needs_singleton(tgt->type)) {
+ if (t->num_targets) {
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
+ }
+ t->singleton = 1;
+ }
+
+ if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
+ }
+
+ if (t->immutable_target_type) {
+ if (t->immutable_target_type != tgt->type) {
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
+ }
+ } else if (dm_target_is_immutable(tgt->type)) {
+ if (t->num_targets) {
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
+ }
+ t->immutable_target_type = tgt->type;
+ }
+
tgt->table = t;
tgt->begin = start;
tgt->len = len;
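For reference, the new checks rely on per-target feature flags: dm_target_needs_singleton(), dm_target_always_writeable() and dm_target_is_immutable() test the DM_TARGET_SINGLETON, DM_TARGET_ALWAYS_WRITEABLE and DM_TARGET_IMMUTABLE bits in target_type->features (declared in include/linux/device-mapper.h). A hypothetical target opting in could look like this (the "example" target and its callbacks are illustrative only):

	static struct target_type example_target = {
		.name     = "example",
		.version  = {1, 0, 0},
		/* only target allowed in its table, and the device may never
		 * switch to a different target type afterwards */
		.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
		.module   = THIS_MODULE,
		.ctr      = example_ctr,
		.dtr      = example_dtr,
		.map      = example_map,
	};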
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
return t->type;
}
+struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
+{
+ return t->immutable_target_type;
+}
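A hedged sketch of how the new getter might be used when a replacement table is loaded (the function name and surrounding logic are illustrative, not taken from this patch): once a device has served an immutable target type, later tables are expected to keep it.

	static int example_check_table_swap(struct dm_table *live, struct dm_table *incoming)
	{
		struct target_type *immutable = dm_table_get_immutable_target_type(live);

		/* an immutable target type must not change across reloads */
		if (immutable && dm_table_get_immutable_target_type(incoming) != immutable)
			return -EINVAL;

		return 0;
	}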
+
bool dm_table_request_based(struct dm_table *t)
{
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
return 1;
}
+static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_nonrot(q);
+}
+
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+ iterate_devices_callout_fn func)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, func, NULL))
+ return 0;
+ }
+
+ return 1;
+}
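dm_table_all_devices_attribute() answers "does every underlying device satisfy this callout?"; a target without an iterate_devices hook makes the answer false, keeping the conservative default. A minimal sketch of plugging in another predicate (device_is_rw() and the wrapper are hypothetical, shown only to illustrate the pattern):

	static int device_is_rw(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
	{
		return (dev->mode & FMODE_WRITE) != 0;	/* per-device predicate */
	}

	static bool dm_table_all_devices_rw(struct dm_table *t)
	{
		return dm_table_all_devices_attribute(t, device_is_rw);
	}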
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
+ /* Mark the queue non-rotational only if all underlying devices are non-rotational. */
+ if (dm_table_all_devices_attribute(t, device_is_nonrot))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+
dm_table_set_integrity(t);
+ /*
+ * Determine whether or not this queue's I/O timings contribute
+ * to the entropy pool. Only request-based targets use this.
+ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+ * have it set.
+ */
+ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
/*
* QUEUE_FLAG_STACKABLE must be set after all queue settings are
* visible to other CPUs because, once the flag is set, incoming bios