X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-table.c;h=0d0bc044a69cc637f62fb6d00fceee04dd61abde;hb=ec042914054a3965417de7acb4f767365cb47ea8;hp=8e91321301424afda07c456a8b6ae53aec524d04;hpb=b2409fb6a49d1f633a8fc488e48043da7d3fd6a7;p=pandora-kernel.git

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8e9132130142..0d0bc044a69c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -215,6 +215,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
 
+	if (!num_targets) {
+		kfree(t);
+		return -ENOMEM;
+	}
+
 	if (alloc_targets(t, num_targets)) {
 		kfree(t);
 		t = NULL;
@@ -453,35 +458,50 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 }
 
 /*
- * Add a device to the list, or just increment the usage count if
- * it's already present.
+ * Convert the path to a device
  */
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
-		  struct dm_dev **result)
+dev_t dm_get_dev_t(const char *path)
 {
-	int r;
 	dev_t uninitialized_var(dev);
-	struct dm_dev_internal *dd;
 	unsigned int major, minor;
-	struct dm_table *t = ti->table;
-
-	BUG_ON(!t);
 
 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
 		/* Extract the major/minor numbers */
 		dev = MKDEV(major, minor);
 		if (MAJOR(dev) != major || MINOR(dev) != minor)
-			return -EOVERFLOW;
+			return 0;
 	} else {
 		/* convert the path to a device */
 		struct block_device *bdev = lookup_bdev(path);
 
 		if (IS_ERR(bdev))
-			return PTR_ERR(bdev);
+			return 0;
 		dev = bdev->bd_dev;
 		bdput(bdev);
 	}
 
+	return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
+/*
+ * Add a device to the list, or just increment the usage count if
+ * it's already present.
+ */
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+		  struct dm_dev **result)
+{
+	int r;
+	dev_t dev;
+	struct dm_dev_internal *dd;
+	struct dm_table *t = ti->table;
+
+	BUG_ON(!t);
+
+	dev = dm_get_dev_t(path);
+	if (!dev)
+		return -ENODEV;
+
 	dd = find_device(&t->devices, dev);
 	if (!dd) {
 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
@@ -581,14 +601,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
 
 /*
  * Used to dynamically allocate the arg array.
+ *
+ * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
  */
 static char **realloc_argv(unsigned *array_size, char **old_argv)
 {
 	char **argv;
 	unsigned new_size;
+	gfp_t gfp;
 
-	new_size = *array_size ? *array_size * 2 : 64;
-	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+	if (*array_size) {
+		new_size = *array_size * 2;
+		gfp = GFP_KERNEL;
+	} else {
+		new_size = 8;
+		gfp = GFP_NOIO;
+	}
+	argv = kmalloc(new_size * sizeof(*argv), gfp);
 	if (argv) {
 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
 		*array_size = new_size;
@@ -761,37 +795,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	tgt->type = dm_get_target_type(type);
 	if (!tgt->type) {
-		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
-		      type);
+		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
 		return -EINVAL;
 	}
 
 	if (dm_target_needs_singleton(tgt->type)) {
 		if (t->num_targets) {
-			DMERR("%s: target type %s must appear alone in table",
-			      dm_device_name(t->md), type);
-			return -EINVAL;
+			tgt->error = "singleton target type must appear alone in table";
+			goto bad;
 		}
 		t->singleton = 1;
 	}
 
 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
-		DMERR("%s: target type %s may not be included in read-only tables",
-		      dm_device_name(t->md), type);
-		return -EINVAL;
+		tgt->error = "target type may not be included in a read-only table";
+		goto bad;
 	}
 
 	if (t->immutable_target_type) {
 		if (t->immutable_target_type != tgt->type) {
-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
-			      dm_device_name(t->md), t->immutable_target_type->name);
-			return -EINVAL;
+			tgt->error = "immutable target type cannot be mixed with other target types";
+			goto bad;
 		}
 	} else if (dm_target_is_immutable(tgt->type)) {
 		if (t->num_targets) {
-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
-			      dm_device_name(t->md), tgt->type->name);
-			return -EINVAL;
+			tgt->error = "immutable target type cannot be mixed with other target types";
+			goto bad;
 		}
 		t->immutable_target_type = tgt->type;
 	}
@@ -806,7 +835,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	 */
 	if (!adjoin(t, tgt)) {
 		tgt->error = "Gap in table";
-		r = -EINVAL;
 		goto bad;
 	}
 
@@ -1350,17 +1378,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1392,13 +1428,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
 	dm_table_set_integrity(t);
 
+	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool. Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
 	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios
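
A note on the dm_table_create() hunk: dm_round_up(num_targets, KEYS_PER_NODE) can wrap to zero when the ioctl-supplied target count is near UINT_MAX, and alloc_targets() would then size the table's arrays from that bogus value; the new check fails the table load with -ENOMEM up front instead. Below is a minimal userspace sketch of the wraparound, with dm_div_up()/dm_round_up() written out after the macros in include/linux/device-mapper.h; the constants are illustrative.

#include <stdio.h>

/* Userspace models of the kernel's rounding macros
 * (include/linux/device-mapper.h). */
static unsigned dm_div_up(unsigned n, unsigned sz)
{
	return (n + sz - 1) / sz;	/* n + sz - 1 can wrap for huge n */
}

static unsigned dm_round_up(unsigned n, unsigned sz)
{
	return dm_div_up(n, sz) * sz;
}

int main(void)
{
	/* Normal case: 13 rounds up to the next multiple of 8. */
	printf("%u\n", dm_round_up(13, 8));		/* 16 */
	/* Hostile case: the addition wraps and the result is 0. */
	printf("%u\n", dm_round_up(0xfffffffeU, 8));	/* 0 */
	return 0;
}

That zero result is exactly what the new `if (!num_targets)` guard catches before alloc_targets() runs.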
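The new dm_get_dev_t() helper accepts either a "major:minor" pair or a block-device path (the lookup_bdev() branch). For the numeric branch, the MAJOR()/MINOR() round-trip test rejects pairs that cannot be represented in the kernel's 32-bit dev_t. A minimal userspace sketch of just that check, using the 12-bit major / 20-bit minor layout from include/linux/kdev_t.h:

#include <stdio.h>

/* The kernel's internal dev_t encoding (include/linux/kdev_t.h). */
#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))

/* Does major:minor survive the MKDEV()/MAJOR()/MINOR() round trip? */
static int valid_devno(unsigned int major, unsigned int minor)
{
	unsigned int dev = MKDEV(major, minor);

	return MAJOR(dev) == major && MINOR(dev) == minor;
}

int main(void)
{
	printf("8:0    -> %d\n", valid_devno(8, 0));	/* 1: fits in 12 bits */
	printf("4096:0 -> %d\n", valid_devno(4096, 0));	/* 0: major needs 13 bits */
	return 0;
}

In the patched code an unrepresentable pair now makes dm_get_dev_t() return 0, which dm_get_device() maps to -ENODEV in place of the old -EOVERFLOW.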
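On the realloc_argv() hunk: the growth policy is "start small, then double", and only the initial allocation uses GFP_NOIO, so a suspended device can still handle the short messages dm-mpath and dm-thin send, while bulk users such as dm-switch grow the array with ordinary GFP_KERNEL. A userspace sketch of the same policy; the GFP distinction is reduced to a comment, and the unconditional free of the old array mirrors the tail of the kernel helper, which sits just below this hunk's context.

#include <stdlib.h>
#include <string.h>

static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	/* First allocation: 8 slots (kernel: GFP_NOIO, safe during suspend).
	 * Later growth: double the array (kernel: GFP_KERNEL). */
	new_size = *array_size ? *array_size * 2 : 8;
	argv = malloc(new_size * sizeof(*argv));
	if (argv) {
		if (old_argv)
			memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}
	free(old_argv);	/* the old array is always released, as in the kernel */
	return argv;
}

int main(void)
{
	unsigned size = 0;
	char **argv = realloc_argv(&size, NULL);	/* size: 0 -> 8 */

	if (argv)
		argv = realloc_argv(&size, argv);	/* size: 8 -> 16 */
	free(argv);
	return !(size == 16);
}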
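Finally, replacing dm_table_is_nonrot() with dm_table_all_devices_attribute() turns one hard-coded walk into a generic "does every underlying device of every target satisfy this predicate" helper, reused for both the non-rotational check and the new QUEUE_FLAG_ADD_RANDOM check (a request-based queue keeps feeding the entropy pool only if every underlying device contributes). A toy userspace rendering of the pattern; all names below are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool nonrot; bool add_random; };

typedef int (*dev_predicate_fn)(const struct dev *d);

static int dev_is_nonrot(const struct dev *d)     { return d->nonrot; }
static int dev_is_not_random(const struct dev *d) { return !d->add_random; }

/* One walker, parameterized by a per-device predicate, instead of one
 * bespoke loop per attribute. */
static bool all_devices_attribute(const struct dev *devs, unsigned n,
				  dev_predicate_fn fn)
{
	for (unsigned i = 0; i < n; i++)
		if (!fn(&devs[i]))
			return false;
	return true;
}

int main(void)
{
	struct dev devs[] = { { true, false }, { true, true } };

	printf("all nonrot:      %d\n", all_devices_attribute(devs, 2, dev_is_nonrot));	/* 1 */
	printf("none add_random: %d\n", all_devices_attribute(devs, 2, dev_is_not_random));	/* 0 */
	return 0;
}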