/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */
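/*
 * FirstUse lives in rdev->flags alongside md's own rdev flag bits,
 * so it must not collide with any bit md itself uses; it is private
 * to dm-raid and cleared again during superblock validation.
 */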
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};
/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC              0x1
#define DMPF_NOSYNC            0x2
#define DMPF_REBUILD           0x4
#define DMPF_DAEMON_SLEEP      0x8
#define DMPF_MIN_RECOVERY_RATE 0x10
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND  0x40
#define DMPF_STRIPE_CACHE      0x80
#define DMPF_REGION_SIZE       0x100
struct raid_set {
	struct dm_target *ti;

	uint64_t print_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};
/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",		0, 2, 1, 0 /* NONE */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs = NULL;
	sector_t sectors_per_dev;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	sectors_per_dev = ti->len;
	if ((raid_type->level > 1) &&
	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
		ti->error = "Target length not divisible by number of data devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.dev_sectors = sectors_per_dev;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 */

	return rs;
}
static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		if (rs->dev[i].rdev.sb_page)
			put_page(rs->dev[i].rdev.sb_page);
		rs->dev[i].rdev.sb_page = NULL;
		rs->dev[i].rdev.sb_loaded = 0;
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default. All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			region_size = min_region_size;
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
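/*
 * Worked example: a 1TiB target has ti->len = 2^31 sectors, so
 * min_region_size = 2^31 / 2^21 = 1024 sectors (512KiB) and the
 * 4MiB default (2^13 sectors) already keeps the region count
 * below the 2^21 limit required by the MD bitmap.
 */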
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 */
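/*
 * An illustrative raid_params string (values hypothetical): with
 * <#raid_params> = 3, the params "2048 region_size 1024" request a
 * 1MiB (2048-sector) chunk and a 512KiB (1024-sector) bitmap
 * region.
 */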
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	unsigned i, rebuild_cnt = 0;
	unsigned long value, region_size = 0;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if ((strict_strtoul(argv[0], 10, &value) < 0)) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'. If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];
		if (strict_strtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, "rebuild")) {
			rebuild_cnt++;
			if (((rs->raid_type->level != 1) &&
			     (rebuild_cnt > rs->raid_type->parity_devs)) ||
			    ((rs->raid_type->level == 1) &&
			     (rebuild_cnt > (rs->md.raid_disks - 1)))) {
				rs->ti->error = "Too many rebuild devices specified for given RAID type";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if (rs->raid_type->level < 5) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		rs->ti->split_io = rs->md.chunk_sectors;
	else
		rs->ti->split_io = region_size;
	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}
static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}
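/*
 * RAID1 maps to md's raid1 personality; every other type in
 * raid_types (RAID4/5/6) is carried by md's raid5/raid6 code, so a
 * single md_raid5_congested() call covers the rest.
 */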
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->raid_type->level == 1)
		return md_raid1_congested(&rs->md, bits);

	return md_raid5_congested(&rs->md, bits);
}
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	__u8 pad[452];		/* Round struct to 512 bytes. */
				/* Always set to 0 when writing. */
} __packed;
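/*
 * The superblock occupies the first 512 bytes of the metadata
 * device (super_load() reads it from offset 0); all multi-byte
 * fields are little-endian on disk.
 */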
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read device superblock");
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct md_rdev *r, *t;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	rdev_for_each(r, t, mddev)
		if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
			failed_devices |= (1ULL << r->raid_disk);

	memset(sb, 0, sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}
/*
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = sizeof(*sb);

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r, *t;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if ((le32_to_cpu(sb->level) != mddev->level) ||
	    (le32_to_cpu(sb->layout) != mddev->layout) ||
	    (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
		DMERR("Reshaping arrays not yet supported.");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported.");
		return -EINVAL;
	}

	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set. Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, t, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, t, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

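	/*
	 * The bitmap is placed at a fixed 4KiB offset (4096 >> 9 = 8
	 * sectors) into the metadata device, past the 512-byte
	 * superblock that super_load() reads from offset 0.
	 */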
	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct md_rdev *rdev, *freshest, *tmp;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			ti->error = "Failed to load superblock";
			return ret;
		}
	}

	if (!freshest)
		return 0;

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, tmp, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;

	return 0;
}
/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params> \
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
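/*
 * Example table line (illustrative; the device numbers are
 * hypothetical): a RAID4 set of 4 data disks plus 1 parity disk,
 * 1MiB (2048-sector) chunk, no metadata devices:
 *
 *   0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 */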
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params + 1 > argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	ret = -EINVAL;

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	argv += num_raid_params + 1;

	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		goto bad;
	}

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_requests = 1;

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

bad:
	context_free(rs);

	return ret;
}
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}
static void raid_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components. If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * The in-sync ratio shows the progress of:
		 *  - Initializing the array
		 *  - Rebuilding a subset of devices of the array
		 * The user can distinguish between the two by referring
		 * to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}
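		/* bitmap_info.chunksize is kept in bytes; report sectors */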
		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}
static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	bitmap_load(&rs->md);
	mddev_resume(&rs->md);
}
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
{
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);
MODULE_DESCRIPTION(DM_NAME " raid1/4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");