return 0;
}
-static void raid5_build_block(struct stripe_head *sh, int i);
+static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh);
BUG();
}
dev->flags = 0;
- raid5_build_block(sh, i);
+ raid5_build_block(sh, i, previous);
}
insert_hash(conf, sh);
}
init_stripe(sh, sector, previous);
} else {
if (atomic_read(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(!list_empty(&sh->lru)
+ && !test_bit(STRIPE_EXPANDING, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
return 0;
}
-#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
/* Make all the stripes able to hold 'newsize' devices.
conf->pool_size = newsize;
return err;
}
-#endif
static int drop_one_stripe(raid5_conf_t *conf)
{
}
-static sector_t compute_blocknr(struct stripe_head *sh, int i);
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
-static void raid5_build_block(struct stripe_head *sh, int i)
+static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
dev->req.bi_private = sh;
dev->flags = 0;
- dev->sector = compute_blocknr(sh, i);
+ dev->sector = compute_blocknr(sh, i, previous);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
int pd_idx, qd_idx;
int ddf_layout = 0;
sector_t new_sector;
- int sectors_per_chunk = conf->chunk_size >> 9;
+ int algorithm = previous ? conf->prev_algo
+ : conf->algorithm;
+ int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+ : (conf->chunk_size >> 9);
int raid_disks = previous ? conf->previous_raid_disks
: conf->raid_disks;
int data_disks = raid_disks - conf->max_degraded;
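The pattern just above is the heart of this series: during a reshape the array has two valid geometries at once, and the 'previous' flag selects which one a mapping uses. A minimal sketch of that selection, using only the conf fields the patch introduces (prev_algo, prev_chunk, previous_raid_disks); the helper and struct names are hypothetical, and the patch itself open-codes this in each function:

/* Hypothetical helper; illustrates the previous-vs-current selection. */
struct geometry {
	int algorithm;          /* parity placement algorithm */
	int sectors_per_chunk;  /* chunk size in 512-byte sectors */
	int raid_disks;         /* total devices in this geometry */
};

static struct geometry select_geometry(raid5_conf_t *conf, int previous)
{
	struct geometry g;

	g.algorithm         = previous ? conf->prev_algo
				       : conf->algorithm;
	g.sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
				       : (conf->chunk_size >> 9);
	g.raid_disks        = previous ? conf->previous_raid_disks
				       : conf->raid_disks;
	return g;
}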
pd_idx = data_disks;
break;
case 5:
- switch (conf->algorithm) {
+ switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
pd_idx = data_disks - stripe % raid_disks;
if (*dd_idx >= pd_idx)
break;
default:
printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- conf->algorithm);
+ algorithm);
BUG();
}
break;
case 6:
- switch (conf->algorithm) {
+ switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
pd_idx = raid_disks - 1 - (stripe % raid_disks);
qd_idx = pd_idx + 1;
default:
printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
+ algorithm);
BUG();
}
break;
}
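For the ALGORITHM_LEFT_ASYMMETRIC case above, pd_idx = data_disks - stripe % raid_disks rotates the parity disk backwards by one position per stripe. A small user-space demonstration with assumed numbers (a 4-disk RAID5, so 3 data disks):

#include <stdio.h>

int main(void)
{
	int raid_disks = 4, data_disks = 3;
	long stripe;

	/* parity lands on disks 3, 2, 1, 0, 3, ... for successive stripes */
	for (stripe = 0; stripe < 8; stripe++)
		printf("stripe %ld -> pd_idx %ld\n",
		       stripe, data_disks - stripe % raid_disks);
	return 0;
}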
-static sector_t compute_blocknr(struct stripe_head *sh, int i)
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
raid5_conf_t *conf = sh->raid_conf;
int raid_disks = sh->disks;
int data_disks = raid_disks - conf->max_degraded;
sector_t new_sector = sh->sector, check;
- int sectors_per_chunk = conf->chunk_size >> 9;
+ int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+ : (conf->chunk_size >> 9);
+ int algorithm = previous ? conf->prev_algo
+ : conf->algorithm;
sector_t stripe;
int chunk_offset;
int chunk_number, dummy1, dd_idx = i;
switch(conf->level) {
case 4: break;
case 5:
- switch (conf->algorithm) {
+ switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
case ALGORITHM_RIGHT_ASYMMETRIC:
if (i > sh->pd_idx)
break;
default:
printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- conf->algorithm);
+ algorithm);
BUG();
}
break;
case 6:
if (i == sh->qd_idx)
return 0; /* It is the Q disk */
- switch (conf->algorithm) {
+ switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
case ALGORITHM_RIGHT_ASYMMETRIC:
case ALGORITHM_ROTATING_ZERO_RESTART:
break;
default:
printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- conf->algorithm);
+ algorithm);
BUG();
}
break;
r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
check = raid5_compute_sector(conf, r_sector,
- (raid_disks != conf->raid_disks),
- &dummy1, &sh2);
+ previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
printk(KERN_ERR "compute_blocknr: map not correct\n");
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh)
{
- int sectors_per_chunk = conf->chunk_size >> 9;
+ int sectors_per_chunk =
+ previous ? (conf->prev_chunk >> 9)
+ : (conf->chunk_size >> 9);
int dd_idx;
int chunk_offset = sector_div(stripe, sectors_per_chunk);
int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
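stripe_set_idx, like the mapping functions above, leans on sector_div(), whose in-place semantics are easy to misread: the first argument is reduced to the quotient and the remainder is returned. A hedged user-space equivalent with assumed numbers:

#include <stdio.h>

typedef unsigned long long sector_t;

/* user-space stand-in for the kernel's sector_div() */
#define sector_div(n, base) \
	({ sector_t _rem = (n) % (base); (n) /= (base); _rem; })

int main(void)
{
	sector_t stripe = 1000;
	int sectors_per_chunk = 128;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);

	/* stripe is now the quotient (7), chunk_offset the remainder (104) */
	printf("stripe=%llu chunk_offset=%d\n", stripe, chunk_offset);
	return 0;
}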
int dd_idx, j;
struct stripe_head *sh2;
- sector_t bn = compute_blocknr(sh, i);
+ sector_t bn = compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
sh2 = get_active_stripe(conf, s, 0, 1);
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
+ struct stripe_head *sh2
+ = get_active_stripe(conf, sh->sector, 1, 1);
+ if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
+ /* sh cannot be written until sh2 has been read,
+ * so arrange for sh to be delayed a little

+ */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+ &sh2->state))
+ atomic_inc(&conf->preread_active_stripes);
+ release_stripe(sh2);
+ goto unlock;
+ }
+ if (sh2)
+ release_stripe(sh2);
+
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
for (i = conf->raid_disks; i--; ) {
}
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+ struct stripe_head *sh2
+ = get_active_stripe(conf, sh->sector, 1, 1);
+ if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
+ /* sh cannot be written until sh2 has been read,
+ * so arrange for sh to be delayed a little
+ */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+ &sh2->state))
+ atomic_inc(&conf->preread_active_stripes);
+ release_stripe(sh2);
+ goto unlock;
+ }
+ if (sh2)
+ release_stripe(sh2);
+
/* Need to write out all blocks after computing P&Q */
sh->disks = conf->raid_disks;
stripe_set_idx(sh->sector, conf, 0, sh);
if ((bvm->bi_rw & 1) == WRITE)
return biovec->bv_len; /* always allow writes to be mergeable */
+ if (mddev->new_chunk < mddev->chunk_size)
+ chunk_sectors = mddev->new_chunk >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0;
if (max <= biovec->bv_len && bio_sectors == 0)
unsigned int chunk_sectors = mddev->chunk_size >> 9;
unsigned int bio_sectors = bio->bi_size >> 9;
+ if (mddev->new_chunk < mddev->chunk_size)
+ chunk_sectors = mddev->new_chunk >> 9;
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
}
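Both hunks above clamp chunk_sectors to the smaller of the old and new chunk sizes while a reshape is pending, so any request accepted here fits within one chunk of either geometry. The test itself depends on chunk_sectors being a power of two; a worked example with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;    /* 64KiB chunk, power of two */
	unsigned long long sector = 250;     /* start of the request */
	unsigned int bio_sectors = 8;        /* length of the request */

	/* offset within the chunk is 250 & 127 = 122; 122 + 8 = 130 > 128,
	 * so this request would straddle a chunk boundary */
	int fits = chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);

	printf("fits in one chunk: %d\n", fits);   /* prints 0 */
	return 0;
}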
retry:
previous = 0;
+ disks = conf->raid_disks;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- if (likely(conf->reshape_progress == MaxSector))
- disks = conf->raid_disks;
- else {
+ if (unlikely(conf->reshape_progress != MaxSector)) {
/* spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
* possible to see a half-updated value
* to check again.
*/
spin_lock_irq(&conf->device_lock);
- disks = conf->raid_disks;
if (mddev->delta_disks < 0
? logical_sector < conf->reshape_progress
: logical_sector >= conf->reshape_progress) {
sh = get_active_stripe(conf, new_sector, previous,
(bi->bi_rw&RWA_MASK));
if (sh) {
- if (unlikely(conf->reshape_progress != MaxSector)) {
+ if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
* stripe, so we must do the range check again.
* Expansion could still move past after this
*/
int must_retry = 0;
spin_lock_irq(&conf->device_lock);
- if ((mddev->delta_disks < 0
- ? logical_sector >= conf->reshape_progress
- : logical_sector < conf->reshape_progress)
- && previous)
+ if (mddev->delta_disks < 0
+ ? logical_sector >= conf->reshape_progress
+ : logical_sector < conf->reshape_progress)
/* mismatch, need to try again */
must_retry = 1;
spin_unlock_irq(&conf->device_lock);
int new_data_disks = conf->raid_disks - conf->max_degraded;
int i;
int dd_idx;
- sector_t writepos, safepos, gap;
+ sector_t writepos, readpos, safepos;
sector_t stripe_addr;
+ int reshape_sectors;
+ struct list_head stripes;
if (sector_nr == 0) {
/* If restarting in the middle, skip the initial sectors */
}
}
+ /* We need to process a full chunk at a time.
+ * If old and new chunk sizes differ, we need to process the
+ * largest of these
+ */
+ if (mddev->new_chunk > mddev->chunk_size)
+ reshape_sectors = mddev->new_chunk / 512;
+ else
+ reshape_sectors = mddev->chunk_size / 512;
+
- /* we update the metadata when there is more than 3Meg
- * in the block range (that is rather arbitrary, should
- * probably be time based) or when the data about to be
- */
writepos = conf->reshape_progress;
sector_div(writepos, new_data_disks);
+ readpos = conf->reshape_progress;
+ sector_div(readpos, data_disks);
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->delta_disks < 0) {
- writepos -= conf->chunk_size/512;
- safepos += conf->chunk_size/512;
- gap = conf->reshape_safe - conf->reshape_progress;
+ writepos -= reshape_sectors;
+ readpos += reshape_sectors;
+ safepos += reshape_sectors;
} else {
- writepos += conf->chunk_size/512;
- safepos -= conf->chunk_size/512;
- gap = conf->reshape_progress - conf->reshape_safe;
+ writepos += reshape_sectors;
+ readpos -= reshape_sectors;
+ safepos -= reshape_sectors;
}
+ /* 'writepos' is the most advanced device address we might write.
+ * 'readpos' is the least advanced device address we might read.
+ * 'safepos' is the least address recorded in the metadata as having
+ * been reshaped.
+ * If 'readpos' is behind 'writepos', then there is no way that we can
+ * ensure safety in the face of a crash - that must be done by userspace
+ * making a backup of the data. So in that case there is no particular
+ * rush to update metadata.
+ * Otherwise if 'safepos' is behind 'writepos', then we really need to
+ * update the metadata to advance 'safepos' to match 'readpos' so that
+ * we can be safe in the event of a crash.
+ * So we insist on updating metadata if safepos is behind writepos and
+ * readpos is beyond writepos.
+ * In any case, update the metadata every 10 seconds.
+ * Maybe that number should be configurable, but I'm not sure it is
+ * worth it.... maybe it could be a multiple of safemode_delay???
+ */
if ((mddev->delta_disks < 0
- ? writepos < safepos
- : writepos > safepos) ||
- gap > (new_data_disks)*3000*2 /*3Meg*/) {
+ ? (safepos > writepos && readpos < writepos)
+ : (safepos < writepos && readpos > writepos)) ||
+ time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
mddev->reshape_position = conf->reshape_progress;
+ conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
BUG_ON(conf->reshape_progress == 0);
stripe_addr = writepos;
BUG_ON((mddev->dev_sectors &
- ~((sector_t)mddev->chunk_size / 512 - 1))
- - (conf->chunk_size / 512) - stripe_addr
+ ~((sector_t)reshape_sectors - 1))
+ - reshape_sectors - stripe_addr
!= sector_nr);
} else {
- BUG_ON(writepos != sector_nr + conf->chunk_size / 512);
+ BUG_ON(writepos != sector_nr + reshape_sectors);
stripe_addr = sector_nr;
}
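The checkpoint policy described in the comment above is easiest to see with concrete numbers. A hedged sketch of the growing case, all values assumed (3 data disks growing to 4, reshape_sectors = 128, positions in sectors):

#include <stdio.h>

int main(void)
{
	unsigned long long reshape_progress = 12000, reshape_safe = 9000;
	int reshape_sectors = 128;

	/* growing: writes map through 4 data disks, reads through 3 */
	unsigned long long writepos = reshape_progress / 4 + reshape_sectors; /* 3128 */
	unsigned long long readpos  = reshape_progress / 3 - reshape_sectors; /* 3872 */
	unsigned long long safepos  = reshape_safe     / 3 - reshape_sectors; /* 2872 */

	/* safepos lags writepos while readpos is still ahead of it, so a
	 * crash would only be recoverable after a metadata update */
	int must_update = (safepos < writepos && readpos > writepos);

	printf("must update metadata: %d\n", must_update);  /* prints 1 */
	return 0;
}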
- for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+ INIT_LIST_HEAD(&stripes);
+ for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
int skipped = 0;
sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
if (conf->level == 6 &&
j == sh->qd_idx)
continue;
- s = compute_blocknr(sh, j);
+ s = compute_blocknr(sh, j, 0);
if (s < raid5_size(mddev, 0, 0)) {
skipped = 1;
continue;
set_bit(STRIPE_EXPAND_READY, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
- release_stripe(sh);
+ list_add(&sh->lru, &stripes);
}
spin_lock_irq(&conf->device_lock);
if (mddev->delta_disks < 0)
- conf->reshape_progress -= i * new_data_disks;
+ conf->reshape_progress -= reshape_sectors * new_data_disks;
else
- conf->reshape_progress += i * new_data_disks;
+ conf->reshape_progress += reshape_sectors * new_data_disks;
spin_unlock_irq(&conf->device_lock);
/* Ok, those stripes are ready. We can start scheduling
* reads on the source stripes.
release_stripe(sh);
first_sector += STRIPE_SECTORS;
}
+ /* Now that the sources are clearly marked, we can release
+ * the destination stripes
+ */
+ while (!list_empty(&stripes)) {
+ sh = list_entry(stripes.next, struct stripe_head, lru);
+ list_del_init(&sh->lru);
+ release_stripe(sh);
+ }
/* If this takes us to the resync_max point where we have to pause,
* then we need to write out the superblock.
*/
- sector_nr += conf->chunk_size>>9;
+ sector_nr += reshape_sectors;
if (sector_nr >= mddev->resync_max) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress;
+ conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
}
- return conf->chunk_size>>9;
+ return reshape_sectors;
}
/* FIXME go_faster isn't used */
}
sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+ sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
return sectors * (raid_disks - conf->max_degraded);
}
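raid5_size now rounds the usable per-device size down to a multiple of both the old and the new chunk size. Because both are powers of two, applying the two masks in sequence leaves a multiple of the larger one; a quick check with assumed sizes:

#include <stdio.h>

int main(void)
{
	unsigned long long sectors = 1000000;

	sectors &= ~(unsigned long long)(128 - 1);   /* old chunk: 64KiB  */
	sectors &= ~(unsigned long long)(512 - 1);   /* new chunk: 256KiB */

	printf("%llu\n", sectors);   /* 999936 = 1953 * 512 */
	return 0;
}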
conf->algorithm = mddev->new_layout;
conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
+ if (conf->reshape_progress != MaxSector) {
+ conf->prev_chunk = mddev->chunk_size;
+ conf->prev_algo = mddev->layout;
+ }
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
int old_disks;
int max_degraded = (mddev->level == 6 ? 2 : 1);
- if (mddev->new_level != mddev->level ||
- mddev->new_layout != mddev->layout ||
- mddev->new_chunk != mddev->chunk_size) {
+ if (mddev->new_level != mddev->level) {
printk(KERN_ERR "raid5: %s: unsupported reshape "
"required - aborting.\n",
mdname(mddev));
* geometry.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, (mddev->chunk_size>>9)*
+ if (sector_div(here_new, (mddev->new_chunk>>9)*
(mddev->raid_disks - max_degraded))) {
printk(KERN_ERR "raid5: reshape_position not "
"on a stripe boundary\n");
if (mddev->degraded == 0)
printk("raid5: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- conf->algorithm);
+ " devices, algorithm %d\n", conf->level, mdname(mddev),
+ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+ mddev->new_layout);
else
printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
" out of %d devices, algorithm %d\n", conf->level,
mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, conf->algorithm);
+ mddev->raid_disks, mddev->new_layout);
print_raid5_conf(conf);
return 0;
}
-#ifdef CONFIG_MD_RAID5_RESHAPE
static int raid5_check_reshape(mddev_t *mddev)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
- if (mddev->delta_disks == 0)
- return 0; /* nothing to do */
+ if (mddev->delta_disks == 0 &&
+ mddev->new_layout == mddev->layout &&
+ mddev->new_chunk == mddev->chunk_size)
+ return -EINVAL; /* nothing to do */
if (mddev->bitmap)
/* Cannot grow a bitmap yet */
return -EBUSY;
if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
(mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
- (mddev->chunk_size / STRIPE_SIZE)*4);
+ (max(mddev->chunk_size, mddev->new_chunk)
+ / STRIPE_SIZE)*4);
return -ENOSPC;
}
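The check above now sizes the stripe cache against the larger of the two chunk sizes, demanding four chunks' worth of stripe_heads. With STRIPE_SIZE assumed to be one 4KiB page (it is PAGE_SIZE-derived), the arithmetic for an assumed 512KiB chunk and the default cache looks like:

#include <stdio.h>

int main(void)
{
	int stripe_size = 4096;          /* assumes STRIPE_SIZE == 4KiB page */
	int chunk_size = 512 * 1024;     /* assumed 512KiB chunk */
	int max_nr_stripes = 256;        /* default NR_STRIPES */

	int needed = (chunk_size / stripe_size) * 4;   /* 512 */

	/* 512 > 256, so check_reshape would return -ENOSPC until the
	 * stripe cache is enlarged */
	printf("needed %d, have %d\n", needed, max_nr_stripes);
	return 0;
}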
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks;
+ conf->prev_chunk = conf->chunk_size;
+ conf->chunk_size = mddev->new_chunk;
+ conf->prev_algo = conf->algorithm;
+ conf->algorithm = mddev->new_layout;
if (mddev->delta_disks < 0)
conf->reshape_progress = raid5_size(mddev, 0, 0);
else
spin_unlock_irq(&conf->device_lock);
return -EAGAIN;
}
+ conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
md_new_event(mddev);
return 0;
}
-#endif
/* This is called from the reshape thread and should make any
* changes needed in 'conf'
conf->previous_raid_disks = conf->raid_disks;
conf->reshape_progress = MaxSector;
spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
/* read-ahead size must cover two whole stripes, which is
* 2 * (datadisks) * chunksize where 'n' is the number of raid devices
static void raid5_finish_reshape(mddev_t *mddev)
{
struct block_device *bdev;
+ raid5_conf_t *conf = mddev_to_conf(mddev);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
}
} else {
int d;
- raid5_conf_t *conf = mddev_to_conf(mddev);
mddev->degraded = conf->raid_disks;
for (d = 0; d < conf->raid_disks ; d++)
if (conf->disks[d].rdev &&
d++)
raid5_remove_disk(mddev, d);
}
+ mddev->layout = conf->algorithm;
+ mddev->chunk_size = conf->chunk_size;
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
}
static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
{
- /* Currently the layout and chunk size can only be changed
- * for a 2-drive raid array, as in that case no data shuffling
- * is required.
- * Later we might validate these and set new_* so a reshape
- * can complete the change.
+ /* For a 2-drive array, the layout and chunk size can be changed
+ * immediately as no restriping is needed.
+ * For larger arrays we record the new value so that, after
+ * validation, a later reshape pass can apply it.
*/
raid5_conf_t *conf = mddev_to_conf(mddev);
/* They look valid */
- if (mddev->raid_disks != 2)
- return -EINVAL;
- if (new_layout >= 0) {
- conf->algorithm = new_layout;
- mddev->layout = mddev->new_layout = new_layout;
+ if (mddev->raid_disks == 2) {
+ if (new_layout >= 0) {
+ conf->algorithm = new_layout;
+ mddev->layout = mddev->new_layout = new_layout;
+ }
+ if (new_chunk > 0) {
+ conf->chunk_size = new_chunk;
+ mddev->chunk_size = mddev->new_chunk = new_chunk;
+ }
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ } else {
+ if (new_layout >= 0)
+ mddev->new_layout = new_layout;
+ if (new_chunk > 0)
+ mddev->new_chunk = new_chunk;
}
+ return 0;
+}
+
+static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
+{
+ if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
+ return -EINVAL;
if (new_chunk > 0) {
- conf->chunk_size = new_chunk;
- mddev->chunk_size = mddev->new_chunk = new_chunk;
+ if (new_chunk & (new_chunk-1))
+ /* not a power of 2 */
+ return -EINVAL;
+ if (new_chunk < PAGE_SIZE)
+ return -EINVAL;
+ if (mddev->array_sectors & ((new_chunk>>9)-1))
+ /* not factor of array size */
+ return -EINVAL;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- md_wakeup_thread(mddev->thread);
+
+ /* They look valid */
+
+ if (new_layout >= 0)
+ mddev->new_layout = new_layout;
+ if (new_chunk > 0)
+ mddev->new_chunk = new_chunk;
+
return 0;
}
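raid6_reconfig's chunk validation uses the classic bit trick: n & (n - 1) clears the lowest set bit, so the expression is zero exactly when n is a power of two. A small demonstration, with PAGE_SIZE assumed to be 4096:

#include <stdio.h>

int main(void)
{
	int page_size = 4096;                        /* assumed PAGE_SIZE */
	int chunks[] = { 65536, 98304, 4096, 2048 };
	int i;

	for (i = 0; i < 4; i++) {
		int c = chunks[i];

		if (c & (c - 1))
			printf("%6d: rejected, not a power of two\n", c);
		else if (c < page_size)
			printf("%6d: rejected, smaller than a page\n", c);
		else
			printf("%6d: ok\n", c);
	}
	return 0;
}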
.sync_request = sync_request,
.resize = raid5_resize,
.size = raid5_size,
-#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
-#endif
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
+ .reconfig = raid6_reconfig,
};
static struct mdk_personality raid5_personality =
{
.sync_request = sync_request,
.resize = raid5_resize,
.size = raid5_size,
-#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
-#endif
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.reconfig = raid5_reconfig,
.sync_request = sync_request,
.resize = raid5_resize,
.size = raid5_size,
-#ifdef CONFIG_MD_RAID5_RESHAPE
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
-#endif
.quiesce = raid5_quiesce,
};