Merge linux-block/for-4.3/core into md/for-linux
author NeilBrown <neilb@suse.com>
Sat, 5 Sep 2015 09:07:04 +0000 (11:07 +0200)
committer NeilBrown <neilb@suse.com>
Sat, 5 Sep 2015 09:08:32 +0000 (11:08 +0200)
There were a few conflicts that are fairly easy to resolve.

Signed-off-by: NeilBrown <neilb@suse.com>
1  2 
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

diff --cc drivers/md/md.c
Simple merge
@@@ -203,9 -188,10 +203,6 @@@ static int create_strip_zones(struct md
                }
                dev[j] = rdev1;
  
-               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
-                       conf->has_merge_bvec = 1;
 -              if (mddev->queue)
 -                      disk_stack_limits(mddev->gendisk, rdev1->bdev,
 -                                        rdev1->data_offset << 9);
--
                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
Simple merge
@@@ -2750,9 -2633,7 +2635,8 @@@ static void handle_write_completed(stru
                                        r10_bio->devs[m].addr,
                                        r10_bio->sectors, 0);
                                rdev_dec_pending(rdev, conf->mddev);
-                       } else if (bio != NULL &&
-                                  !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+                       } else if (bio != NULL && bio->bi_error) {
 +                              fail = true;
                                if (!narrow_write_error(r10_bio, m)) {
                                        md_error(conf->mddev, rdev);
                                        set_bit(R10BIO_Degraded,
@@@ -230,7 -233,8 +230,7 @@@ static void return_io(struct bio_list *
                bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
-               bio_endio(bi, 0);
+               bio_endio(bi);
 -              bi = return_bi;
        }
  }
  
@@@ -3107,10 -3110,12 +3105,11 @@@ handle_failed_stripe(struct r5conf *con
                while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-                       clear_bit(BIO_UPTODATE, &bi->bi_flags);
+                       bi->bi_error = -EIO;
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
 -                              bi->bi_next = *return_bi;
 -                              *return_bi = bi;
 +                              bio_list_add(return_bi, bi);
                        }
                        bi = nextbi;
                }
                while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
-                       clear_bit(BIO_UPTODATE, &bi->bi_flags);
+                       bi->bi_error = -EIO;
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
 -                              bi->bi_next = *return_bi;
 -                              *return_bi = bi;
 +                              bio_list_add(return_bi, bi);
                        }
                        bi = bi2;
                }
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
-                               clear_bit(BIO_UPTODATE, &bi->bi_flags);
+                               bi->bi_error = -EIO;
 -                              if (!raid5_dec_bi_active_stripes(bi)) {
 -                                      bi->bi_next = *return_bi;
 -                                      *return_bi = bi;
 -                              }
 +                              if (!raid5_dec_bi_active_stripes(bi))
 +                                      bio_list_add(return_bi, bi);
                                bi = nextbi;
                        }
                }
@@@ -4667,43 -4670,14 +4668,14 @@@ static int raid5_congested(struct mdde
        return 0;
  }
  
- /* We want read requests to align with chunks where possible,
-  * but write requests don't need to.
-  */
- static int raid5_mergeable_bvec(struct mddev *mddev,
-                               struct bvec_merge_data *bvm,
-                               struct bio_vec *biovec)
- {
-       struct r5conf *conf = mddev->private;
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       int max;
-       unsigned int chunk_sectors;
-       unsigned int bio_sectors = bvm->bi_size >> 9;
-       /*
-        * always allow writes to be mergeable, read as well if array
-        * is degraded as we'll go through stripe cache anyway.
-        */
-       if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
-               return biovec->bv_len;
-       chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
-       if (max < 0) max = 0;
-       if (max <= biovec->bv_len && bio_sectors == 0)
-               return biovec->bv_len;
-       else
-               return max;
- }
  static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
  {
 +      struct r5conf *conf = mddev->private;
        sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 -      unsigned int chunk_sectors = mddev->chunk_sectors;
 +      unsigned int chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
  
 -      if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 -              chunk_sectors = mddev->new_chunk_sectors;
 +      chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
        return  chunk_sectors >=
                ((sector & (chunk_sectors - 1)) + bio_sectors);
  }