Linux 3.2.82 (pandora-kernel.git)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e260..6056ee7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
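
Sketch (not part of the patch): a minimal userspace restatement of the corrected branch order in __release_stripe(). A stripe marked STRIPE_DELAYED is only parked on delayed_list while no preread is active, and the stale STRIPE_DELAYED bit is now cleared on the way to handle_list so the stripe cannot bounce back to delayed_list later. All names below are illustrative, not kernel code.

#include <stdio.h>

enum {
        STRIPE_DELAYED        = 1 << 0,
        STRIPE_PREREAD_ACTIVE = 1 << 1,
        STRIPE_BIT_DELAY      = 1 << 2,
};

/* Mirrors the fixed branch order; bm_lag stands in for
 * sh->bm_seq - conf->seq_write. */
static const char *pick_list(unsigned *state, long bm_lag)
{
        if ((*state & STRIPE_DELAYED) && !(*state & STRIPE_PREREAD_ACTIVE))
                return "delayed_list";
        if ((*state & STRIPE_BIT_DELAY) && bm_lag > 0)
                return "bitmap_list";
        /* the fix: drop STRIPE_DELAYED too before handling */
        *state &= ~(STRIPE_DELAYED | STRIPE_BIT_DELAY);
        return "handle_list";
}

int main(void)
{
        unsigned st = STRIPE_DELAYED | STRIPE_PREREAD_ACTIVE;
        printf("%s\n", pick_list(&st, 0));      /* handle_list, bit cleared */
        return 0;
}
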
@@ -542,6 +544,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
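
Sketch (not part of the patch): the added atomic_inc() pairs with the decrement that md_wait_for_blocked_rdev() performs internally, exactly as the new comment says. A compressed userspace analogy of that reference hand-off, with a plain int standing in for atomic_t and invented names throughout:

#include <assert.h>

struct rdev_sketch { int nr_pending; };

/* Stands in for md_wait_for_blocked_rdev(): it consumes one
 * pending reference when the wait completes. */
static void wait_for_blocked(struct rdev_sketch *rdev)
{
        /* ... sleep until the Blocked flag clears ... */
        rdev->nr_pending--;
        assert(rdev->nr_pending >= 0);  /* underflows without the inc */
}

int main(void)
{
        struct rdev_sketch r = { .nr_pending = 0 };
        r.nr_pending++;                 /* the fix: take the ref we hand over */
        wait_for_blocked(&r);
        return 0;
}
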
@@ -1546,7 +1554,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 
        conf->slab_cache = sc;
        conf->active_name = 1-conf->active_name;
-       conf->pool_size = newsize;
+       if (!err)
+               conf->pool_size = newsize;
        return err;
 }
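
Sketch (not part of the patch): the resize_stripes() change is a commit-on-success rule. conf->pool_size may only advance to newsize when no allocation failed; otherwise later code would index pages that were never allocated. A standalone restatement of the pattern, with illustrative names and error cleanup omitted for brevity:

#include <stdlib.h>

struct conf_sketch { int pool_size; void **pages; };

static int resize_sketch(struct conf_sketch *conf, int newsize)
{
        int err = 0;
        void **pages = calloc(newsize, sizeof(*pages));

        if (!pages)
                return -1;              /* stands in for -ENOMEM */
        for (int i = 0; i < newsize; i++) {
                pages[i] = malloc(4096);
                if (!pages[i]) {
                        err = -1;
                        break;
                }
        }
        conf->pages = pages;
        if (!err)                       /* the fix: commit only on success */
                conf->pool_size = newsize;
        return err;
}

int main(void)
{
        struct conf_sketch c = { 0 };
        return resize_sketch(&c, 8);
}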
 
@@ -1685,6 +1694,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
        }
 
        if (!uptodate) {
+               set_bit(STRIPE_DEGRADED, &sh->state);
                set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
                set_bit(R5_WriteError, &sh->dev[i].flags);
        } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
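
Sketch (not part of the patch): with this hunk a failed write is recorded at stripe level too, not only per device, so later handling sees the stripe as degraded rather than clean. A toy model of the new error path, with invented userspace types:

#include <stdbool.h>
#include <stdio.h>

struct dev_sketch    { bool write_error; };
struct stripe_sketch { bool degraded; struct dev_sketch dev[3]; };

static void end_write(struct stripe_sketch *sh, int i, bool uptodate)
{
        if (!uptodate) {
                sh->degraded = true;            /* the added line */
                sh->dev[i].write_error = true;  /* was already there */
        }
}

int main(void)
{
        struct stripe_sketch sh = { 0 };
        end_write(&sh, 1, false);
        printf("degraded=%d\n", sh.degraded);   /* 1 */
        return 0;
}
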
@@ -3036,6 +3046,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
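
Sketch (not part of the patch): normalising a Faulty rdev to NULL up front means every branch below only ever sees a non-NULL rdev for usable devices; previously a Faulty device could fall through into branches that assume it still carries valid state. The guard, restated with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct rdev_sketch { bool faulty; };

static const struct rdev_sketch *usable(const struct rdev_sketch *rdev)
{
        if (rdev && rdev->faulty)
                rdev = NULL;            /* the added normalisation */
        return rdev;
}

int main(void)
{
        struct rdev_sketch bad = { .faulty = true };
        puts(usable(&bad) ? "use device" : "treat as missing");
        return 0;
}
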
@@ -3063,11 +3075,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
                if (test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
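
Sketch (not part of the patch): the R5_Insync decision becomes a flat chain with a third way in, per the new comment: data freshly reshaped into this slot (R5_UPTODATE plus R5_Expanded) counts as in-sync even though recovery_offset has not yet been updated to say so. A userspace restatement of the predicate, with invented types and simplified sector math:

#include <stdbool.h>
#include <stdio.h>

struct dev_sketch {
        bool in_sync, uptodate, expanded;
        long long recovery_offset;
};

static bool r5_insync(const struct dev_sketch *d, long long sector,
                      int stripe_sectors)
{
        if (d->in_sync)
                return true;
        if (sector + stripe_sectors <= d->recovery_offset)
                return true;    /* in sync if before recovery_offset */
        if (d->uptodate && d->expanded)
                return true;    /* reshaped into here; soon made official */
        return false;
}

int main(void)
{
        struct dev_sketch d = { .uptodate = true, .expanded = true };
        printf("insync=%d\n", r5_insync(&d, 0, 8));     /* 1 */
        return 0;
}
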
@@ -3223,6 +3241,8 @@ static void handle_stripe(struct stripe_head *sh)
                                set_bit(R5_Wantwrite, &dev->flags);
                                if (prexor)
                                        continue;
+                               if (s.failed > 1)
+                                       continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
                                     s.failed == 0))
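
Sketch (not part of the patch): the s.failed > 1 guard appears to stop a doubly-degraded stripe from being declared in-sync off the back of a reconstruct; with two failures the recomputed blocks cannot vouch for the stripe's consistency. The surrounding condition, restated as a small predicate with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool may_mark_insync(int failed, bool prexor, bool dev_insync,
                            bool is_parity_dev)
{
        if (prexor)
                return false;
        if (failed > 1)                 /* the added guard */
                return false;
        return !dev_insync || (is_parity_dev && failed == 0);
}

int main(void)
{
        /* doubly-degraded: never claim STRIPE_INSYNC from this path */
        printf("%d\n", may_mark_insync(2, false, false, false));       /* 0 */
        return 0;
}
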
@@ -3613,7 +3633,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3624,6 +3643,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
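
Sketch (not part of the patch): an ordering fix. md's bad-block records are kept in sectors relative to the start of the device's data area, so is_badblock() has to see the sector before rdev->data_offset is folded in; the offset is added only once the read is known to be clean, and (per the new comment) only because no reshape is active. A self-contained illustration with a toy bad-block check and invented fields:

#include <stdio.h>

struct rdev_sketch { long long data_offset, bad_start, bad_len; };

static int is_badblock(const struct rdev_sketch *r, long long sector, int len)
{
        return sector < r->bad_start + r->bad_len &&
               sector + len > r->bad_start;
}

int main(void)
{
        struct rdev_sketch r = { .data_offset = 2048,
                                 .bad_start = 100, .bad_len = 8 };
        long long sector = 96;                  /* data-area relative */

        if (is_badblock(&r, sector, 8)) {       /* check first ... */
                puts("overlap: fall back to the stripe path");
                return 0;
        }
        sector += r.data_offset;                /* ... then offset to submit */
        printf("submit at device sector %lld\n", sector);
        return 0;
}
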
@@ -4431,23 +4453,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
        return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+       safe_put_page(percpu->spare_page);
+       kfree(percpu->scribble);
+       percpu->spare_page = NULL;
+       percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+       if (conf->level == 6 && !percpu->spare_page)
+               percpu->spare_page = alloc_page(GFP_KERNEL);
+       if (!percpu->scribble)
+               percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+       if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+               free_scratch_buffer(conf, percpu);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-       struct raid5_percpu *percpu;
        unsigned long cpu;
 
        if (!conf->percpu)
                return;
 
-       get_online_cpus();
-       for_each_possible_cpu(cpu) {
-               percpu = per_cpu_ptr(conf->percpu, cpu);
-               safe_put_page(percpu->spare_page);
-               kfree(percpu->scribble);
-       }
 #ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+       get_online_cpus();
+       for_each_possible_cpu(cpu)
+               free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
        put_online_cpus();
 
        free_percpu(conf->percpu);
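
Sketch (not part of the patch): the new free_scratch_buffer()/alloc_scratch_buffer() pair deduplicates logic that previously lived in three places (initial allocation, CPU_UP_PREPARE, and CPU_DEAD plus teardown), and a partial allocation failure now rolls back through the same free path, so no half-initialised per-cpu state survives. A compilable userspace mock of the pair, with malloc() standing in for alloc_page()/kmalloc():

#include <stdlib.h>

struct percpu_sketch { void *spare_page; void *scribble; };

static void free_scratch(struct percpu_sketch *p)
{
        free(p->spare_page);
        free(p->scribble);
        p->spare_page = NULL;
        p->scribble = NULL;
}

static int alloc_scratch(struct percpu_sketch *p, int level, size_t len)
{
        if (level == 6 && !p->spare_page)
                p->spare_page = malloc(4096);   /* RAID6 needs the spare page */
        if (!p->scribble)
                p->scribble = malloc(len);

        if (!p->scribble || (level == 6 && !p->spare_page)) {
                free_scratch(p);        /* partial failure: roll back fully */
                return -1;              /* stands in for -ENOMEM */
        }
        return 0;
}

int main(void)
{
        struct percpu_sketch p = { 0 };
        int err = alloc_scratch(&p, 6, 4096);

        free_scratch(&p);
        return err;
}

Note also the reordering inside raid5_free_percpu() above: the hotplug notifier is now unregistered before the loop that frees each CPU's buffers, so a racing CPU_UP_PREPARE callback cannot repopulate (and leak) a buffer mid-teardown.
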
@@ -4473,15 +4515,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (conf->level == 6 && !percpu->spare_page)
-                       percpu->spare_page = alloc_page(GFP_KERNEL);
-               if (!percpu->scribble)
-                       percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-               if (!percpu->scribble ||
-                   (conf->level == 6 && !percpu->spare_page)) {
-                       safe_put_page(percpu->spare_page);
-                       kfree(percpu->scribble);
+               if (alloc_scratch_buffer(conf, percpu)) {
                        pr_err("%s: failed memory allocation for cpu%ld\n",
                               __func__, cpu);
                        return notifier_from_errno(-ENOMEM);
@@ -4489,10 +4523,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               safe_put_page(percpu->spare_page);
-               kfree(percpu->scribble);
-               percpu->spare_page = NULL;
-               percpu->scribble = NULL;
+               free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
                break;
        default:
                break;
@@ -4504,40 +4535,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
        unsigned long cpu;
-       struct page *spare_page;
-       struct raid5_percpu __percpu *allcpus;
-       void *scribble;
-       int err;
+       int err = 0;
 
-       allcpus = alloc_percpu(struct raid5_percpu);
-       if (!allcpus)
+       conf->percpu = alloc_percpu(struct raid5_percpu);
+       if (!conf->percpu)
                return -ENOMEM;
-       conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+       conf->cpu_notify.notifier_call = raid456_cpu_notify;
+       conf->cpu_notify.priority = 0;
+       err = register_cpu_notifier(&conf->cpu_notify);
+       if (err)
+               return err;
+#endif
 
        get_online_cpus();
-       err = 0;
        for_each_present_cpu(cpu) {
-               if (conf->level == 6) {
-                       spare_page = alloc_page(GFP_KERNEL);
-                       if (!spare_page) {
-                               err = -ENOMEM;
-                               break;
-                       }
-                       per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-               }
-               scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-               if (!scribble) {
-                       err = -ENOMEM;
+               err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+               if (err) {
+                       pr_err("%s: failed memory allocation for cpu%ld\n",
+                              __func__, cpu);
                        break;
                }
-               per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
        }
-#ifdef CONFIG_HOTPLUG_CPU
-       conf->cpu_notify.notifier_call = raid456_cpu_notify;
-       conf->cpu_notify.priority = 0;
-       if (err == 0)
-               err = register_cpu_notifier(&conf->cpu_notify);
-#endif
        put_online_cpus();
 
        return err;
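
Sketch (not part of the patch): the allocation path is the mirror image of the teardown ordering. The notifier is registered before the per-present-cpu loop, so a CPU that comes online while the loop runs is served by the callback rather than silently skipped, and err now propagates from both the registration and the allocations. The new order restated with stub functions in place of the kernel calls:

#include <stdio.h>

static int register_notifier(void) { return 0; }                /* stub */
static int alloc_for_cpu(int cpu)  { (void)cpu; return 0; }     /* stub */

static int alloc_percpu_sketch(int present_cpus)
{
        int err = register_notifier();  /* 1: hotplug callback live first */

        if (err)
                return err;
        for (int cpu = 0; cpu < present_cpus; cpu++) {
                err = alloc_for_cpu(cpu);       /* 2: then each present CPU */
                if (err) {
                        fprintf(stderr, "failed for cpu%d\n", cpu);
                        break;
                }
        }
        return err;
}

int main(void)
{
        return alloc_percpu_sketch(4);
}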