diff --git a/drivers/md/md.c b/drivers/md/md.c
index f47f1f8..2d0544c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -345,6 +345,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
                bio_io_error(bio);
                return;
        }
+       if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+               bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+               return;
+       }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
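
The hunk above keeps writes away from a read-only array: a zero-sector
flush/barrier bio is completed successfully, while a data write is failed
with -EROFS instead of being handed to a personality that does not expect
writes in this state. A minimal user-space sketch of that decision (model
types, not the kernel bio API):

    #include <errno.h>
    #include <stdio.h>

    struct bio_model { int is_write; unsigned sectors; };

    /* 0 = completed ok, -EROFS = rejected, 1 = keep processing,
     * mirroring the early-out added above */
    static int ro_write_filter(int array_ro, const struct bio_model *bio)
    {
        if (array_ro == 1 && bio->is_write)
            return bio->sectors == 0 ? 0 : -EROFS;
        return 1;
    }

    int main(void)
    {
        struct bio_model flush = { 1, 0 }, data = { 1, 8 };
        printf("flush on ro array -> %d\n", ro_write_filter(1, &flush)); /* 0 */
        printf("write on ro array -> %d\n", ro_write_filter(1, &data));  /* -EROFS */
        return 0;
    }
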
@@ -392,6 +396,8 @@ void mddev_suspend(struct mddev *mddev)
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
+
+       del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
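A plain del_timer() would only remove a pending timer; del_timer_sync()
also waits for a handler already running on another CPU, which is what a
suspend path needs before it may assume the device is quiescent. A rough
user-space analogue, with a worker thread standing in for the safemode
timer (names are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int timer_armed = 1;
    static atomic_int device_suspended = 0;

    static void *safemode_timer_fn(void *arg)
    {
        (void)arg;
        while (atomic_load(&timer_armed)) {
            if (atomic_load(&device_suspended))
                fprintf(stderr, "BUG: timer ran after suspend\n");
            usleep(1000);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, safemode_timer_fn, NULL);
        /* analogue of del_timer_sync(): disarm, then wait for any
         * running handler to finish before declaring the device quiet */
        atomic_store(&timer_armed, 0);
        pthread_join(t, NULL);
        atomic_store(&device_suspended, 1);
        puts("suspended with timer fully quiesced");
        return 0;
    }
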
@@ -451,7 +457,7 @@ static void submit_flushes(struct work_struct *ws)
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
-                       bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
+                       bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
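
submit_flushes() sits on the I/O path: a GFP_KERNEL allocation there may
enter direct reclaim and issue writeback, potentially through the very
array whose flush is being serviced, so the bio must be allocated
GFP_NOIO. A toy model of the recursion hazard (flags and allocator are
stand-ins, not kernel symbols):

    #include <stdio.h>
    #include <stdlib.h>

    enum gfp_model { MODEL_GFP_KERNEL, MODEL_GFP_NOIO };

    static int flush_in_progress;

    static void *model_alloc(enum gfp_model flags, size_t size)
    {
        /* pretend we are under memory pressure: a GFP_KERNEL-style
         * allocation may write dirty pages out -- possibly through the
         * same device whose flush we are servicing */
        if (flags == MODEL_GFP_KERNEL && flush_in_progress) {
            fprintf(stderr, "would recurse into the device -> deadlock\n");
            return NULL;
        }
        return malloc(size);
    }

    int main(void)
    {
        flush_in_progress = 1;
        void *bad = model_alloc(MODEL_GFP_KERNEL, 64); /* refused */
        void *ok  = model_alloc(MODEL_GFP_NOIO, 64);   /* safe */
        printf("GFP_KERNEL-style: %p, GFP_NOIO-style: %p\n", bad, ok);
        free(ok);
        return 0;
    }
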
@@ -1142,8 +1148,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
-       /* Limit to 4TB as metadata cannot record more than that */
-       if (rdev->sectors >= (2ULL << 32))
+       /* Limit to 4TB as metadata cannot record more than that.
+        * (not needed for Linear and RAID0 as metadata doesn't
+        * record this size)
+        */
+       if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
                rdev->sectors = (2ULL << 32) - 2;
 
        if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
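
The 0.90 superblock stores the component size as a 32-bit count of 1 KB
blocks, so the largest recordable size is 2^32 KB (the "4TB" of the
comment), i.e. 2ULL << 32 512-byte sectors. Linear and RAID0
(sb->level < 1) take the size from the device rather than from this
field, hence the exemption here and in the matching
super_90_rdev_size_change() hunk below. The clamp arithmetic, checkable
in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long limit = 2ULL << 32;        /* sectors: 8589934592 */
        unsigned long long bytes = limit * 512;       /* 4398046511104 = 4 TB */
        printf("limit: %llu sectors = %llu bytes\n", limit, bytes);

        unsigned long long dev_sectors = 12ULL << 30; /* a 6 TB component */
        if (dev_sectors >= limit)
            dev_sectors = limit - 2;                  /* the clamp above */
        printf("clamped: %llu sectors\n", dev_sectors);
        return 0;
    }
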
@@ -1425,7 +1434,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
        /* Limit to 4TB as metadata cannot record more than that.
         * 4TB == 2^32 KB, or 2*2^32 sectors.
         */
-       if (num_sectors >= (2ULL << 32))
+       if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
                num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
@@ -1579,8 +1588,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
                                             sector, count, 1) == 0)
                                return -EINVAL;
                }
-       } else if (sb->bblog_offset == 0)
-               rdev->badblocks.shift = -1;
+       } else if (sb->bblog_offset != 0)
+               rdev->badblocks.shift = 0;
 
        if (!refdev) {
                ret = 1;
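
With this change badblocks start out disabled (shift == -1, see the
md_rdev_init() hunk further down) and are enabled at single-sector
granularity (shift == 0) only when a v1.x superblock actually carries a
bad-block log. In memory each recorded range packs into one u64, which
the write-out hunk below converts to the on-disk layout; a sketch of the
in-memory packing, reconstructed from the BB_* macros of this era's md.h
(worth double-checking there):

    #include <stdint.h>
    #include <stdio.h>

    /* reconstructed layout: bit 63 = acknowledged,
     * bits 62..9 = start sector, bits 8..0 = length minus one */
    #define BB_MAKE(a, l, ack) (((uint64_t)(a) << 9) | ((uint64_t)(l) - 1) | \
                                ((uint64_t)(!!(ack)) << 63))
    #define BB_OFFSET(x) (((x) & 0x7FFFFFFFFFFFFE00ULL) >> 9)
    #define BB_LEN(x)    (((x) & 0x00000000000001FFULL) + 1)
    #define BB_ACK(x)    (!!((x) & 0x8000000000000000ULL))

    int main(void)
    {
        uint64_t bb = BB_MAKE(123456789ULL, 8, 1);
        printf("start=%llu len=%llu ack=%d\n",
               (unsigned long long)BB_OFFSET(bb),
               (unsigned long long)BB_LEN(bb), BB_ACK(bb));
        return 0;
    }
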
@@ -1796,18 +1805,18 @@ retry:
                        memset(bbp, 0xff, PAGE_SIZE);
 
                        for (i = 0 ; i < bb->count ; i++) {
-                               u64 internal_bb = *p++;
+                               u64 internal_bb = p[i];
                                u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
                                                | BB_LEN(internal_bb));
-                               *bbp++ = cpu_to_le64(store_bb);
+                               bbp[i] = cpu_to_le64(store_bb);
                        }
+                       bb->changed = 0;
                        if (read_seqretry(&bb->lock, seq))
                                goto retry;
 
                        bb->sector = (rdev->sb_start +
                                      (int)le32_to_cpu(sb->bblog_offset));
                        bb->size = le16_to_cpu(sb->bblog_size);
-                       bb->changed = 0;
                }
        }
 
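Two fixes ride in the hunk above. First, with *p++ and *bbp++ the
pointers kept their advanced values when read_seqretry() forced a retry,
so the second pass walked past the end of both arrays; indexed access
makes every pass start from the base. Second, bb->changed must be
cleared before the seqretry check, so a concurrent modification both
forces a retry and leaves the flag set for the next sync. A user-space
reduction of the pointer bug (hypothetical retry harness):

    #include <stdio.h>

    #define N 4

    /* Mirrors the fixed loop: indexing from the base on every pass makes
     * a seqlock-style retry idempotent.  The old *p++ / *bbp++ style left
     * the pointers advanced, so a retry ran off the end of both arrays. */
    static void copy_entries(const unsigned long long *src,
                             unsigned long long *dst, int retries)
    {
    retry:
        for (int i = 0; i < N; i++)
            dst[i] = src[i];
        if (retries-- > 0)
            goto retry;          /* harmless: each pass is a full rewrite */
    }

    int main(void)
    {
        unsigned long long bb[N] = { 1, 2, 3, 4 }, out[N];
        copy_entries(bb, out, 1);   /* as if read_seqretry() fired once */
        printf("%llu %llu %llu %llu\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
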
@@ -2362,6 +2371,7 @@ repeat:
                        clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        list_for_each_entry(rdev, &mddev->disks, same_set) {
                                if (rdev->badblocks.changed) {
+                                       rdev->badblocks.changed = 0;
                                        md_ack_all_badblocks(&rdev->badblocks);
                                        md_error(mddev, rdev);
                                }
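
Same flag discipline here: clearing .changed before
md_ack_all_badblocks() means a bad block recorded while the ack runs
re-dirties the rdev instead of being wiped by a later 'changed = 0'. The
safe order is test-and-clear, then process, sketched below:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int changed = 1;   /* set by whoever records a bad block */

    static void write_out_log(void) { /* ack + write the log */ }

    int main(void)
    {
        /* clear first: anything set after this point stays set and is
         * picked up on the next pass, instead of being clobbered by a
         * late 'changed = 0' after the write-out */
        if (atomic_exchange(&changed, 0)) {
            write_out_log();
            printf("log written; later setters keep the flag armed\n");
        }
        return 0;
    }
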
@@ -2832,6 +2842,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
+               if (!my_mddev->pers->resize)
+                       /* Cannot change size for RAID0 or Linear etc */
+                       return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */
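
The new check refuses a size change when the personality has no ->resize
method, which is how RAID0 and Linear advertise that component sizes are
fixed. It is the usual ops-table capability probe; with stand-in types:

    #include <errno.h>
    #include <stdio.h>

    struct pers_model {
        const char *name;
        int (*resize)(unsigned long long new_sectors); /* NULL = fixed */
    };

    static int generic_resize(unsigned long long s) { (void)s; return 0; }

    static int set_component_size(const struct pers_model *pers,
                                  unsigned long long sectors)
    {
        if (!pers->resize)
            return -EINVAL;        /* the refusal added above */
        return pers->resize(sectors);
    }

    int main(void)
    {
        struct pers_model raid1 = { "raid1", generic_resize };
        struct pers_model raid0 = { "raid0", NULL };
        printf("raid1: %d\n", set_component_size(&raid1, 1ULL << 21)); /* 0 */
        printf("raid0: %d\n", set_component_size(&raid0, 1ULL << 21)); /* -EINVAL */
        return 0;
    }
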
@@ -3050,7 +3063,7 @@ int md_rdev_init(struct md_rdev *rdev)
         * be used - I wonder if that matters
         */
        rdev->badblocks.count = 0;
-       rdev->badblocks.shift = 0;
+       rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
        rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
        seqlock_init(&rdev->badblocks.lock);
        if (rdev->badblocks.page == NULL)
@@ -3122,9 +3135,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
                        goto abort_free;
                }
        }
-       if (super_format == -1)
-               /* hot-add for 0.90, or non-persistent: so no badblocks */
-               rdev->badblocks.shift = -1;
 
        return rdev;
 
@@ -3697,8 +3707,8 @@ array_state_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3714,14 +3724,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                /* stopping an active array */
                if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0, 0);
+               err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
                        if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2, 0);
+                       err = do_md_stop(mddev, 2, NULL);
                } else
                        err = 0; /* already inactive */
                break;
@@ -3729,7 +3739,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = md_set_readonly(mddev, 0);
+                       err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
@@ -3739,7 +3749,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
-                               err = md_set_readonly(mddev, 0);
+                               err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
@@ -5075,15 +5085,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
        int err = 0;
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open) {
+       if (atomic_read(&mddev->openers) > !!bdev) {
                printk("md: %s still in use.\n",mdname(mddev));
                err = -EBUSY;
                goto out;
        }
+       if (bdev)
+               sync_blockdev(bdev);
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
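This hunk and the related ones above and below replace the old
'int is_open' flag with the opener's block_device, so the stop paths can
sync exactly that device before stopping the array. The '> !!bdev'
comparison keeps the old semantics: !! collapses the pointer to 0 (no
opener expected) or 1 (the caller itself holds one open). The idiom in
two lines:

    #include <stdio.h>

    static int still_in_use(int openers, const void *bdev)
    {
        /* the caller's own open file, if any, does not count against it */
        return openers > !!bdev;
    }

    int main(void)
    {
        int self;
        printf("%d\n", still_in_use(1, NULL));  /* 1: somebody else has it */
        printf("%d\n", still_in_use(1, &self)); /* 0: only the caller */
        return 0;
    }
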
@@ -5105,18 +5117,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                     struct block_device *bdev)
 {
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
 
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open ||
+       if (atomic_read(&mddev->openers) > !!bdev ||
            mddev->sysfs_active) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                return -EBUSY;
        }
+       if (bdev)
+               /* It is possible IO was issued on some other
+                * open file which was closed before we took ->open_mutex.
+                * As that was not the last close __blkdev_put will not
+                * have called sync_blockdev, so we must.
+                */
+               sync_blockdev(bdev);
 
        if (mddev->pers) {
                if (mddev->ro)
@@ -5190,7 +5210,7 @@ static void autorun_array(struct mddev *mddev)
        err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop(mddev, 0, 0);
+               do_md_stop(mddev, 0, NULL);
        }
 }
 
@@ -6181,11 +6201,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop(mddev, 0, 1);
+                       err = do_md_stop(mddev, 0, bdev);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = md_set_readonly(mddev, 1);
+                       err = md_set_readonly(mddev, bdev);
                        goto done_unlock;
 
                case BLKROSET:
@@ -7634,9 +7654,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
                   sector_t *first_bad, int *bad_sectors)
 {
        int hi;
-       int lo = 0;
+       int lo;
        u64 *p = bb->page;
-       int rv = 0;
+       int rv;
        sector_t target = s + sectors;
        unsigned seq;
 
@@ -7651,7 +7671,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
 
 retry:
        seq = read_seqbegin(&bb->lock);
-
+       lo = 0;
+       rv = 0;
        hi = bb->count;
 
        /* Binary search between lo and hi for 'target'
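
Same class of bug as the bad-block write-out earlier, now on the read
side: lo and rv were initialised once, so a seqlock retry reran the
binary search with a stale lower bound and possibly a stale result.
Moving the initialisation after the retry label restores the invariant.
A compact model of a correct seqlock-style read loop (user-space
stand-ins for read_seqbegin()/read_seqretry(); the real function searches
bb->page, not a plain table):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seqcount;   /* even = stable, odd = writer active */
    static int table[8] = { 1, 3, 5, 7, 9, 11, 13, 15 };

    static unsigned read_seqbegin_model(void) { return atomic_load(&seqcount); }
    static int read_seqretry_model(unsigned s)
    {
        return (s & 1) || atomic_load(&seqcount) != s;
    }

    static int contains(int target)
    {
        int lo, hi, rv;
        unsigned seq;
    retry:
        seq = read_seqbegin_model();
        lo = 0;                 /* reset on every attempt, as the fix does */
        rv = 0;
        hi = 8;
        while (lo < hi) {
            int mid = (lo + hi) / 2;
            if (table[mid] == target) { rv = 1; break; }
            if (table[mid] < target) lo = mid + 1; else hi = mid;
        }
        if (read_seqretry_model(seq))
            goto retry;
        return rv;
    }

    int main(void)
    {
        printf("%d %d\n", contains(7), contains(8)); /* 1 0 */
        return 0;
    }
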
@@ -8097,30 +8118,24 @@ static int md_notify_reboot(struct notifier_block *this,
        struct list_head *tmp;
        struct mddev *mddev;
        int need_delay = 0;
 
-       if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
-
-               printk(KERN_INFO "md: stopping all md devices.\n");
-
-               for_each_mddev(mddev, tmp) {
-                       if (mddev_trylock(mddev)) {
-                               /* Force a switch to readonly even array
-                                * appears to still be in use.  Hence
-                                * the '100'.
-                                */
-                               md_set_readonly(mddev, 100);
-                               mddev_unlock(mddev);
-                       }
-                       need_delay = 1;
+       for_each_mddev(mddev, tmp) {
+               if (mddev_trylock(mddev)) {
+                       if (mddev->pers)
+                               __md_stop_writes(mddev);
+                       mddev->safemode = 2;
+                       mddev_unlock(mddev);
                }
-               /*
-                * certain more exotic SCSI devices are known to be
-                * volatile wrt too early system reboots. While the
-                * right place to handle this issue is the given
-                * driver, we do want to have a safe RAID driver ...
-                */
-               if (need_delay)
-                       mdelay(1000*1);
+               need_delay = 1;
        }
+       /*
+        * certain more exotic SCSI devices are known to be
+        * volatile wrt too early system reboots. While the
+        * right place to handle this issue is the given
+        * driver, we do want to have a safe RAID driver ...
+        */
+       if (need_delay)
+               mdelay(1000*1);
+
        return NOTIFY_DONE;
 }
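
The rewrite stops forcing arrays read-only at shutdown, which raced with
filesystems still writing; instead it flushes pending writes via
__md_stop_writes() and arms safemode, and it now runs for every notifier
code rather than only SYS_DOWN/SYS_HALT/SYS_POWER_OFF. mddev_trylock()
keeps one contended array from stalling the whole reboot. The
trylock-and-skip pattern in miniature:

    #include <pthread.h>
    #include <stdio.h>

    struct mddev_model { pthread_mutex_t lock; int safemode; };

    int main(void)
    {
        struct mddev_model devs[2] = {
            { PTHREAD_MUTEX_INITIALIZER, 0 },
            { PTHREAD_MUTEX_INITIALIZER, 0 },
        };
        pthread_mutex_lock(&devs[1].lock);   /* pretend it is contended */

        for (int i = 0; i < 2; i++) {
            if (pthread_mutex_trylock(&devs[i].lock) == 0) {
                devs[i].safemode = 2;        /* quiesce, as the notifier does */
                pthread_mutex_unlock(&devs[i].lock);
            }                                /* else skip; never block reboot */
        }
        printf("safemode: %d %d\n", devs[0].safemode, devs[1].safemode); /* 2 0 */
        return 0;
    }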