swapfile: swap allocation use discard
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 90cb67a..ca75b9e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -35,6 +35,7 @@
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
+long nr_swap_pages;
 long total_swap_pages;
 static int swap_overflow;
 static int least_priority;
@@ -83,15 +84,95 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
        up_read(&swap_unplug_sem);
 }
 
+/*
+ * swapon tells the device that all the old swap contents can be discarded,
+ * to allow the swap device to optimize its wear-levelling.
+ */
+static int discard_swap(struct swap_info_struct *si)
+{
+       struct swap_extent *se;
+       int err = 0;
+
+       list_for_each_entry(se, &si->extent_list, list) {
+               sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
+               pgoff_t nr_blocks = se->nr_pages << (PAGE_SHIFT - 9);
+
+               if (se->start_page == 0) {
+                       /* Do not discard the swap header page! */
+                       start_block += 1 << (PAGE_SHIFT - 9);
+                       nr_blocks -= 1 << (PAGE_SHIFT - 9);
+                       if (!nr_blocks)
+                               continue;
+               }
+
+               err = blkdev_issue_discard(si->bdev, start_block,
+                                               nr_blocks, GFP_KERNEL);
+               if (err)
+                       break;
+
+               cond_resched();
+       }
+       return err;             /* That will often be -EOPNOTSUPP */
+}
+
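
The shifts by (PAGE_SHIFT - 9) above convert page counts into the 512-byte
sector units that blkdev_issue_discard() takes. A minimal userspace sketch of
that arithmetic, assuming 4K pages (the names and constants are local to the
sketch, not kernel API):

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assume 4K pages */
	#define SECTOR_SHIFT	9	/* 512-byte sectors */

	int main(void)
	{
		unsigned long start_page = 1;	/* skip the swap header page */
		unsigned long nr_pages = 255;

		unsigned long long start_block =
			(unsigned long long)start_page << (PAGE_SHIFT - SECTOR_SHIFT);
		unsigned long long nr_blocks =
			(unsigned long long)nr_pages << (PAGE_SHIFT - SECTOR_SHIFT);

		printf("pages [%lu..%lu) -> sectors [%llu..%llu)\n",
		       start_page, start_page + nr_pages,
		       start_block, start_block + nr_blocks);
		return 0;
	}
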
+/*
+ * swap allocation tells the device that a cluster of swap can now be discarded,
+ * to allow the swap device to optimize its wear-levelling.
+ */
+static void discard_swap_cluster(struct swap_info_struct *si,
+                                pgoff_t start_page, pgoff_t nr_pages)
+{
+       struct swap_extent *se = si->curr_swap_extent;
+       int found_extent = 0;
+
+       while (nr_pages) {
+               struct list_head *lh;
+
+               if (se->start_page <= start_page &&
+                   start_page < se->start_page + se->nr_pages) {
+                       pgoff_t offset = start_page - se->start_page;
+                       sector_t start_block = se->start_block + offset;
+                       pgoff_t nr_blocks = se->nr_pages - offset;
+
+                       if (nr_blocks > nr_pages)
+                               nr_blocks = nr_pages;
+                       start_page += nr_blocks;
+                       nr_pages -= nr_blocks;
+
+                       if (!found_extent++)
+                               si->curr_swap_extent = se;
+
+                       start_block <<= PAGE_SHIFT - 9;
+                       nr_blocks <<= PAGE_SHIFT - 9;
+                       if (blkdev_issue_discard(si->bdev, start_block,
+                                                       nr_blocks, GFP_NOIO))
+                               break;
+               }
+
+               lh = se->list.next;
+               if (lh == &si->extent_list)
+                       lh = lh->next;
+               se = list_entry(lh, struct swap_extent, list);
+       }
+}
+
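
discard_swap_cluster() starts from si->curr_swap_extent rather than the head
of the extent list, so when the walk wraps it must step over the list head
sentinel (the lh == &si->extent_list check). A self-contained sketch of that
wrap-around with toy types (not the kernel's <linux/list.h>):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next; };

	struct extent {
		int id;
		struct list_head list;
	};

	int main(void)
	{
		struct extent a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
		struct list_head head;

		/* head -> a -> b -> c -> head, like si->extent_list */
		head.next = &a.list;
		a.list.next = &b.list;
		b.list.next = &c.list;
		c.list.next = &head;

		struct list_head *lh = &b.list;	/* "current" extent */
		for (int i = 0; i < 4; i++) {
			lh = lh->next;
			if (lh == &head)	/* skip the sentinel on wrap */
				lh = lh->next;
			struct extent *e = (struct extent *)
				((char *)lh - offsetof(struct extent, list));
			printf("extent %d\n", e->id);	/* prints 2 0 1 2 */
		}
		return 0;
	}
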
+static int wait_for_discard(void *word)
+{
+       schedule();
+       return 0;
+}
+
 #define SWAPFILE_CLUSTER       256
 #define LATENCY_LIMIT          256
 
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
-       unsigned long offset, last_in_cluster;
+       unsigned long offset;
+       unsigned long last_in_cluster = 0;
        int latency_ration = LATENCY_LIMIT;
+       int found_free_cluster = 0;
 
-       /* 
+       /*
         * We try to cluster swap pages by allocating them sequentially
         * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
         * way, however, we resort to first-free allocation, starting
@@ -102,10 +183,26 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
         */
 
        si->flags += SWP_SCANNING;
-       if (unlikely(!si->cluster_nr)) {
-               si->cluster_nr = SWAPFILE_CLUSTER - 1;
-               if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
-                       goto lowest;
+       offset = si->cluster_next;
+
+       if (unlikely(!si->cluster_nr--)) {
+               if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
+                       si->cluster_nr = SWAPFILE_CLUSTER - 1;
+                       goto checks;
+               }
+               if (si->flags & SWP_DISCARDABLE) {
+                       /*
+                        * Start range check on racing allocations, in case
+                        * they overlap the cluster we eventually decide on
+                        * (we scan without swap_lock to allow preemption).
+                        * It's hardly conceivable that cluster_nr could be
+                        * wrapped during our scan, but don't depend on it.
+                        */
+                       if (si->lowest_alloc)
+                               goto checks;
+                       si->lowest_alloc = si->max;
+                       si->highest_alloc = 0;
+               }
                spin_unlock(&swap_lock);
 
                offset = si->lowest_bit;
@@ -117,43 +214,103 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
                                last_in_cluster = offset + SWAPFILE_CLUSTER;
                        else if (offset == last_in_cluster) {
                                spin_lock(&swap_lock);
-                               si->cluster_next = offset-SWAPFILE_CLUSTER+1;
-                               goto cluster;
+                               offset -= SWAPFILE_CLUSTER - 1;
+                               si->cluster_next = offset;
+                               si->cluster_nr = SWAPFILE_CLUSTER - 1;
+                               found_free_cluster = 1;
+                               goto checks;
                        }
                        if (unlikely(--latency_ration < 0)) {
                                cond_resched();
                                latency_ration = LATENCY_LIMIT;
                        }
                }
+
+               offset = si->lowest_bit;
                spin_lock(&swap_lock);
-               goto lowest;
+               si->cluster_nr = SWAPFILE_CLUSTER - 1;
+               si->lowest_alloc = 0;
        }
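
The scan above looks for SWAPFILE_CLUSTER consecutive free entries:
last_in_cluster marks where the current candidate run would end, and is pushed
forward past every in-use slot. The same scan over a toy map, with the cluster
size shrunk for readability (a sketch, not the kernel function):

	#include <stdio.h>

	#define CLUSTER	4	/* stand-in for SWAPFILE_CLUSTER (256) */

	/* First offset starting a run of CLUSTER free slots, or -1. */
	static long find_free_cluster(const unsigned char *map,
				      long lowest, long highest)
	{
		long offset = lowest;
		long last_in_cluster = offset + CLUSTER - 1;

		for (; last_in_cluster <= highest; offset++) {
			if (map[offset])
				/* in use: a free run can only end later */
				last_in_cluster = offset + CLUSTER;
			else if (offset == last_in_cluster)
				/* CLUSTER consecutive free slots end here */
				return offset - (CLUSTER - 1);
		}
		return -1;
	}

	int main(void)
	{
		unsigned char map[12] = { 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 };
		printf("cluster starts at %ld\n",
		       find_free_cluster(map, 0, 11));	/* prints 2 */
		return 0;
	}
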
 
-       si->cluster_nr--;
-cluster:
-       offset = si->cluster_next;
-       if (offset > si->highest_bit)
-lowest:                offset = si->lowest_bit;
-checks:        if (!(si->flags & SWP_WRITEOK))
+checks:
+       if (!(si->flags & SWP_WRITEOK))
                goto no_page;
        if (!si->highest_bit)
                goto no_page;
-       if (!si->swap_map[offset]) {
-               if (offset == si->lowest_bit)
-                       si->lowest_bit++;
-               if (offset == si->highest_bit)
-                       si->highest_bit--;
-               si->inuse_pages++;
-               if (si->inuse_pages == si->pages) {
-                       si->lowest_bit = si->max;
-                       si->highest_bit = 0;
+       if (offset > si->highest_bit)
+               offset = si->lowest_bit;
+       if (si->swap_map[offset])
+               goto scan;
+
+       if (offset == si->lowest_bit)
+               si->lowest_bit++;
+       if (offset == si->highest_bit)
+               si->highest_bit--;
+       si->inuse_pages++;
+       if (si->inuse_pages == si->pages) {
+               si->lowest_bit = si->max;
+               si->highest_bit = 0;
+       }
+       si->swap_map[offset] = 1;
+       si->cluster_next = offset + 1;
+       si->flags -= SWP_SCANNING;
+
+       if (si->lowest_alloc) {
+               /*
+                * Only set when SWP_DISCARDABLE, and there's a scan
+                * for a free cluster in progress or just completed.
+                */
+               if (found_free_cluster) {
+                       /*
+                        * To optimize wear-levelling, discard the
+                        * old data of the cluster, taking care not to
+                        * discard any of its pages that have already
+                        * been allocated by racing tasks (offset has
+                        * already stepped over any at the beginning).
+                        */
+                       if (offset < si->highest_alloc &&
+                           si->lowest_alloc <= last_in_cluster)
+                               last_in_cluster = si->lowest_alloc - 1;
+                       si->flags |= SWP_DISCARDING;
+                       spin_unlock(&swap_lock);
+
+                       if (offset < last_in_cluster)
+                               discard_swap_cluster(si, offset,
+                                       last_in_cluster - offset + 1);
+
+                       spin_lock(&swap_lock);
+                       si->lowest_alloc = 0;
+                       si->flags &= ~SWP_DISCARDING;
+
+                       smp_mb();       /* wake_up_bit advises this */
+                       wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
+
+               } else if (si->flags & SWP_DISCARDING) {
+                       /*
+                        * Delay using pages allocated by racing tasks
+                        * until the whole discard has been issued. We
+                        * could defer that delay until swap_writepage,
+                        * but it's easier to keep this self-contained.
+                        */
+                       spin_unlock(&swap_lock);
+                       wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
+                               wait_for_discard, TASK_UNINTERRUPTIBLE);
+                       spin_lock(&swap_lock);
+               } else {
+                       /*
+                        * Note pages allocated by racing tasks while
+                        * the scan for a free cluster is in progress,
+                        * so that its final discard can exclude them.
+                        */
+                       if (offset < si->lowest_alloc)
+                               si->lowest_alloc = offset;
+                       if (offset > si->highest_alloc)
+                               si->highest_alloc = offset;
                }
-               si->swap_map[offset] = 1;
-               si->cluster_next = offset + 1;
-               si->flags -= SWP_SCANNING;
-               return offset;
        }
+       return offset;
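
SWP_DISCARDING works as a bit-lock handshake: the task that found the cluster
sets the flag, drops swap_lock, issues the discard, then clears the flag and
wakes waiters; racing allocators that grabbed pages from the same cluster
sleep on that bit until the discard is fully issued. Roughly the same
handshake with pthreads (illustrative only; wait_on_bit()/wake_up_bit() are
the kernel primitives):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
	static int discarding = 1;	/* SWP_DISCARDING already set */

	static void *discarder(void *arg)
	{
		puts("issuing discard...");	/* discard_swap_cluster() */

		pthread_mutex_lock(&lock);
		discarding = 0;			/* flags &= ~SWP_DISCARDING */
		pthread_cond_broadcast(&done);	/* wake_up_bit() */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	static void *allocator(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (discarding)		/* wait_on_bit() */
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);
		puts("page safe to use");
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;
		pthread_create(&t1, NULL, discarder, NULL);
		pthread_create(&t2, NULL, allocator, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}
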
 
+scan:
        spin_unlock(&swap_lock);
        while (++offset <= si->highest_bit) {
                if (!si->swap_map[offset]) {
@@ -166,7 +323,7 @@ checks:     if (!(si->flags & SWP_WRITEOK))
                }
        }
        spin_lock(&swap_lock);
-       goto lowest;
+       goto checks;
 
 no_page:
        si->flags -= SWP_SCANNING;
@@ -268,7 +425,7 @@ bad_nofile:
        printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
 out:
        return NULL;
-}      
+}
 
 static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
 {
@@ -326,84 +483,45 @@ static inline int page_swapcount(struct page *page)
 }
 
 /*
- * We can use this swap cache entry directly
- * if there are no other references to it.
+ * We can write to an anon page without COW if there are no other references
+ * to it.  And as a side-effect, free up its swap: because the old content
+ * on disk will never be read, and seeking back there to write new content
+ * later would only waste time away from clustering.
  */
-int can_share_swap_page(struct page *page)
+int reuse_swap_page(struct page *page)
 {
        int count;
 
-       BUG_ON(!PageLocked(page));
+       VM_BUG_ON(!PageLocked(page));
        count = page_mapcount(page);
-       if (count <= 1 && PageSwapCache(page))
+       if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
+               if (count == 1 && !PageWriteback(page)) {
+                       delete_from_swap_cache(page);
+                       SetPageDirty(page);
+               }
+       }
        return count == 1;
 }
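
The reuse test counts both the ptes mapping the page and the swap entries
still referencing it; only when the total is exactly one is the caller the
sole owner, and only then is the stale swap slot freed as a side effect. A toy
model of that accounting (plain C, ignoring the writeback check, not the
kernel's page flags):

	#include <stdio.h>

	struct toy_page {
		int mapcount;		/* ptes mapping the page */
		int swapcount;		/* swap references to it */
		int in_swapcache;
		int dirty;
	};

	static int toy_reuse(struct toy_page *page)
	{
		int count = page->mapcount;

		if (count <= 1 && page->in_swapcache) {
			count += page->swapcount;
			if (count == 1) {
				/* sole owner: the on-disk copy is dead */
				page->in_swapcache = 0;
				page->swapcount = 0;
				page->dirty = 1;
			}
		}
		return count == 1;
	}

	int main(void)
	{
		struct toy_page p = { .mapcount = 1, .in_swapcache = 1 };
		printf("reuse=%d swapcache=%d dirty=%d\n",
		       toy_reuse(&p), p.in_swapcache, p.dirty);	/* 1 0 1 */
		return 0;
	}
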
 
 /*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
+ * If swap is getting full, or if there are no more mappings of this page,
+ * then try_to_free_swap is called to free its swap space.
  */
-static int remove_exclusive_swap_page_count(struct page *page, int count)
+int try_to_free_swap(struct page *page)
 {
-       int retval;
-       struct swap_info_struct * p;
-       swp_entry_t entry;
-
-       BUG_ON(PagePrivate(page));
-       BUG_ON(!PageLocked(page));
+       VM_BUG_ON(!PageLocked(page));
 
        if (!PageSwapCache(page))
                return 0;
        if (PageWriteback(page))
                return 0;
-       if (page_count(page) != count) /* us + cache + ptes */
+       if (page_swapcount(page))
                return 0;
 
-       entry.val = page_private(page);
-       p = swap_info_get(entry);
-       if (!p)
-               return 0;
-
-       /* Is the only swap cache user the cache itself? */
-       retval = 0;
-       if (p->swap_map[swp_offset(entry)] == 1) {
-               /* Recheck the page count with the swapcache lock held.. */
-               spin_lock_irq(&swapper_space.tree_lock);
-               if ((page_count(page) == count) && !PageWriteback(page)) {
-                       __delete_from_swap_cache(page);
-                       SetPageDirty(page);
-                       retval = 1;
-               }
-               spin_unlock_irq(&swapper_space.tree_lock);
-       }
-       spin_unlock(&swap_lock);
-
-       if (retval) {
-               swap_free(entry);
-               page_cache_release(page);
-       }
-
-       return retval;
-}
-
-/*
- * Most of the time the page should have two references: one for the
- * process and one for the swap cache.
- */
-int remove_exclusive_swap_page(struct page *page)
-{
-       return remove_exclusive_swap_page_count(page, 2);
-}
-
-/*
- * The pageout code holds an extra reference to the page.  That raises
- * the reference count to test for to 2 for a page that is only in the
- * swap cache plus 1 for each process that maps the page.
- */
-int remove_exclusive_swap_page_ref(struct page *page)
-{
-       return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
+       delete_from_swap_cache(page);
+       SetPageDirty(page);
+       return 1;
 }
 
 /*
@@ -430,14 +548,12 @@ void free_swap_and_cache(swp_entry_t entry)
                spin_unlock(&swap_lock);
        }
        if (page) {
-               int one_user;
-
-               BUG_ON(PagePrivate(page));
-               one_user = (page_count(page) == 2);
-               /* Only cache user (+us), or swap space full? Free it! */
-               /* Also recheck PageSwapCache after page is locked (above) */
+               /*
+                * Not mapped elsewhere, or swap space full? Free it!
+                * Also recheck PageSwapCache now page is locked (above).
+                */
                if (PageSwapCache(page) && !PageWriteback(page) &&
-                                       (one_user || vm_swap_full())) {
+                               (!page_mapped(page) || vm_swap_full())) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
@@ -776,10 +892,10 @@ static int try_to_unuse(unsigned int type)
                        break;
                }
 
-               /* 
+               /*
                 * Get a page for the entry, using the existing swap
                 * cache page if there is one.  Otherwise, get a clean
-                * page and read the swap into it. 
+                * page and read the swap into it.
                 */
                swap_map = &si->swap_map[i];
                entry = swp_entry(type, i);
@@ -930,7 +1046,16 @@ static int try_to_unuse(unsigned int type)
                        lock_page(page);
                        wait_on_page_writeback(page);
                }
-               if (PageSwapCache(page))
+
+               /*
+                * It is conceivable that a racing task removed this page from
+                * swap cache just before we acquired the page lock at the top,
+                * or while we dropped it in unuse_mm().  The page might even
+                * be back in swap cache on another swap area: that we must not
+                * delete, since it may not have been written out to swap yet.
+                */
+               if (PageSwapCache(page) &&
+                   likely(page_private(page) == entry.val))
                        delete_from_swap_cache(page);
 
                /*
@@ -1209,7 +1334,7 @@ int page_queue_congested(struct page *page)
 {
        struct backing_dev_info *bdi;
 
-       BUG_ON(!PageLocked(page));      /* It pins the swap_info_struct */
+       VM_BUG_ON(!PageLocked(page));   /* It pins the swap_info_struct */
 
        if (PageSwapCache(page)) {
                swp_entry_t entry = { .val = page_private(page) };
@@ -1233,7 +1358,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        char * pathname;
        int i, type, prev;
        int err;
-       
+
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
@@ -1253,7 +1378,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        spin_lock(&swap_lock);
        for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
                p = swap_info + type;
-               if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
+               if (p->flags & SWP_WRITEOK) {
                        if (p->swap_file->f_mapping == mapping)
                                break;
                }
@@ -1426,12 +1551,12 @@ static int swap_show(struct seq_file *swap, void *v)
        file = ptr->swap_file;
        len = seq_path(swap, &file->f_path, " \t\n\\");
        seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
-                      len < 40 ? 40 - len : 1, " ",
-                      S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
+                       len < 40 ? 40 - len : 1, " ",
+                       S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
                                "partition" : "file\t",
-                      ptr->pages << (PAGE_SHIFT - 10),
-                      ptr->inuse_pages << (PAGE_SHIFT - 10),
-                      ptr->prio);
+                       ptr->pages << (PAGE_SHIFT - 10),
+                       ptr->inuse_pages << (PAGE_SHIFT - 10),
+                       ptr->prio);
        return 0;
 }
 
@@ -1462,6 +1587,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+       MAX_SWAPFILES_CHECK();
+       return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
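
MAX_SWAPFILES_CHECK() presumably expands to a compile-time assertion on the
swap-type bit-field limits, so the late_initcall exists only to give the macro
a function to live in. The usual build-time assert idiom it relies on, sketched
in plain C with illustrative constants:

	#include <stdio.h>

	/* Fails to compile when cond is true: negative array size. */
	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	#define MAX_SWAPFILES_SHIFT	5
	#define MAX_SWAPFILES		(1 << MAX_SWAPFILES_SHIFT)

	int main(void)
	{
		/* Compiles only while the limit fits in its bit-field. */
		BUILD_BUG_ON(MAX_SWAPFILES > (1 << MAX_SWAPFILES_SHIFT));
		puts("swapfile limits consistent");
		return 0;
	}
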
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
@@ -1478,12 +1612,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
        int i, prev;
        int error;
        union swap_header *swap_header = NULL;
-       int swap_header_version;
        unsigned int nr_good_pages = 0;
        int nr_extents = 0;
        sector_t span;
        unsigned long maxpages = 1;
-       int swapfilesize;
+       unsigned long swapfilepages;
        unsigned short *swap_map = NULL;
        struct page *page = NULL;
        struct inode *inode = NULL;
@@ -1561,7 +1694,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                goto bad_swap;
        }
 
-       swapfilesize = i_size_read(inode) >> PAGE_SHIFT;
+       swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
 
        /*
         * Read the swap header.
@@ -1575,101 +1708,86 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                error = PTR_ERR(page);
                goto bad_swap;
        }
-       kmap(page);
-       swap_header = page_address(page);
+       swap_header = kmap(page);
 
-       if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
-               swap_header_version = 1;
-       else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
-               swap_header_version = 2;
-       else {
+       if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
                printk(KERN_ERR "Unable to find swap-space signature\n");
                error = -EINVAL;
                goto bad_swap;
        }
-       
-       switch (swap_header_version) {
-       case 1:
-               printk(KERN_ERR "version 0 swap is no longer supported. "
-                       "Use mkswap -v1 %s\n", name);
+
+       /* swap partition endianness hack... */
+       if (swab32(swap_header->info.version) == 1) {
+               swab32s(&swap_header->info.version);
+               swab32s(&swap_header->info.last_page);
+               swab32s(&swap_header->info.nr_badpages);
+               for (i = 0; i < swap_header->info.nr_badpages; i++)
+                       swab32s(&swap_header->info.badpages[i]);
+       }
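
The hack works because a version field written as 1 on an opposite-endian
machine reads back as 0x01000000: if byte-swapping the version yields 1, the
whole header is assumed to need swapping. A standalone illustration of the
test:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t swab32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00) |
		       ((x << 8) & 0x00ff0000) | (x << 24);
	}

	int main(void)
	{
		/* A version of 1, as seen from the other endianness. */
		uint32_t version = 0x01000000;

		if (swab32(version) == 1)	/* header looks swapped */
			version = swab32(version);

		printf("version = %u\n", version);	/* prints 1 */
		return 0;
	}
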
+       /* Check the swap header's sub-version */
+       if (swap_header->info.version != 1) {
+               printk(KERN_WARNING
+                      "Unable to handle swap header version %d\n",
+                      swap_header->info.version);
                error = -EINVAL;
                goto bad_swap;
-       case 2:
-               /* swap partition endianess hack... */
-               if (swab32(swap_header->info.version) == 1) {
-                       swab32s(&swap_header->info.version);
-                       swab32s(&swap_header->info.last_page);
-                       swab32s(&swap_header->info.nr_badpages);
-                       for (i = 0; i < swap_header->info.nr_badpages; i++)
-                               swab32s(&swap_header->info.badpages[i]);
-               }
-               /* Check the swap header's sub-version and the size of
-                   the swap file and bad block lists */
-               if (swap_header->info.version != 1) {
-                       printk(KERN_WARNING
-                              "Unable to handle swap header version %d\n",
-                              swap_header->info.version);
-                       error = -EINVAL;
-                       goto bad_swap;
-               }
+       }
 
-               p->lowest_bit  = 1;
-               p->cluster_next = 1;
+       p->lowest_bit  = 1;
+       p->cluster_next = 1;
 
-               /*
-                * Find out how many pages are allowed for a single swap
-                * device. There are two limiting factors: 1) the number of
-                * bits for the swap offset in the swp_entry_t type and
-                * 2) the number of bits in the a swap pte as defined by
-                * the different architectures. In order to find the
-                * largest possible bit mask a swap entry with swap type 0
-                * and swap offset ~0UL is created, encoded to a swap pte,
-                * decoded to a swp_entry_t again and finally the swap
-                * offset is extracted. This will mask all the bits from
-                * the initial ~0UL mask that can't be encoded in either
-                * the swp_entry_t or the architecture definition of a
-                * swap pte.
-                */
-               maxpages = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0,~0UL)))) - 1;
-               if (maxpages > swap_header->info.last_page)
-                       maxpages = swap_header->info.last_page;
-               p->highest_bit = maxpages - 1;
+       /*
+        * Find out how many pages are allowed for a single swap
+        * device. There are two limiting factors: 1) the number of
+        * bits for the swap offset in the swp_entry_t type and
+        * 2) the number of bits in a swap pte as defined by
+        * the different architectures. In order to find the
+        * largest possible bit mask a swap entry with swap type 0
+        * and swap offset ~0UL is created, encoded to a swap pte,
+        * decoded to a swp_entry_t again and finally the swap
+        * offset is extracted. This will mask all the bits from
+        * the initial ~0UL mask that can't be encoded in either
+        * the swp_entry_t or the architecture definition of a
+        * swap pte.
+        */
+       maxpages = swp_offset(pte_to_swp_entry(
+                       swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1;
+       if (maxpages > swap_header->info.last_page)
+               maxpages = swap_header->info.last_page;
+       p->highest_bit = maxpages - 1;
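
The encode/decode round trip is a portable probe: bits of the ~0UL offset that
cannot be represented in either the swp_entry_t layout or the architecture's
swap pte fall away, and what survives bounds the largest usable offset. The
same trick against a toy encoding that keeps only 24 offset bits (all names
local to the sketch):

	#include <stdio.h>

	#define TOY_OFFSET_BITS	24
	#define TOY_OFFSET_MASK	((1UL << TOY_OFFSET_BITS) - 1)

	/* Toy "pte" encoding: only 24 offset bits survive. */
	static unsigned long toy_entry_to_pte(unsigned long offset)
	{
		return offset & TOY_OFFSET_MASK;
	}

	static unsigned long toy_pte_to_entry(unsigned long pte)
	{
		return pte & TOY_OFFSET_MASK;
	}

	int main(void)
	{
		/* Encode ~0UL, decode it: surviving bits bound maxpages. */
		unsigned long maxpages =
			toy_pte_to_entry(toy_entry_to_pte(~0UL)) - 1;
		printf("maxpages = %lu\n", maxpages);	/* 2^24 - 2 */
		return 0;
	}
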
 
-               error = -EINVAL;
-               if (!maxpages)
-                       goto bad_swap;
-               if (swapfilesize && maxpages > swapfilesize) {
-                       printk(KERN_WARNING
-                              "Swap area shorter than signature indicates\n");
-                       goto bad_swap;
-               }
-               if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
-                       goto bad_swap;
-               if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
-                       goto bad_swap;
+       error = -EINVAL;
+       if (!maxpages)
+               goto bad_swap;
+       if (swapfilepages && maxpages > swapfilepages) {
+               printk(KERN_WARNING
+                      "Swap area shorter than signature indicates\n");
+               goto bad_swap;
+       }
+       if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
+               goto bad_swap;
+       if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+               goto bad_swap;
 
-               /* OK, set up the swap map and apply the bad block list */
-               swap_map = vmalloc(maxpages * sizeof(short));
-               if (!swap_map) {
-                       error = -ENOMEM;
-                       goto bad_swap;
-               }
+       /* OK, set up the swap map and apply the bad block list */
+       swap_map = vmalloc(maxpages * sizeof(short));
+       if (!swap_map) {
+               error = -ENOMEM;
+               goto bad_swap;
+       }
 
-               error = 0;
-               memset(swap_map, 0, maxpages * sizeof(short));
-               for (i = 0; i < swap_header->info.nr_badpages; i++) {
-                       int page_nr = swap_header->info.badpages[i];
-                       if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
-                               error = -EINVAL;
-                       else
-                               swap_map[page_nr] = SWAP_MAP_BAD;
-               }
-               nr_good_pages = swap_header->info.last_page -
-                               swap_header->info.nr_badpages -
-                               1 /* header page */;
-               if (error)
+       memset(swap_map, 0, maxpages * sizeof(short));
+       for (i = 0; i < swap_header->info.nr_badpages; i++) {
+               int page_nr = swap_header->info.badpages[i];
+               if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
+                       error = -EINVAL;
                        goto bad_swap;
+               }
+               swap_map[page_nr] = SWAP_MAP_BAD;
        }
+       nr_good_pages = swap_header->info.last_page -
+                       swap_header->info.nr_badpages -
+                       1 /* header page */;
 
        if (nr_good_pages) {
                swap_map[0] = SWAP_MAP_BAD;
@@ -1688,6 +1806,9 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                goto bad_swap;
        }
 
+       if (discard_swap(p) == 0)
+               p->flags |= SWP_DISCARDABLE;
+
        mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
        if (swap_flags & SWAP_FLAG_PREFER)
@@ -1696,14 +1817,15 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
        else
                p->prio = --least_priority;
        p->swap_map = swap_map;
-       p->flags = SWP_ACTIVE;
+       p->flags |= SWP_WRITEOK;
        nr_swap_pages += nr_good_pages;
        total_swap_pages += nr_good_pages;
 
        printk(KERN_INFO "Adding %uk swap on %s.  "
-                       "Priority:%d extents:%d across:%lluk\n",
+                       "Priority:%d extents:%d across:%lluk%s\n",
                nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
-               nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));
+               nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
+               (p->flags & SWP_DISCARDABLE) ? " D" : "");
 
        /* insert swap space into swap_list: */
        prev = -1;