[pandora-kernel.git] / kernel / power / swap.c
index 11a594c..64f8f97 100644
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
 
 #define MAP_PAGE_ENTRIES       (PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+       return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+       return low_free_pages() / 2;
+}
+
 struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
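
The two helpers above encode the patch's central policy. Buffer pages for the image are taken with __get_free_page(), which can only return low memory, so free highmem is irrelevant to write-buffering pressure and low_free_pages() deliberately excludes it. A minimal userspace sketch of the throttle these helpers drive (all figures hypothetical; free_low simulates low_free_pages(), and the refill simulates hib_wait_on_bio_chain() completing in-flight I/O):

    #include <stdio.h>

    static unsigned long free_low = 100000;            /* simulated free low-mem pages */

    static unsigned long low_free_pages(void)          /* stand-in for the new helper */
    {
            return free_low;
    }

    int main(void)
    {
            unsigned long reqd = low_free_pages() / 2; /* reqd_free_pages() at setup */

            for (int map = 0; map < 6; map++) {
                    free_low -= 30000;                 /* one map page of queued writes */
                    if (low_free_pages() <= reqd) {
                            free_low += 60000;         /* "hib_wait_on_bio_chain()":
                                                          in-flight pages come back */
                            reqd = low_free_pages() / 2; /* re-derive: at most half of
                                                            what is free right now */
                            printf("map %d: throttled, floor now %lu\n", map, reqd);
                    }
            }
            return 0;
    }

Recomputing the floor after each wait is what keeps at most half of the currently free low memory tied up in in-flight pages, instead of pinning the threshold to its value from before writing started.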
@@ -73,7 +90,7 @@ struct swap_map_handle {
        sector_t cur_swap;
        sector_t first_sector;
        unsigned int k;
-       unsigned long nr_free_pages, written;
+       unsigned long reqd_free_pages;
        u32 crc32;
 };
 
@@ -266,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
                return -ENOSPC;
 
        if (bio_chain) {
-               src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+               src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+                                             __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
                        if (ret)
                                return ret;
-                       src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+                       src = (void *)__get_free_page(__GFP_WAIT |
+                                                     __GFP_NOWARN |
+                                                     __GFP_NORETRY);
                        if (src) {
                                copy_page(src, buf);
                        } else {
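
The flag change is the substance of this hunk: __GFP_HIGH (dip into emergency reserves) is dropped, and __GFP_NOWARN | __GFP_NORETRY make the attempt quiet and fail-fast. That is safe because write_page() degrades gracefully: wait for the in-flight bio chain to free pages, retry once, and otherwise (in the else branch truncated above) fall back to writing synchronously from the caller's buffer. A hedged userspace analogue of that shape, with malloc() and a caller-supplied drain_io() standing in for __get_free_page() and hib_wait_on_bio_chain():

    #include <stdlib.h>
    #include <string.h>

    /* Try a cheap, fail-fast copy for async I/O; reclaim once and retry. */
    static void *copy_for_async_write(const void *buf, size_t len,
                                      int (*drain_io)(void))
    {
            void *src = malloc(len);    /* ~ __GFP_WAIT|__GFP_NOWARN|__GFP_NORETRY */

            if (!src) {
                    if (drain_io())     /* ~ hib_wait_on_bio_chain(); nonzero = I/O error */
                            return NULL;
                    src = malloc(len);  /* one retry after reclaim */
            }
            if (src)
                    memcpy(src, buf, len);
            return src;                 /* NULL: error, or fall back to a sync write */
    }

    static int no_pending_io(void) { return 0; }

    int main(void)
    {
            char page[4096] = "image data";
            void *copy = copy_for_async_write(page, sizeof(page), no_pending_io);

            free(copy);                 /* free(NULL) is a no-op */
            return 0;
    }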
@@ -317,8 +337,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
                goto err_rel;
        }
        handle->k = 0;
-       handle->nr_free_pages = nr_free_pages() >> 1;
-       handle->written = 0;
+       handle->reqd_free_pages = reqd_free_pages();
        handle->first_sector = handle->cur_swap;
        return 0;
 err_rel:
@@ -352,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;
-       }
-       if (bio_chain && ++handle->written > handle->nr_free_pages) {
-               error = hib_wait_on_bio_chain(bio_chain);
-               if (error)
-                       goto out;
-               handle->written = 0;
+
+               if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+                       error = hib_wait_on_bio_chain(bio_chain);
+                       if (error)
+                               goto out;
+                       /*
+                        * Recalculate the number of required free pages, to
+                        * make sure we never take more than half.
+                        */
+                       handle->reqd_free_pages = reqd_free_pages();
+               }
        }
  out:
        return error;
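
Beyond swapping the counter for a direct low_free_pages() reading, note the moved brace: the check now lives inside the "map page full" branch, so it runs once per MAP_PAGE_ENTRIES data pages instead of on every single page written. A quick back-of-the-envelope check of that cadence (assuming 4 KiB pages and a 64-bit sector_t; the patch itself fixes neither):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const unsigned long page_size = 4096;     /* assumed PAGE_SIZE */
            const unsigned long entries =
                    page_size / sizeof(uint64_t) - 1; /* MAP_PAGE_ENTRIES */

            printf("throttle check every %lu pages (~%lu KiB of image)\n",
                   entries, entries * page_size / 1024);
            return 0;
    }

With those values the check fires every 511 pages, i.e. roughly once per 2 MiB of image data.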
@@ -404,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS    3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES       1024
+#define LZO_MAX_RD_PAGES       8192
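
Under the common 4 KiB page size (an assumption; PAGE_SIZE is architecture-dependent), the new bounds put the read buffer between 4 MiB and 32 MiB, where the old single cap of MAP_PAGE_ENTRIES * 8 (4088 pages under the same assumptions) allowed at most about 16 MiB:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long page_kib = 4;              /* assumed 4 KiB pages */

            printf("min: 1024 pages = %lu MiB\n", 1024 * page_kib / 1024);
            printf("max: 8192 pages = %lu MiB\n", 8192 * page_kib / 1024);
            printf("old cap: 4088 pages = %lu KiB\n", 4088 * page_kib);
            return 0;
    }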
 
 
 /**
@@ -615,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
                }
        }
 
-       /*
-        * Adjust number of free pages after all allocations have been done.
-        * We don't want to run out of pages when writing.
-        */
-       handle->nr_free_pages = nr_free_pages() >> 1;
-
        /*
         * Start the CRC32 thread.
         */
@@ -642,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
                goto out_clean;
        }
 
+       /*
+        * Adjust the number of required free pages after all allocations have
+        * been done. We don't want to run out of pages when writing.
+        */
+       handle->reqd_free_pages = reqd_free_pages();
+
        printk(KERN_INFO
                "PM: Using %u thread(s) for compression.\n"
                "PM: Compressing and saving image data (%u pages) ...     ",
@@ -1051,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
-       unsigned long read_pages;
+       unsigned long read_pages = 0;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;
@@ -1063,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-       page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+       page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
@@ -1128,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
        }
 
        /*
-        * Adjust number of pages for read buffering, in case we are short.
+        * Set the number of pages for read buffering.
+        * This is complete guesswork, because we'll only know the real
+        * picture once prepare_image() is called, which is much later on
+        * during the image load phase. We'll assume the worst case and
+        * say that none of the image pages are from high memory.
         */
-       read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-       read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+       if (low_free_pages() > snapshot_get_image_size())
+               read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+       read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  __GFP_WAIT | __GFP_HIGH :
-                                                 __GFP_WAIT);
+                                                 __GFP_WAIT | __GFP_NOWARN |
+                                                 __GFP_NORETRY);
+
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
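
The sizing logic above is worth a worked example (hypothetical figures; the clamp_val() below is a plain stand-in for the kernel macro). With 300000 low pages free and a 100000-page image, the buffer gets (300000 - 100000) / 2 = 100000 pages, clamped down to LZO_MAX_RD_PAGES = 8192; with the image larger than free low memory, read_pages keeps its newly added initializer of 0 and the clamp lifts it to LZO_MIN_RD_PAGES = 1024:

    #include <stdio.h>

    static unsigned long clamp_val(unsigned long v, unsigned long lo,
                                   unsigned long hi)  /* stand-in for the kernel macro */
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            const unsigned long cases[][2] = {        /* { free low pages, image pages } */
                    { 300000, 100000 },               /* plenty of room */
                    {  90000, 100000 },               /* image exceeds free low memory */
            };

            for (int i = 0; i < 2; i++) {
                    unsigned long free_low = cases[i][0];
                    unsigned long image   = cases[i][1];
                    unsigned long read_pages = 0;     /* the new initializer matters here */

                    if (free_low > image)
                            read_pages = (free_low - image) / 2;
                    read_pages = clamp_val(read_pages, 1024, 8192);
                    printf("free=%lu image=%lu -> read_pages=%lu\n",
                           free_low, image, read_pages);
            }
            return 0;
    }

The GFP split in the allocation loop mirrors the write path: only the first LZO_CMP_PAGES pages, without which decompression cannot proceed at all, may still tap emergency reserves via __GFP_HIGH; every page beyond that is opportunistic, and a failed allocation simply leaves the ring smaller.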