diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3348521..3d3848f 100644
@@ -37,7 +37,7 @@
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation.  We do this so we don't hold I_LOCK against an inode for
+ * operation.  We do this so we don't hold I_SYNC against an inode for
  * enormous amounts of time, which would block a userspace task which has
  * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
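This comment documents MAX_WRITEBACK_PAGES, whose definition follows immediately in the file. For context, in trees of this vintage the definition reads as below; the value is quoted from memory of upstream and should be treated as an assumption rather than part of this patch.

	#define MAX_WRITEBACK_PAGES	1024	/* pages written before the dirty limits are re-read */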
@@ -297,20 +297,12 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 {
        int background_ratio;           /* Percentages */
        int dirty_ratio;
-       int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = determine_dirtyable_memory();
        struct task_struct *tsk;
 
-       unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
-                               global_page_state(NR_ANON_PAGES)) * 100) /
-                                       available_memory;
-
        dirty_ratio = vm_dirty_ratio;
-       if (dirty_ratio > unmapped_ratio / 2)
-               dirty_ratio = unmapped_ratio / 2;
-
        if (dirty_ratio < 5)
                dirty_ratio = 5;
 
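The net effect of this hunk: vm_dirty_ratio is no longer capped at half of the unmapped-page ratio, so large vm_dirty_ratio settings are honored; only the 5% floor survives. A minimal standalone sketch of the clamp that remains, where clamp_dirty_ratio() is a hypothetical helper and not a kernel symbol:

	static int clamp_dirty_ratio(int vm_dirty_ratio)
	{
		int dirty_ratio = vm_dirty_ratio;

		/* the unmapped_ratio cap is gone; only the lower bound remains */
		if (dirty_ratio < 5)
			dirty_ratio = 5;

		return dirty_ratio;
	}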
@@ -355,8 +347,8 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-       long bdi_nr_reclaimable;
-       long bdi_nr_writeback;
+       long nr_reclaimable, bdi_nr_reclaimable;
+       long nr_writeback, bdi_nr_writeback;
        long background_thresh;
        long dirty_thresh;
        long bdi_thresh;
@@ -376,11 +368,26 @@ static void balance_dirty_pages(struct address_space *mapping)
 
                get_dirty_limits(&background_thresh, &dirty_thresh,
                                &bdi_thresh, bdi);
+
+               nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                       global_page_state(NR_UNSTABLE_NFS);
+               nr_writeback = global_page_state(NR_WRITEBACK);
+
                bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;
 
+               /*
+                * Throttle it only when the background writeback cannot
+                * catch up. This avoids (excessively) small writeouts
+                * when the bdi limits are ramping up.
+                */
+               if (nr_reclaimable + nr_writeback <
+                               (background_thresh + dirty_thresh) / 2)
+                       break;
+
                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
 
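The added early exit throttles a dirtying task only once the global dirty state crosses the midpoint between the background and hard thresholds, which lets the per-bdi limits ramp up without forcing tiny writeouts. Restated as a standalone predicate; writeback_cannot_catch_up() is a hypothetical name for illustration, not a kernel function:

	static int writeback_cannot_catch_up(long nr_reclaimable, long nr_writeback,
					     long background_thresh, long dirty_thresh)
	{
		/* true once dirty + writeback pages pass the threshold midpoint */
		return nr_reclaimable + nr_writeback >=
				(background_thresh + dirty_thresh) / 2;
	}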
@@ -502,16 +509,6 @@ void throttle_vm_writeout(gfp_t gfp_mask)
        long background_thresh;
        long dirty_thresh;
 
-       if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
-               /*
-                * The caller might hold locks which can prevent IO completion
-                * or progress in the filesystem.  So we cannot just sit here
-                * waiting for IO to complete.
-                */
-               congestion_wait(WRITE, HZ/10);
-               return;
-       }
-
         for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 
@@ -525,6 +522,14 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                 congestion_wait(WRITE, HZ/10);
+
+               /*
+                * The caller might hold locks which can prevent IO completion
+                * or progress in the filesystem.  So we cannot just sit here
+                * waiting for IO to complete.
+                */
+               if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
+                       break;
         }
 }
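After these two hunks, a caller that may not recurse into the filesystem or issue IO (__GFP_FS or __GFP_IO clear) no longer returns immediately: it sleeps once in congestion_wait() per iteration and only then gives up waiting. A condensed view of the resulting loop; the threshold-boost line between the hunks is reconstructed from memory of upstream and is an assumption:

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/* assumed elided line: boost the limit for page allocators */
		dirty_thresh += dirty_thresh / 10;

		if (global_page_state(NR_UNSTABLE_NFS) +
				global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(WRITE, HZ/10);

		/* IO-restricted callers get one nap, then bail out */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}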
 
@@ -988,7 +993,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  * mapping is pinned by the vma's ->vm_file reference.
  *
  * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() insode tree_lock.
+ * mapping by re-checking page_mapping() inside tree_lock.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
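The hunk ends at the function's opening brace; the re-check the corrected comment refers to happens further down the body. A condensed sketch of that pattern under kernels of this era (tree_lock was still an rwlock then; details are from memory and should be treated as assumptions):

	write_lock_irq(&mapping->tree_lock);
	mapping2 = page_mapping(page);	/* re-sample inside tree_lock */
	if (mapping2) {			/* page was not truncated meanwhile */
		BUG_ON(mapping2 != mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);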