1 /*
2  * mm/page-writeback.c.
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains functions related to writing back dirty pages at the
7  * address_space level.
8  *
9  * 10Apr2002    akpm@zip.com.au
10  *              Initial version
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/spinlock.h>
16 #include <linux/fs.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/slab.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/init.h>
23 #include <linux/backing-dev.h>
24 #include <linux/blkdev.h>
25 #include <linux/mpage.h>
26 #include <linux/percpu.h>
27 #include <linux/notifier.h>
28 #include <linux/smp.h>
29 #include <linux/sysctl.h>
30 #include <linux/cpu.h>
31 #include <linux/syscalls.h>
32
33 /*
34  * The maximum number of pages to writeout in a single bdflush/kupdate
35  * operation.  We do this so we don't hold I_LOCK against an inode for
36  * enormous amounts of time, which would block a userspace task which has
37  * been forced to throttle against that inode.  Also, the code reevaluates
38  * its dirty state each time it has written this many pages.
39  */
40 #define MAX_WRITEBACK_PAGES     1024
41
42 /*
43  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
44  * will look to see if it needs to force writeback or throttling.
45  */
46 static long ratelimit_pages = 32;
47
48 static long total_pages;        /* The total number of pages in the machine. */
49 static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */
50
51 /*
52  * When balance_dirty_pages decides that the caller needs to perform some
53  * non-background writeback, this is how many pages it will attempt to write.
54  * It should be somewhat larger than ratelimit_pages to ensure that reasonably
55  * large amounts of I/O are submitted.
56  */
57 static inline long sync_writeback_pages(void)
58 {
59         return ratelimit_pages + ratelimit_pages / 2;
60 }
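/*
 * For illustration (hypothetical values, assuming 4KB pages): once
 * set_ratelimit() below has capped ratelimit_pages at 1024 pages (4MB),
 * sync_writeback_pages() returns 1024 + 512 = 1536 pages, i.e. the
 * "six megabyte chunks, max" mentioned in the comment above set_ratelimit().
 */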
61
62 /* The following parameters are exported via /proc/sys/vm */
63
64 /*
65  * Start background writeback (via pdflush) at this percentage
66  */
67 int dirty_background_ratio = 10;
68
69 /*
70  * The generator of dirty data starts writeback at this percentage
71  */
72 int vm_dirty_ratio = 40;
73
74 /*
75  * The interval between `kupdate'-style writebacks, in jiffies
76  */
77 int dirty_writeback_interval = 5 * HZ;
78
79 /*
80  * The longest number of jiffies for which data is allowed to remain dirty
81  */
82 int dirty_expire_interval = 30 * HZ;
83
84 /*
85  * Flag that makes the machine dump writes/reads and block dirtyings.
86  */
87 int block_dump;
88
89 /*
90  * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
91  * a full sync is triggered after this time elapses without any disk activity.
92  */
93 int laptop_mode;
94
95 EXPORT_SYMBOL(laptop_mode);
96
97 /* End of sysctl-exported parameters */
98
99
100 static void background_writeout(unsigned long _min_pages);
101
102 /*
103  * Work out the current dirty-memory clamping and background writeout
104  * thresholds.
105  *
106  * The main aim here is to lower them aggressively if there is a lot of mapped
107  * memory around, to avoid stressing page reclaim with lots of unreclaimable
108  * pages.  It is better to clamp down on writers than to start swapping and
109  * performing lots of scanning.
110  *
111  * We only allow 1/2 of the currently-unmapped memory to be dirtied.
112  *
113  * We don't permit the clamping level to fall below 5% - that is getting rather
114  * excessive.
115  *
116  * We make sure that the background writeout level is below the adjusted
117  * clamping level.
118  */
119 static void
120 get_dirty_limits(long *pbackground, long *pdirty,
121                                         struct address_space *mapping)
122 {
123         int background_ratio;           /* Percentages */
124         int dirty_ratio;
125         int unmapped_ratio;
126         long background;
127         long dirty;
128         unsigned long available_memory = total_pages;
129         struct task_struct *tsk;
130
131 #ifdef CONFIG_HIGHMEM
132         /*
133          * If this mapping can only allocate from low memory,
134          * we exclude high memory from our count.
135          */
136         if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
137                 available_memory -= totalhigh_pages;
138 #endif
139
140
141         unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
142                                 global_page_state(NR_ANON_PAGES)) * 100) /
143                                         total_pages;
144
145         dirty_ratio = vm_dirty_ratio;
146         if (dirty_ratio > unmapped_ratio / 2)
147                 dirty_ratio = unmapped_ratio / 2;
148
149         if (dirty_ratio < 5)
150                 dirty_ratio = 5;
151
152         background_ratio = dirty_background_ratio;
153         if (background_ratio >= dirty_ratio)
154                 background_ratio = dirty_ratio / 2;
155
156         background = (background_ratio * available_memory) / 100;
157         dirty = (dirty_ratio * available_memory) / 100;
158         tsk = current;
159         if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
160                 background += background / 4;
161                 dirty += dirty / 4;
162         }
163         *pbackground = background;
164         *pdirty = dirty;
165 }
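/*
 * Worked example (hypothetical numbers, assuming 4KB pages): with
 * total_pages = 262144 (1GB), 30% of memory mapped, vm_dirty_ratio = 40
 * and dirty_background_ratio = 10:
 *
 *	unmapped_ratio   = 100 - 30          = 70
 *	dirty_ratio      = min(40, 70 / 2)   = 35
 *	background_ratio = 10                (already below dirty_ratio)
 *	*pdirty          = 35% of 262144     = 91750 pages (~358MB)
 *	*pbackground     = 10% of 262144     = 26214 pages (~102MB)
 *
 * PF_LESS_THROTTLE and real-time tasks get both limits boosted by 25%.
 */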
166
167 /*
168  * balance_dirty_pages() must be called by processes which are generating dirty
169  * data.  It looks at the number of dirty pages in the machine and will force
170  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
171  * If we're over `background_thresh' then pdflush is woken to perform some
172  * writeout.
173  */
174 static void balance_dirty_pages(struct address_space *mapping)
175 {
176         long nr_reclaimable;
177         long background_thresh;
178         long dirty_thresh;
179         unsigned long pages_written = 0;
180         unsigned long write_chunk = sync_writeback_pages();
181
182         struct backing_dev_info *bdi = mapping->backing_dev_info;
183
184         for (;;) {
185                 struct writeback_control wbc = {
186                         .bdi            = bdi,
187                         .sync_mode      = WB_SYNC_NONE,
188                         .older_than_this = NULL,
189                         .nr_to_write    = write_chunk,
190                         .range_cyclic   = 1,
191                 };
192
193                 get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
194                 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
195                                         global_page_state(NR_UNSTABLE_NFS);
196                 if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
197                         dirty_thresh)
198                                 break;
199
200                 if (!dirty_exceeded)
201                         dirty_exceeded = 1;
202
203                 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
204                  * Unstable writes are a feature of certain networked
205                  * filesystems (e.g. NFS) in which data may have been
206                  * written to the server's write cache, but has not yet
207                  * been flushed to permanent storage.
208                  */
209                 if (nr_reclaimable) {
210                         writeback_inodes(&wbc);
211                         get_dirty_limits(&background_thresh,
212                                                 &dirty_thresh, mapping);
213                         nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
214                                         global_page_state(NR_UNSTABLE_NFS);
215                         if (nr_reclaimable +
216                                 global_page_state(NR_WRITEBACK)
217                                         <= dirty_thresh)
218                                                 break;
219                         pages_written += write_chunk - wbc.nr_to_write;
220                         if (pages_written >= write_chunk)
221                                 break;          /* We've done our duty */
222                 }
223                 blk_congestion_wait(WRITE, HZ/10);
224         }
225
226         if (nr_reclaimable + global_page_state(NR_WRITEBACK)
227                 <= dirty_thresh && dirty_exceeded)
228                         dirty_exceeded = 0;
229
230         if (writeback_in_progress(bdi))
231                 return;         /* pdflush is already working this queue */
232
233         /*
234          * In laptop mode, we wait until hitting the higher threshold before
235          * starting background writeout, and then write out all the way down
236          * to the lower threshold.  So slow writers cause minimal disk activity.
237          *
238          * In normal mode, we start background writeout at the lower
239          * background_thresh, to keep the amount of dirty memory low.
240          */
241         if ((laptop_mode && pages_written) ||
242              (!laptop_mode && (nr_reclaimable > background_thresh)))
243                 pdflush_operation(background_writeout, 0);
244 }
245
246 /**
247  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
248  * @mapping: address_space which was dirtied
249  * @nr_pages_dirtied: number of pages which the caller has just dirtied
250  *
251  * Processes which are dirtying memory should call in here once for each page
252  * which was newly dirtied.  The function will periodically check the system's
253  * dirty state and will initiate writeback if needed.
254  *
255  * On really big machines, checking the global dirty state is expensive, so try
256  * to avoid doing it too often (ratelimiting).  But once we're over the dirty memory
257  * limit we decrease the ratelimiting by a lot, to prevent individual processes
258  * from overshooting the limit by (ratelimit_pages) each.
259  */
260 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
261                                         unsigned long nr_pages_dirtied)
262 {
263         static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
264         unsigned long ratelimit;
265         unsigned long *p;
266
267         ratelimit = ratelimit_pages;
268         if (dirty_exceeded)
269                 ratelimit = 8;
270
271         /*
272          * Check the rate limiting. Also, we do not want to throttle real-time
273          * tasks in balance_dirty_pages(). Period.
274          */
275         preempt_disable();
276         p = &__get_cpu_var(ratelimits);
277         *p += nr_pages_dirtied;
278         if (unlikely(*p >= ratelimit)) {
279                 *p = 0;
280                 preempt_enable();
281                 balance_dirty_pages(mapping);
282                 return;
283         }
284         preempt_enable();
285 }
286 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
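/*
 * For illustration, a typical (hypothetical) caller in a write path balances
 * once per page it has just dirtied, roughly:
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited_nr(mapping, 1);
 *
 * Most callers reach this through the one-page wrapper declared in
 * <linux/writeback.h>; the direct call above is only a sketch.
 */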
287
288 void throttle_vm_writeout(void)
289 {
290         long background_thresh;
291         long dirty_thresh;
292
293         for ( ; ; ) {
294                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
295
296                 /*
297                  * Boost the allowable dirty threshold a bit for page
298                  * allocators so they don't get DoS'ed by heavy writers
299                  */
300                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
301
302                 if (global_page_state(NR_UNSTABLE_NFS) +
303                         global_page_state(NR_WRITEBACK) <= dirty_thresh)
304                                 break;
305                 blk_congestion_wait(WRITE, HZ/10);
306         }
307 }
308
309
310 /*
311  * writeback at least _min_pages, and keep writing until the amount of dirty
312  * memory is less than the background threshold, or until we're all clean.
313  */
314 static void background_writeout(unsigned long _min_pages)
315 {
316         long min_pages = _min_pages;
317         struct writeback_control wbc = {
318                 .bdi            = NULL,
319                 .sync_mode      = WB_SYNC_NONE,
320                 .older_than_this = NULL,
321                 .nr_to_write    = 0,
322                 .nonblocking    = 1,
323                 .range_cyclic   = 1,
324         };
325
326         for ( ; ; ) {
327                 long background_thresh;
328                 long dirty_thresh;
329
330                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
331                 if (global_page_state(NR_FILE_DIRTY) +
332                         global_page_state(NR_UNSTABLE_NFS) < background_thresh
333                                 && min_pages <= 0)
334                         break;
335                 wbc.encountered_congestion = 0;
336                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
337                 wbc.pages_skipped = 0;
338                 writeback_inodes(&wbc);
339                 min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
340                 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
341                         /* Wrote less than expected */
342                         blk_congestion_wait(WRITE, HZ/10);
343                         if (!wbc.encountered_congestion)
344                                 break;
345                 }
346         }
347 }
348
349 /*
350  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
351  * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
352  * -1 if all pdflush threads were busy.
353  */
354 int wakeup_pdflush(long nr_pages)
355 {
356         if (nr_pages == 0)
357                 nr_pages = global_page_state(NR_FILE_DIRTY) +
358                                 global_page_state(NR_UNSTABLE_NFS);
359         return pdflush_operation(background_writeout, nr_pages);
360 }
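/*
 * Sketch of typical use (the callers live outside this file): memory-reclaim
 * and buffer-allocation paths kick background writeout when they run short
 * of clean pages, e.g. a hypothetical call site:
 *
 *	wakeup_pdflush(0);	(zero means "write back all dirty pagecache")
 *
 * The return value only reports whether a pdflush thread could be
 * dispatched; the writeout itself happens asynchronously.
 */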
361
362 static void wb_timer_fn(unsigned long unused);
363 static void laptop_timer_fn(unsigned long unused);
364
365 static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
366 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
367
368 /*
369  * Periodic writeback of "old" data.
370  *
371  * Define "old": the first time one of an inode's pages is dirtied, we mark the
372  * dirtying-time in the inode's address_space.  So this periodic writeback code
373  * just walks the superblock inode list, writing back any inodes which are
374  * older than a specific point in time.
375  *
376  * Try to run once per dirty_writeback_interval.  But if a writeback event
377  * takes longer than one dirty_writeback_interval, then leave a
378  * one-second gap.
379  *
380  * older_than_this takes precedence over nr_to_write.  So we'll only write back
381  * all dirty pages if they are all attached to "old" mappings.
382  */
383 static void wb_kupdate(unsigned long arg)
384 {
385         unsigned long oldest_jif;
386         unsigned long start_jif;
387         unsigned long next_jif;
388         long nr_to_write;
389         struct writeback_control wbc = {
390                 .bdi            = NULL,
391                 .sync_mode      = WB_SYNC_NONE,
392                 .older_than_this = &oldest_jif,
393                 .nr_to_write    = 0,
394                 .nonblocking    = 1,
395                 .for_kupdate    = 1,
396                 .range_cyclic   = 1,
397         };
398
399         sync_supers();
400
401         oldest_jif = jiffies - dirty_expire_interval;
402         start_jif = jiffies;
403         next_jif = start_jif + dirty_writeback_interval;
404         nr_to_write = global_page_state(NR_FILE_DIRTY) +
405                         global_page_state(NR_UNSTABLE_NFS) +
406                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
407         while (nr_to_write > 0) {
408                 wbc.encountered_congestion = 0;
409                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
410                 writeback_inodes(&wbc);
411                 if (wbc.nr_to_write > 0) {
412                         if (wbc.encountered_congestion)
413                                 blk_congestion_wait(WRITE, HZ/10);
414                         else
415                                 break;  /* All the old data is written */
416                 }
417                 nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
418         }
419         if (time_before(next_jif, jiffies + HZ))
420                 next_jif = jiffies + HZ;
421         if (dirty_writeback_interval)
422                 mod_timer(&wb_timer, next_jif);
423 }
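/*
 * With the default sysctl values above (assuming they are unchanged),
 * dirty_writeback_interval = 5 * HZ and dirty_expire_interval = 30 * HZ,
 * so wb_kupdate() runs roughly every 5 seconds and writes back inodes whose
 * pages have been dirty for longer than about 30 seconds.
 */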
424
425 /*
426  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
427  */
428 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
429                 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
430 {
431         proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
432         if (dirty_writeback_interval) {
433                 mod_timer(&wb_timer,
434                         jiffies + dirty_writeback_interval);
435         } else {
436                 del_timer(&wb_timer);
437         }
438         return 0;
439 }
440
441 static void wb_timer_fn(unsigned long unused)
442 {
443         if (pdflush_operation(wb_kupdate, 0) < 0)
444                 mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
445 }
446
447 static void laptop_flush(unsigned long unused)
448 {
449         sys_sync();
450 }
451
452 static void laptop_timer_fn(unsigned long unused)
453 {
454         pdflush_operation(laptop_flush, 0);
455 }
456
457 /*
458  * We've spun up the disk and we're in laptop mode: schedule writeback
459  * of all dirty data a few seconds from now.  If the flush is already scheduled
460  * then push it back - the user is still using the disk.
461  */
462 void laptop_io_completion(void)
463 {
464         mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
465 }
466
467 /*
468  * We're in laptop mode and we've just synced. The sync's writes will have
469  * caused another writeback to be scheduled by laptop_io_completion.
470  * Nothing needs to be written back anymore, so we unschedule the writeback.
471  */
472 void laptop_sync_completion(void)
473 {
474         del_timer(&laptop_mode_wb_timer);
475 }
476
477 /*
478  * If ratelimit_pages is too high then we can get into dirty-data overload
479  * if a large number of processes all perform writes at the same time.
480  * If it is too low then SMP machines will perform the (expensive) global
481  * dirty-state check too often.
482  *
483  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
484  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
485  * thresholds before writeback cuts in.
486  *
487  * But the limit should not be set too high.  Because it also controls the
488  * amount of memory which the balance_dirty_pages() caller has to write back.
489  * If this is too large then the caller will block on the IO queue all the
490  * time.  So limit it to four megabytes - the balance_dirty_pages() caller
491  * will write six megabyte chunks, max.
492  */
493
494 static void set_ratelimit(void)
495 {
496         ratelimit_pages = total_pages / (num_online_cpus() * 32);
497         if (ratelimit_pages < 16)
498                 ratelimit_pages = 16;
499         if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
500                 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
501 }
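/*
 * Worked example (hypothetical machine, 4KB pages): with
 * total_pages = 262144 (1GB) and 2 online CPUs:
 *
 *	262144 / (2 * 32) = 4096 pages = 16MB, which exceeds the 4MB cap
 *	ratelimit_pages   = (4096 * 1024) / 4096 = 1024 pages (4MB)
 *
 * On a very small machine the 16-page floor applies instead.
 */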
502
503 static int __cpuinit
504 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
505 {
506         set_ratelimit();
507         return 0;
508 }
509
510 static struct notifier_block __cpuinitdata ratelimit_nb = {
511         .notifier_call  = ratelimit_handler,
512         .next           = NULL,
513 };
514
515 /*
516  * If the machine has a large highmem:lowmem ratio then scale back the default
517  * dirty memory thresholds: allowing too much dirty highmem pins an excessive
518  * number of buffer_heads.
519  */
520 void __init page_writeback_init(void)
521 {
522         long buffer_pages = nr_free_buffer_pages();
523         long correction;
524
525         total_pages = nr_free_pagecache_pages();
526
527         correction = (100 * 4 * buffer_pages) / total_pages;
528
529         if (correction < 100) {
530                 dirty_background_ratio *= correction;
531                 dirty_background_ratio /= 100;
532                 vm_dirty_ratio *= correction;
533                 vm_dirty_ratio /= 100;
534
535                 if (dirty_background_ratio <= 0)
536                         dirty_background_ratio = 1;
537                 if (vm_dirty_ratio <= 0)
538                         vm_dirty_ratio = 1;
539         }
540         mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
541         set_ratelimit();
542         register_cpu_notifier(&ratelimit_nb);
543 }
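/*
 * Example of the highmem correction above (hypothetical split): if only 1/8
 * of pagecache-usable memory is lowmem (buffer_pages), the correction is
 * 100 * 4 / 8 = 50, so the default 10%/40% thresholds are scaled down to
 * 5% and 20% of total memory.
 */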
544
545 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
546 {
547         int ret;
548
549         if (wbc->nr_to_write <= 0)
550                 return 0;
551         wbc->for_writepages = 1;
552         if (mapping->a_ops->writepages)
553                 ret =  mapping->a_ops->writepages(mapping, wbc);
554         else
555                 ret = generic_writepages(mapping, wbc);
556         wbc->for_writepages = 0;
557         return ret;
558 }
559
560 /**
561  * write_one_page - write out a single page and optionally wait on I/O
562  *
563  * @page: the page to write
564  * @wait: if true, wait on writeout
565  *
566  * The page must be locked by the caller and will be unlocked upon return.
567  *
568  * write_one_page() returns a negative error code if I/O failed.
569  */
570 int write_one_page(struct page *page, int wait)
571 {
572         struct address_space *mapping = page->mapping;
573         int ret = 0;
574         struct writeback_control wbc = {
575                 .sync_mode = WB_SYNC_ALL,
576                 .nr_to_write = 1,
577         };
578
579         BUG_ON(!PageLocked(page));
580
581         if (wait)
582                 wait_on_page_writeback(page);
583
584         if (clear_page_dirty_for_io(page)) {
585                 page_cache_get(page);
586                 ret = mapping->a_ops->writepage(page, &wbc);
587                 if (ret == 0 && wait) {
588                         wait_on_page_writeback(page);
589                         if (PageError(page))
590                                 ret = -EIO;
591                 }
592                 page_cache_release(page);
593         } else {
594                 unlock_page(page);
595         }
596         return ret;
597 }
598 EXPORT_SYMBOL(write_one_page);
599
600 /*
601  * For address_spaces which do not use buffers.  Just tag the page as dirty in
602  * its radix tree.
603  *
604  * This is also used when a single buffer is being dirtied: we want to set the
605  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
606  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
607  *
608  * Most callers have locked the page, which pins the address_space in memory.
609  * zap_pte_range(), however, does not lock the page; in that case the
610  * mapping is pinned by the vma's ->vm_file reference.
611  *
612  * We take care to handle the case where the page was truncated from the
613  * mapping by re-checking page_mapping() inside tree_lock.
614  */
615 int __set_page_dirty_nobuffers(struct page *page)
616 {
617         if (!TestSetPageDirty(page)) {
618                 struct address_space *mapping = page_mapping(page);
619                 struct address_space *mapping2;
620
621                 if (mapping) {
622                         write_lock_irq(&mapping->tree_lock);
623                         mapping2 = page_mapping(page);
624                         if (mapping2) { /* Race with truncate? */
625                                 BUG_ON(mapping2 != mapping);
626                                 if (mapping_cap_account_dirty(mapping))
627                                         __inc_zone_page_state(page,
628                                                                 NR_FILE_DIRTY);
629                                 radix_tree_tag_set(&mapping->page_tree,
630                                         page_index(page), PAGECACHE_TAG_DIRTY);
631                         }
632                         write_unlock_irq(&mapping->tree_lock);
633                         if (mapping->host) {
634                                 /* !PageAnon && !swapper_space */
635                                 __mark_inode_dirty(mapping->host,
636                                                         I_DIRTY_PAGES);
637                         }
638                 }
639                 return 1;
640         }
641         return 0;
642 }
643 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
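/*
 * For illustration, a filesystem that keeps no buffer_heads on its pages can
 * plug this helper straight into its address_space_operations (hypothetical
 * example, names invented):
 *
 *	static struct address_space_operations foo_aops = {
 *		.set_page_dirty	= __set_page_dirty_nobuffers,
 *		...
 *	};
 */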
644
645 /*
646  * When a writepage implementation decides that it doesn't want to write this
647  * page for some reason, it should redirty the locked page via
648  * redirty_page_for_writepage() and it should then unlock the page and return 0
649  */
650 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
651 {
652         wbc->pages_skipped++;
653         return __set_page_dirty_nobuffers(page);
654 }
655 EXPORT_SYMBOL(redirty_page_for_writepage);
656
657 /*
658  * If the mapping doesn't provide a set_page_dirty a_op, then
659  * just fall through and assume that it wants buffer_heads.
660  */
661 int fastcall set_page_dirty(struct page *page)
662 {
663         struct address_space *mapping = page_mapping(page);
664
665         if (likely(mapping)) {
666                 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
667                 if (spd)
668                         return (*spd)(page);
669                 return __set_page_dirty_buffers(page);
670         }
671         if (!PageDirty(page)) {
672                 if (!TestSetPageDirty(page))
673                         return 1;
674         }
675         return 0;
676 }
677 EXPORT_SYMBOL(set_page_dirty);
678
679 /*
680  * set_page_dirty() is racy if the caller has no reference against
681  * page->mapping->host, and if the page is unlocked.  This is because another
682  * CPU could truncate the page off the mapping and then free the mapping.
683  *
684  * Usually, the page _is_ locked, or the caller is a user-space process which
685  * holds a reference on the inode by having an open file.
686  *
687  * In other cases, the page should be locked before running set_page_dirty().
688  */
689 int set_page_dirty_lock(struct page *page)
690 {
691         int ret;
692
693         lock_page(page);
694         ret = set_page_dirty(page);
695         unlock_page(page);
696         return ret;
697 }
698 EXPORT_SYMBOL(set_page_dirty_lock);
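/*
 * Sketch of a typical (hypothetical) user: a driver doing DMA into user
 * pages obtained with get_user_pages() marks them dirty once the hardware
 * has written to them, without holding any page lock of its own:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		page_cache_release(pages[i]);
 *	}
 */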
699
700 /*
701  * Clear a page's dirty flag, while caring for dirty memory accounting. 
702  * Returns true if the page was previously dirty.
703  */
704 int test_clear_page_dirty(struct page *page)
705 {
706         struct address_space *mapping = page_mapping(page);
707         unsigned long flags;
708
709         if (mapping) {
710                 write_lock_irqsave(&mapping->tree_lock, flags);
711                 if (TestClearPageDirty(page)) {
712                         radix_tree_tag_clear(&mapping->page_tree,
713                                                 page_index(page),
714                                                 PAGECACHE_TAG_DIRTY);
715                         if (mapping_cap_account_dirty(mapping))
716                                 __dec_zone_page_state(page, NR_FILE_DIRTY);
717                         write_unlock_irqrestore(&mapping->tree_lock, flags);
718                         return 1;
719                 }
720                 write_unlock_irqrestore(&mapping->tree_lock, flags);
721                 return 0;
722         }
723         return TestClearPageDirty(page);
724 }
725 EXPORT_SYMBOL(test_clear_page_dirty);
726
727 /*
728  * Clear a page's dirty flag, while caring for dirty memory accounting.
729  * Returns true if the page was previously dirty.
730  *
731  * This is for preparing to put the page under writeout.  We leave the page
732  * tagged as dirty in the radix tree so that a concurrent write-for-sync
733  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
734  * implementation will run either set_page_writeback() or set_page_dirty(),
735  * at which stage we bring the page's dirty flag and radix-tree dirty tag
736  * back into sync.
737  *
738  * This incoherency between the page's dirty flag and radix-tree tag is
739  * unfortunate, but it only exists while the page is locked.
740  */
741 int clear_page_dirty_for_io(struct page *page)
742 {
743         struct address_space *mapping = page_mapping(page);
744
745         if (mapping) {
746                 if (TestClearPageDirty(page)) {
747                         if (mapping_cap_account_dirty(mapping))
748                                 dec_zone_page_state(page, NR_FILE_DIRTY);
749                         return 1;
750                 }
751                 return 0;
752         }
753         return TestClearPageDirty(page);
754 }
755 EXPORT_SYMBOL(clear_page_dirty_for_io);
756
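/*
 * The two helpers below keep the PG_writeback page flag and the mapping's
 * PAGECACHE_TAG_WRITEBACK radix-tree tag in sync, so that waiters can find
 * in-flight pages via a tagged radix-tree walk.  Both return the previous
 * state of the writeback flag.
 */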
757 int test_clear_page_writeback(struct page *page)
758 {
759         struct address_space *mapping = page_mapping(page);
760         int ret;
761
762         if (mapping) {
763                 unsigned long flags;
764
765                 write_lock_irqsave(&mapping->tree_lock, flags);
766                 ret = TestClearPageWriteback(page);
767                 if (ret)
768                         radix_tree_tag_clear(&mapping->page_tree,
769                                                 page_index(page),
770                                                 PAGECACHE_TAG_WRITEBACK);
771                 write_unlock_irqrestore(&mapping->tree_lock, flags);
772         } else {
773                 ret = TestClearPageWriteback(page);
774         }
775         return ret;
776 }
777
778 int test_set_page_writeback(struct page *page)
779 {
780         struct address_space *mapping = page_mapping(page);
781         int ret;
782
783         if (mapping) {
784                 unsigned long flags;
785
786                 write_lock_irqsave(&mapping->tree_lock, flags);
787                 ret = TestSetPageWriteback(page);
788                 if (!ret)
789                         radix_tree_tag_set(&mapping->page_tree,
790                                                 page_index(page),
791                                                 PAGECACHE_TAG_WRITEBACK);
792                 if (!PageDirty(page))
793                         radix_tree_tag_clear(&mapping->page_tree,
794                                                 page_index(page),
795                                                 PAGECACHE_TAG_DIRTY);
796                 write_unlock_irqrestore(&mapping->tree_lock, flags);
797         } else {
798                 ret = TestSetPageWriteback(page);
799         }
800         return ret;
801
802 }
803 EXPORT_SYMBOL(test_set_page_writeback);
804
805 /*
806  * Return true if any of the pages in the mapping are marked with the
807  * passed tag.
808  */
809 int mapping_tagged(struct address_space *mapping, int tag)
810 {
811         unsigned long flags;
812         int ret;
813
814         read_lock_irqsave(&mapping->tree_lock, flags);
815         ret = radix_tree_tagged(&mapping->page_tree, tag);
816         read_unlock_irqrestore(&mapping->tree_lock, flags);
817         return ret;
818 }
819 EXPORT_SYMBOL(mapping_tagged);