/* mm/backing-dev.c */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
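
/*
 * Resulting layout (illustrative; assumes debugfs is mounted at the
 * usual /sys/kernel/debug): a bdi registered as "8:0" ends up with a
 * read-only file
 *
 *	/sys/kernel/debug/bdi/8:0/stats
 *
 * whose contents come from bdi_debug_stats_show() above.
 */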
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
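
/*
 * Worked example of the conversion above (assuming 4K pages, i.e.
 * PAGE_SHIFT == 12): writing "128" to the sysfs file yields
 * ra_pages = 128 >> 2 = 32 pages, which is 128 kB of readahead.
 */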

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
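/* e.g. with 4K pages, K(32) == 128: 32 pages is 128 kB */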

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
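
/*
 * Example: with the common dirty_writeback_interval default of 500
 * centisecs (a system-wide tunable defined elsewhere, not in this
 * file), the wakeup above lands msecs_to_jiffies(5000) jiffies, i.e.
 * about 5 seconds, from now.
 */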

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
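/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12) this is
 * 100 << 8 == 25600 pages/sec, and 25600 pages * 4 kB == 100 MB/s.
 */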

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from the bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = container_of(parent, struct bdi_writeback_congested,
					 rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}
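
/*
 * Note the allocate-outside-the-lock pattern above: the rbtree is
 * walked under cgwb_lock, and on a miss the lock is dropped, storage
 * is allocated, and the walk restarts from "retry".  The only caller
 * in this file is wb_init(), which does:
 *
 *	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 */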

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;	/* must drop the blkcg_css ref taken above */
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfp & __GFP_WAIT);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
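
/*
 * Sketch of a caller honouring the refcounting rule above (illustrative
 * only; the css helpers are generic cgroup APIs, not defined in this
 * file):
 *
 *	memcg_css = task_get_css(current, memory_cgrp_id);
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOIO);
 *	css_put(memcg_css);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */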

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = mem_cgroup_root_css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct bdi_writeback_congested *congested, *congested_n;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	rbtree_postorder_for_each_entry_safe(congested, congested_n,
					&bdi->cgwb_congested_tree, rb_node) {
		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shutdown and
	 * released before returning.  Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		kfree(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}
EXPORT_SYMBOL(bdi_init);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	wb_exit(&bdi->wb);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
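
/*
 * Typical filesystem usage (a sketch; "sbi" and "myfs" are illustrative
 * names, not from this file):
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *	if (err)
 *		return err;
 *
 * with a matching bdi_destroy(&sbi->bdi) on the unmount path.
 */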

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
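
/*
 * Illustrative back-off loop (hypothetical caller, not from this file):
 *
 *	while (more_dirty_work_to_do())
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * more_dirty_work_to_do() is a made-up predicate; BLK_RW_ASYNC selects
 * the async half of congestion_wqh[].
 */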

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If some backing_dev is congested and the given @zone has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of zone congestion, the function yields the processor
 * via cond_resched() if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
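
/*
 * Sketch of a reclaim-style call:
 *
 *	wait_iff_congested(zone, BLK_RW_ASYNC, HZ / 10);
 *
 * which sleeps only if some bdi is congested and ZONE_CONGESTED is set
 * on @zone, and otherwise just yields via cond_resched().
 */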

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
			table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}