md/raid10: ensure device failure recorded before write request returns.
[pandora-kernel.git] / drivers / md / dm-bufio.c
1 /*
2  * Copyright (C) 2009-2011 Red Hat, Inc.
3  *
4  * Author: Mikulas Patocka <mpatocka@redhat.com>
5  *
6  * This file is released under the GPL.
7  */
8
9 #include "dm-bufio.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/jiffies.h>
15 #include <linux/vmalloc.h>
16 #include <linux/shrinker.h>
17 #include <linux/module.h>
18 #include <linux/rbtree.h>
19
20 #define DM_MSG_PREFIX "bufio"
21
22 /*
23  * Memory management policy:
24  *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
25  *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
26  *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
27  *      Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
28  *      dirty buffers.
29  */
30 #define DM_BUFIO_MIN_BUFFERS            8
31
32 #define DM_BUFIO_MEMORY_PERCENT         2
33 #define DM_BUFIO_VMALLOC_PERCENT        25
34 #define DM_BUFIO_WRITEBACK_PERCENT      75
35
36 /*
37  * Check buffer ages in this interval (seconds)
38  */
39 #define DM_BUFIO_WORK_TIMER_SECS        30
40
41 /*
42  * Free buffers when they are older than this (seconds)
43  */
44 #define DM_BUFIO_DEFAULT_AGE_SECS       300
45
46 /*
47  * The nr of bytes of cached data to keep around.
48  */
49 #define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
50
51 /*
52  * The number of bvec entries that are embedded directly in the buffer.
53  * If the chunk size is larger, dm-io is used to do the io.
54  */
55 #define DM_BUFIO_INLINE_VECS            16
56
57 /*
58  * Don't try to use kmem_cache_alloc for blocks larger than this.
59  * For explanation, see alloc_buffer_data below.
60  */
61 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT  (PAGE_SIZE >> 1)
62 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT   (PAGE_SIZE << (MAX_ORDER - 1))
63
64 /*
65  * dm_buffer->list_mode
66  */
67 #define LIST_CLEAN      0
68 #define LIST_DIRTY      1
69 #define LIST_SIZE       2
70
71 /*
72  * Linking of buffers:
73  *      All buffers are linked to cache_hash with their hash_list field.
74  *
75  *      Clean buffers that are not being written (B_WRITING not set)
76  *      are linked to lru[LIST_CLEAN] with their lru_list field.
77  *
78  *      Dirty and clean buffers that are being written are linked to
79  *      lru[LIST_DIRTY] with their lru_list field. When the write
80  *      finishes, the buffer cannot be relinked immediately (because we
81  *      are in an interrupt context and relinking requires process
82  *      context), so some clean-not-writing buffers can be held on
83  *      dirty_lru too.  They are later added to lru in the process
84  *      context.
85  */
86 struct dm_bufio_client {
87         struct mutex lock;
88
89         struct list_head lru[LIST_SIZE];
90         unsigned long n_buffers[LIST_SIZE];
91
92         struct block_device *bdev;
93         unsigned block_size;
94         unsigned char sectors_per_block_bits;
95         unsigned char pages_per_block_bits;
96         unsigned char blocks_per_page_bits;
97         unsigned aux_size;
98         void (*alloc_callback)(struct dm_buffer *);
99         void (*write_callback)(struct dm_buffer *);
100
101         struct dm_io_client *dm_io;
102
103         struct list_head reserved_buffers;
104         unsigned need_reserved_buffers;
105
106         unsigned minimum_buffers;
107
108         struct rb_root buffer_tree;
109         wait_queue_head_t free_buffer_wait;
110
111         int async_write_error;
112
113         struct list_head client_list;
114         struct shrinker shrinker;
115 };
116
117 /*
118  * Buffer state bits.
119  */
120 #define B_READING       0
121 #define B_WRITING       1
122 #define B_DIRTY         2
123
124 /*
125  * Describes how the block was allocated:
126  * kmem_cache_alloc(), __get_free_pages() or vmalloc().
127  * See the comment at alloc_buffer_data.
128  */
129 enum data_mode {
130         DATA_MODE_SLAB = 0,
131         DATA_MODE_GET_FREE_PAGES = 1,
132         DATA_MODE_VMALLOC = 2,
133         DATA_MODE_LIMIT = 3
134 };
135
136 struct dm_buffer {
137         struct rb_node node;
138         struct list_head lru_list;
139         sector_t block;
140         void *data;
141         enum data_mode data_mode;
142         unsigned char list_mode;                /* LIST_* */
143         unsigned hold_count;
144         int read_error;
145         int write_error;
146         unsigned long state;
147         unsigned long last_accessed;
148         struct dm_bufio_client *c;
149         struct list_head write_list;
150         struct bio bio;
151         struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
152 };
153
154 /*----------------------------------------------------------------*/
155
156 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
157 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
158
159 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
160 {
161         unsigned ret = c->blocks_per_page_bits - 1;
162
163         BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
164
165         return ret;
166 }
167
168 #define DM_BUFIO_CACHE(c)       (dm_bufio_caches[dm_bufio_cache_index(c)])
169 #define DM_BUFIO_CACHE_NAME(c)  (dm_bufio_cache_names[dm_bufio_cache_index(c)])
170
171 #define dm_bufio_in_request()   (!!current->bio_list)
172
173 static void dm_bufio_lock(struct dm_bufio_client *c)
174 {
175         mutex_lock_nested(&c->lock, dm_bufio_in_request());
176 }
177
178 static int dm_bufio_trylock(struct dm_bufio_client *c)
179 {
180         return mutex_trylock(&c->lock);
181 }
182
183 static void dm_bufio_unlock(struct dm_bufio_client *c)
184 {
185         mutex_unlock(&c->lock);
186 }
187
188 /*
189  * FIXME Move to sched.h?
190  */
191 #ifdef CONFIG_PREEMPT_VOLUNTARY
192 #  define dm_bufio_cond_resched()               \
193 do {                                            \
194         if (unlikely(need_resched()))           \
195                 _cond_resched();                \
196 } while (0)
197 #else
198 #  define dm_bufio_cond_resched()                do { } while (0)
199 #endif
200
201 /*----------------------------------------------------------------*/
202
203 /*
204  * Default cache size: available memory divided by the ratio.
205  */
206 static unsigned long dm_bufio_default_cache_size;
207
208 /*
209  * Total cache size set by the user.
210  */
211 static unsigned long dm_bufio_cache_size;
212
213 /*
214  * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
215  * at any time.  If it disagrees, the user has changed cache size.
216  */
217 static unsigned long dm_bufio_cache_size_latch;
218
219 static DEFINE_SPINLOCK(param_spinlock);
220
221 /*
222  * Buffers are freed after this timeout
223  */
224 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
225 static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
226
227 static unsigned long dm_bufio_peak_allocated;
228 static unsigned long dm_bufio_allocated_kmem_cache;
229 static unsigned long dm_bufio_allocated_get_free_pages;
230 static unsigned long dm_bufio_allocated_vmalloc;
231 static unsigned long dm_bufio_current_allocated;
232
233 /*----------------------------------------------------------------*/
234
235 /*
236  * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
237  */
238 static unsigned long dm_bufio_cache_size_per_client;
239
240 /*
241  * The current number of clients.
242  */
243 static int dm_bufio_client_count;
244
245 /*
246  * The list of all clients.
247  */
248 static LIST_HEAD(dm_bufio_all_clients);
249
250 /*
251  * This mutex protects dm_bufio_cache_size_latch,
252  * dm_bufio_cache_size_per_client and dm_bufio_client_count
253  */
254 static DEFINE_MUTEX(dm_bufio_clients_lock);
255
256 /*----------------------------------------------------------------
257  * A red/black tree acts as an index for all the buffers.
258  *--------------------------------------------------------------*/
259 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
260 {
261         struct rb_node *n = c->buffer_tree.rb_node;
262         struct dm_buffer *b;
263
264         while (n) {
265                 b = container_of(n, struct dm_buffer, node);
266
267                 if (b->block == block)
268                         return b;
269
270                 n = (b->block < block) ? n->rb_left : n->rb_right;
271         }
272
273         return NULL;
274 }
275
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
277 {
278         struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
279         struct dm_buffer *found;
280
281         while (*new) {
282                 found = container_of(*new, struct dm_buffer, node);
283
284                 if (found->block == b->block) {
285                         BUG_ON(found != b);
286                         return;
287                 }
288
289                 parent = *new;
290                 new = (found->block < b->block) ?
291                         &((*new)->rb_left) : &((*new)->rb_right);
292         }
293
294         rb_link_node(&b->node, parent, new);
295         rb_insert_color(&b->node, &c->buffer_tree);
296 }
297
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
299 {
300         rb_erase(&b->node, &c->buffer_tree);
301 }
302
303 /*----------------------------------------------------------------*/
304
305 static void adjust_total_allocated(enum data_mode data_mode, long diff)
306 {
307         static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
308                 &dm_bufio_allocated_kmem_cache,
309                 &dm_bufio_allocated_get_free_pages,
310                 &dm_bufio_allocated_vmalloc,
311         };
312
313         spin_lock(&param_spinlock);
314
315         *class_ptr[data_mode] += diff;
316
317         dm_bufio_current_allocated += diff;
318
319         if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
320                 dm_bufio_peak_allocated = dm_bufio_current_allocated;
321
322         spin_unlock(&param_spinlock);
323 }
324
325 /*
326  * Change the number of clients and recalculate per-client limit.
327  */
328 static void __cache_size_refresh(void)
329 {
330         BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
331         BUG_ON(dm_bufio_client_count < 0);
332
333         dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
334
335         /*
336          * Use default if set to 0 and report the actual cache size used.
337          */
338         if (!dm_bufio_cache_size_latch) {
339                 (void)cmpxchg(&dm_bufio_cache_size, 0,
340                               dm_bufio_default_cache_size);
341                 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
342         }
343
344         dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
345                                          (dm_bufio_client_count ? : 1);
346 }
347
348 /*
349  * Allocating buffer data.
350  *
351  * Small buffers are allocated with kmem_cache, to use space optimally.
352  *
353  * For large buffers, we choose between get_free_pages and vmalloc.
354  * Each has advantages and disadvantages.
355  *
356  * __get_free_pages can randomly fail if the memory is fragmented.
357  * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
358  * as low as 128M) so using it for caching is not appropriate.
359  *
360  * If the allocation may fail we use __get_free_pages. Memory fragmentation
361  * won't have a fatal effect here, but it just causes flushes of some other
362  * buffers and more I/O will be performed. Don't use __get_free_pages if it
363  * always fails (i.e. order >= MAX_ORDER).
364  *
365  * If the allocation shouldn't fail we use __vmalloc. This is only for the
366  * initial reserve allocation, so there's no risk of wasting all vmalloc
367  * space.
368  */
369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
370                                enum data_mode *data_mode)
371 {
372         unsigned noio_flag;
373         void *ptr;
374
375         if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
376                 *data_mode = DATA_MODE_SLAB;
377                 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
378         }
379
380         if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
381             gfp_mask & __GFP_NORETRY) {
382                 *data_mode = DATA_MODE_GET_FREE_PAGES;
383                 return (void *)__get_free_pages(gfp_mask,
384                                                 c->pages_per_block_bits);
385         }
386
387         *data_mode = DATA_MODE_VMALLOC;
388
389         /*
390          * __vmalloc allocates the data pages and auxiliary structures with
391          * gfp_flags that were specified, but pagetables are always allocated
392          * with GFP_KERNEL, no matter what was specified as gfp_mask.
393          *
394          * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
395          * all allocations done by this process (including pagetables) are done
396          * as if GFP_NOIO was specified.
397          */
398
399         if (gfp_mask & __GFP_NORETRY)
400                 noio_flag = memalloc_noio_save();
401
402         ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
403
404         if (gfp_mask & __GFP_NORETRY)
405                 memalloc_noio_restore(noio_flag);
406
407         return ptr;
408 }
409
410 /*
411  * Free buffer's data.
412  */
413 static void free_buffer_data(struct dm_bufio_client *c,
414                              void *data, enum data_mode data_mode)
415 {
416         switch (data_mode) {
417         case DATA_MODE_SLAB:
418                 kmem_cache_free(DM_BUFIO_CACHE(c), data);
419                 break;
420
421         case DATA_MODE_GET_FREE_PAGES:
422                 free_pages((unsigned long)data, c->pages_per_block_bits);
423                 break;
424
425         case DATA_MODE_VMALLOC:
426                 vfree(data);
427                 break;
428
429         default:
430                 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
431                        data_mode);
432                 BUG();
433         }
434 }
435
436 /*
437  * Allocate buffer and its data.
438  */
439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
440 {
441         struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
442                                       gfp_mask);
443
444         if (!b)
445                 return NULL;
446
447         b->c = c;
448
449         b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
450         if (!b->data) {
451                 kfree(b);
452                 return NULL;
453         }
454
455         adjust_total_allocated(b->data_mode, (long)c->block_size);
456
457         return b;
458 }
459
460 /*
461  * Free buffer and its data.
462  */
463 static void free_buffer(struct dm_buffer *b)
464 {
465         struct dm_bufio_client *c = b->c;
466
467         adjust_total_allocated(b->data_mode, -(long)c->block_size);
468
469         free_buffer_data(c, b->data, b->data_mode);
470         kfree(b);
471 }
472
473 /*
474  * Link buffer to the hash list and clean or dirty queue.
475  */
476 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
477 {
478         struct dm_bufio_client *c = b->c;
479
480         c->n_buffers[dirty]++;
481         b->block = block;
482         b->list_mode = dirty;
483         list_add(&b->lru_list, &c->lru[dirty]);
484         __insert(b->c, b);
485         b->last_accessed = jiffies;
486 }
487
488 /*
489  * Unlink buffer from the hash list and dirty or clean queue.
490  */
491 static void __unlink_buffer(struct dm_buffer *b)
492 {
493         struct dm_bufio_client *c = b->c;
494
495         BUG_ON(!c->n_buffers[b->list_mode]);
496
497         c->n_buffers[b->list_mode]--;
498         __remove(b->c, b);
499         list_del(&b->lru_list);
500 }
501
502 /*
503  * Place the buffer to the head of dirty or clean LRU queue.
504  */
505 static void __relink_lru(struct dm_buffer *b, int dirty)
506 {
507         struct dm_bufio_client *c = b->c;
508
509         BUG_ON(!c->n_buffers[b->list_mode]);
510
511         c->n_buffers[b->list_mode]--;
512         c->n_buffers[dirty]++;
513         b->list_mode = dirty;
514         list_move(&b->lru_list, &c->lru[dirty]);
515         b->last_accessed = jiffies;
516 }
517
518 /*----------------------------------------------------------------
519  * Submit I/O on the buffer.
520  *
521  * Bio interface is faster but it has some problems:
522  *      the vector list is limited (increasing this limit increases
523  *      memory-consumption per buffer, so it is not viable);
524  *
525  *      the memory must be direct-mapped, not vmalloced;
526  *
527  *      the I/O driver can reject requests spuriously if it thinks that
528  *      the requests are too big for the device or if they cross a
529  *      controller-defined memory boundary.
530  *
531  * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
532  * it is not vmalloced, try using the bio interface.
533  *
534  * If the buffer is big, if it is vmalloced or if the underlying device
535  * rejects the bio because it is too large, use dm-io layer to do the I/O.
536  * The dm-io layer splits the I/O into multiple requests, avoiding the above
537  * shortcomings.
538  *--------------------------------------------------------------*/
539
540 /*
541  * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
542  * that the request was handled directly with bio interface.
543  */
544 static void dmio_complete(unsigned long error, void *context)
545 {
546         struct dm_buffer *b = context;
547
548         b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
549 }
550
551 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
552                      bio_end_io_t *end_io)
553 {
554         int r;
555         struct dm_io_request io_req = {
556                 .bi_rw = rw,
557                 .notify.fn = dmio_complete,
558                 .notify.context = b,
559                 .client = b->c->dm_io,
560         };
561         struct dm_io_region region = {
562                 .bdev = b->c->bdev,
563                 .sector = block << b->c->sectors_per_block_bits,
564                 .count = b->c->block_size >> SECTOR_SHIFT,
565         };
566
567         if (b->data_mode != DATA_MODE_VMALLOC) {
568                 io_req.mem.type = DM_IO_KMEM;
569                 io_req.mem.ptr.addr = b->data;
570         } else {
571                 io_req.mem.type = DM_IO_VMA;
572                 io_req.mem.ptr.vma = b->data;
573         }
574
575         b->bio.bi_end_io = end_io;
576
577         r = dm_io(&io_req, 1, &region, NULL);
578         if (r)
579                 end_io(&b->bio, r);
580 }
581
582 static void inline_endio(struct bio *bio, int error)
583 {
584         bio_end_io_t *end_fn = bio->bi_private;
585
586         /*
587          * Reset the bio to free any attached resources
588          * (e.g. bio integrity profiles).
589          */
590         bio_reset(bio);
591
592         end_fn(bio, error);
593 }
594
595 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
596                            bio_end_io_t *end_io)
597 {
598         char *ptr;
599         int len;
600
601         bio_init(&b->bio);
602         b->bio.bi_io_vec = b->bio_vec;
603         b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
604         b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
605         b->bio.bi_bdev = b->c->bdev;
606         b->bio.bi_end_io = inline_endio;
607         /*
608          * Use of .bi_private isn't a problem here because
609          * the dm_buffer's inline bio is local to bufio.
610          */
611         b->bio.bi_private = end_io;
612
613         /*
614          * We assume that if len >= PAGE_SIZE ptr is page-aligned.
615          * If len < PAGE_SIZE the buffer doesn't cross page boundary.
616          */
617         ptr = b->data;
618         len = b->c->block_size;
619
620         if (len >= PAGE_SIZE)
621                 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
622         else
623                 BUG_ON((unsigned long)ptr & (len - 1));
624
625         do {
626                 if (!bio_add_page(&b->bio, virt_to_page(ptr),
627                                   len < PAGE_SIZE ? len : PAGE_SIZE,
628                                   virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
629                         BUG_ON(b->c->block_size <= PAGE_SIZE);
630                         use_dmio(b, rw, block, end_io);
631                         return;
632                 }
633
634                 len -= PAGE_SIZE;
635                 ptr += PAGE_SIZE;
636         } while (len > 0);
637
638         submit_bio(rw, &b->bio);
639 }
640
641 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
642                       bio_end_io_t *end_io)
643 {
644         if (rw == WRITE && b->c->write_callback)
645                 b->c->write_callback(b);
646
647         if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
648             b->data_mode != DATA_MODE_VMALLOC)
649                 use_inline_bio(b, rw, block, end_io);
650         else
651                 use_dmio(b, rw, block, end_io);
652 }
653
654 /*----------------------------------------------------------------
655  * Writing dirty buffers
656  *--------------------------------------------------------------*/
657
658 /*
659  * The endio routine for write.
660  *
661  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
662  * it.
663  */
664 static void write_endio(struct bio *bio, int error)
665 {
666         struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
667
668         b->write_error = error;
669         if (unlikely(error)) {
670                 struct dm_bufio_client *c = b->c;
671                 (void)cmpxchg(&c->async_write_error, 0, error);
672         }
673
674         BUG_ON(!test_bit(B_WRITING, &b->state));
675
676         smp_mb__before_atomic();
677         clear_bit(B_WRITING, &b->state);
678         smp_mb__after_atomic();
679
680         wake_up_bit(&b->state, B_WRITING);
681 }
682
683 /*
684  * Initiate a write on a dirty buffer, but don't wait for it.
685  *
686  * - If the buffer is not dirty, exit.
687  * - If there some previous write going on, wait for it to finish (we can't
688  *   have two writes on the same buffer simultaneously).
689  * - Submit our write and don't wait on it. We set B_WRITING indicating
690  *   that there is a write in progress.
691  */
692 static void __write_dirty_buffer(struct dm_buffer *b,
693                                  struct list_head *write_list)
694 {
695         if (!test_bit(B_DIRTY, &b->state))
696                 return;
697
698         clear_bit(B_DIRTY, &b->state);
699         wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
700
701         if (!write_list)
702                 submit_io(b, WRITE, b->block, write_endio);
703         else
704                 list_add_tail(&b->write_list, write_list);
705 }
706
707 static void __flush_write_list(struct list_head *write_list)
708 {
709         struct blk_plug plug;
710         blk_start_plug(&plug);
711         while (!list_empty(write_list)) {
712                 struct dm_buffer *b =
713                         list_entry(write_list->next, struct dm_buffer, write_list);
714                 list_del(&b->write_list);
715                 submit_io(b, WRITE, b->block, write_endio);
716                 dm_bufio_cond_resched();
717         }
718         blk_finish_plug(&plug);
719 }
720
721 /*
722  * Wait until any activity on the buffer finishes.  Possibly write the
723  * buffer if it is dirty.  When this function finishes, there is no I/O
724  * running on the buffer and the buffer is not dirty.
725  */
726 static void __make_buffer_clean(struct dm_buffer *b)
727 {
728         BUG_ON(b->hold_count);
729
730         if (!b->state)  /* fast case */
731                 return;
732
733         wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
734         __write_dirty_buffer(b, NULL);
735         wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
736 }
737
738 /*
739  * Find some buffer that is not held by anybody, clean it, unlink it and
740  * return it.
741  */
742 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
743 {
744         struct dm_buffer *b;
745
746         list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
747                 BUG_ON(test_bit(B_WRITING, &b->state));
748                 BUG_ON(test_bit(B_DIRTY, &b->state));
749
750                 if (!b->hold_count) {
751                         __make_buffer_clean(b);
752                         __unlink_buffer(b);
753                         return b;
754                 }
755                 dm_bufio_cond_resched();
756         }
757
758         list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
759                 BUG_ON(test_bit(B_READING, &b->state));
760
761                 if (!b->hold_count) {
762                         __make_buffer_clean(b);
763                         __unlink_buffer(b);
764                         return b;
765                 }
766                 dm_bufio_cond_resched();
767         }
768
769         return NULL;
770 }
771
772 /*
773  * Wait until some other threads free some buffer or release hold count on
774  * some buffer.
775  *
776  * This function is entered with c->lock held, drops it and regains it
777  * before exiting.
778  */
779 static void __wait_for_free_buffer(struct dm_bufio_client *c)
780 {
781         DECLARE_WAITQUEUE(wait, current);
782
783         add_wait_queue(&c->free_buffer_wait, &wait);
784         set_task_state(current, TASK_UNINTERRUPTIBLE);
785         dm_bufio_unlock(c);
786
787         io_schedule();
788
789         remove_wait_queue(&c->free_buffer_wait, &wait);
790
791         dm_bufio_lock(c);
792 }
793
794 enum new_flag {
795         NF_FRESH = 0,
796         NF_READ = 1,
797         NF_GET = 2,
798         NF_PREFETCH = 3
799 };
800
801 /*
802  * Allocate a new buffer. If the allocation is not possible, wait until
803  * some other thread frees a buffer.
804  *
805  * May drop the lock and regain it.
806  */
807 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
808 {
809         struct dm_buffer *b;
810
811         /*
812          * dm-bufio is resistant to allocation failures (it just keeps
813          * one buffer reserved in cases all the allocations fail).
814          * So set flags to not try too hard:
815          *      GFP_NOIO: don't recurse into the I/O layer
816          *      __GFP_NORETRY: don't retry and rather return failure
817          *      __GFP_NOMEMALLOC: don't use emergency reserves
818          *      __GFP_NOWARN: don't print a warning in case of failure
819          *
820          * For debugging, if we set the cache size to 1, no new buffers will
821          * be allocated.
822          */
823         while (1) {
824                 if (dm_bufio_cache_size_latch != 1) {
825                         b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
826                         if (b)
827                                 return b;
828                 }
829
830                 if (nf == NF_PREFETCH)
831                         return NULL;
832
833                 if (!list_empty(&c->reserved_buffers)) {
834                         b = list_entry(c->reserved_buffers.next,
835                                        struct dm_buffer, lru_list);
836                         list_del(&b->lru_list);
837                         c->need_reserved_buffers++;
838
839                         return b;
840                 }
841
842                 b = __get_unclaimed_buffer(c);
843                 if (b)
844                         return b;
845
846                 __wait_for_free_buffer(c);
847         }
848 }
849
850 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
851 {
852         struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
853
854         if (!b)
855                 return NULL;
856
857         if (c->alloc_callback)
858                 c->alloc_callback(b);
859
860         return b;
861 }
862
863 /*
864  * Free a buffer and wake other threads waiting for free buffers.
865  */
866 static void __free_buffer_wake(struct dm_buffer *b)
867 {
868         struct dm_bufio_client *c = b->c;
869
870         if (!c->need_reserved_buffers)
871                 free_buffer(b);
872         else {
873                 list_add(&b->lru_list, &c->reserved_buffers);
874                 c->need_reserved_buffers--;
875         }
876
877         wake_up(&c->free_buffer_wait);
878 }
879
880 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
881                                         struct list_head *write_list)
882 {
883         struct dm_buffer *b, *tmp;
884
885         list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
886                 BUG_ON(test_bit(B_READING, &b->state));
887
888                 if (!test_bit(B_DIRTY, &b->state) &&
889                     !test_bit(B_WRITING, &b->state)) {
890                         __relink_lru(b, LIST_CLEAN);
891                         continue;
892                 }
893
894                 if (no_wait && test_bit(B_WRITING, &b->state))
895                         return;
896
897                 __write_dirty_buffer(b, write_list);
898                 dm_bufio_cond_resched();
899         }
900 }
901
902 /*
903  * Get writeback threshold and buffer limit for a given client.
904  */
905 static void __get_memory_limit(struct dm_bufio_client *c,
906                                unsigned long *threshold_buffers,
907                                unsigned long *limit_buffers)
908 {
909         unsigned long buffers;
910
911         if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
912                 mutex_lock(&dm_bufio_clients_lock);
913                 __cache_size_refresh();
914                 mutex_unlock(&dm_bufio_clients_lock);
915         }
916
917         buffers = dm_bufio_cache_size_per_client >>
918                   (c->sectors_per_block_bits + SECTOR_SHIFT);
919
920         if (buffers < c->minimum_buffers)
921                 buffers = c->minimum_buffers;
922
923         *limit_buffers = buffers;
924         *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
925 }
926
927 /*
928  * Check if we're over watermark.
929  * If we are over threshold_buffers, start freeing buffers.
930  * If we're over "limit_buffers", block until we get under the limit.
931  */
932 static void __check_watermark(struct dm_bufio_client *c,
933                               struct list_head *write_list)
934 {
935         unsigned long threshold_buffers, limit_buffers;
936
937         __get_memory_limit(c, &threshold_buffers, &limit_buffers);
938
939         while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
940                limit_buffers) {
941
942                 struct dm_buffer *b = __get_unclaimed_buffer(c);
943
944                 if (!b)
945                         return;
946
947                 __free_buffer_wake(b);
948                 dm_bufio_cond_resched();
949         }
950
951         if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
952                 __write_dirty_buffers_async(c, 1, write_list);
953 }
954
955 /*----------------------------------------------------------------
956  * Getting a buffer
957  *--------------------------------------------------------------*/
958
959 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
960                                      enum new_flag nf, int *need_submit,
961                                      struct list_head *write_list)
962 {
963         struct dm_buffer *b, *new_b = NULL;
964
965         *need_submit = 0;
966
967         b = __find(c, block);
968         if (b)
969                 goto found_buffer;
970
971         if (nf == NF_GET)
972                 return NULL;
973
974         new_b = __alloc_buffer_wait(c, nf);
975         if (!new_b)
976                 return NULL;
977
978         /*
979          * We've had a period where the mutex was unlocked, so need to
980          * recheck the hash table.
981          */
982         b = __find(c, block);
983         if (b) {
984                 __free_buffer_wake(new_b);
985                 goto found_buffer;
986         }
987
988         __check_watermark(c, write_list);
989
990         b = new_b;
991         b->hold_count = 1;
992         b->read_error = 0;
993         b->write_error = 0;
994         __link_buffer(b, block, LIST_CLEAN);
995
996         if (nf == NF_FRESH) {
997                 b->state = 0;
998                 return b;
999         }
1000
1001         b->state = 1 << B_READING;
1002         *need_submit = 1;
1003
1004         return b;
1005
1006 found_buffer:
1007         if (nf == NF_PREFETCH)
1008                 return NULL;
1009         /*
1010          * Note: it is essential that we don't wait for the buffer to be
1011          * read if dm_bufio_get function is used. Both dm_bufio_get and
1012          * dm_bufio_prefetch can be used in the driver request routine.
1013          * If the user called both dm_bufio_prefetch and dm_bufio_get on
1014          * the same buffer, it would deadlock if we waited.
1015          */
1016         if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1017                 return NULL;
1018
1019         b->hold_count++;
1020         __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1021                      test_bit(B_WRITING, &b->state));
1022         return b;
1023 }
1024
1025 /*
1026  * The endio routine for reading: set the error, clear the bit and wake up
1027  * anyone waiting on the buffer.
1028  */
1029 static void read_endio(struct bio *bio, int error)
1030 {
1031         struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1032
1033         b->read_error = error;
1034
1035         BUG_ON(!test_bit(B_READING, &b->state));
1036
1037         smp_mb__before_atomic();
1038         clear_bit(B_READING, &b->state);
1039         smp_mb__after_atomic();
1040
1041         wake_up_bit(&b->state, B_READING);
1042 }
1043
1044 /*
1045  * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1046  * functions is similar except that dm_bufio_new doesn't read the
1047  * buffer from the disk (assuming that the caller overwrites all the data
1048  * and uses dm_bufio_mark_buffer_dirty to write new data back).
1049  */
1050 static void *new_read(struct dm_bufio_client *c, sector_t block,
1051                       enum new_flag nf, struct dm_buffer **bp)
1052 {
1053         int need_submit;
1054         struct dm_buffer *b;
1055
1056         LIST_HEAD(write_list);
1057
1058         dm_bufio_lock(c);
1059         b = __bufio_new(c, block, nf, &need_submit, &write_list);
1060         dm_bufio_unlock(c);
1061
1062         __flush_write_list(&write_list);
1063
1064         if (!b)
1065                 return b;
1066
1067         if (need_submit)
1068                 submit_io(b, READ, b->block, read_endio);
1069
1070         wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1071
1072         if (b->read_error) {
1073                 int error = b->read_error;
1074
1075                 dm_bufio_release(b);
1076
1077                 return ERR_PTR(error);
1078         }
1079
1080         *bp = b;
1081
1082         return b->data;
1083 }
1084
1085 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1086                    struct dm_buffer **bp)
1087 {
1088         return new_read(c, block, NF_GET, bp);
1089 }
1090 EXPORT_SYMBOL_GPL(dm_bufio_get);
1091
1092 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1093                     struct dm_buffer **bp)
1094 {
1095         BUG_ON(dm_bufio_in_request());
1096
1097         return new_read(c, block, NF_READ, bp);
1098 }
1099 EXPORT_SYMBOL_GPL(dm_bufio_read);
1100
1101 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1102                    struct dm_buffer **bp)
1103 {
1104         BUG_ON(dm_bufio_in_request());
1105
1106         return new_read(c, block, NF_FRESH, bp);
1107 }
1108 EXPORT_SYMBOL_GPL(dm_bufio_new);
1109
1110 void dm_bufio_prefetch(struct dm_bufio_client *c,
1111                        sector_t block, unsigned n_blocks)
1112 {
1113         struct blk_plug plug;
1114
1115         LIST_HEAD(write_list);
1116
1117         BUG_ON(dm_bufio_in_request());
1118
1119         blk_start_plug(&plug);
1120         dm_bufio_lock(c);
1121
1122         for (; n_blocks--; block++) {
1123                 int need_submit;
1124                 struct dm_buffer *b;
1125                 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1126                                 &write_list);
1127                 if (unlikely(!list_empty(&write_list))) {
1128                         dm_bufio_unlock(c);
1129                         blk_finish_plug(&plug);
1130                         __flush_write_list(&write_list);
1131                         blk_start_plug(&plug);
1132                         dm_bufio_lock(c);
1133                 }
1134                 if (unlikely(b != NULL)) {
1135                         dm_bufio_unlock(c);
1136
1137                         if (need_submit)
1138                                 submit_io(b, READ, b->block, read_endio);
1139                         dm_bufio_release(b);
1140
1141                         dm_bufio_cond_resched();
1142
1143                         if (!n_blocks)
1144                                 goto flush_plug;
1145                         dm_bufio_lock(c);
1146                 }
1147         }
1148
1149         dm_bufio_unlock(c);
1150
1151 flush_plug:
1152         blk_finish_plug(&plug);
1153 }
1154 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1155
1156 void dm_bufio_release(struct dm_buffer *b)
1157 {
1158         struct dm_bufio_client *c = b->c;
1159
1160         dm_bufio_lock(c);
1161
1162         BUG_ON(!b->hold_count);
1163
1164         b->hold_count--;
1165         if (!b->hold_count) {
1166                 wake_up(&c->free_buffer_wait);
1167
1168                 /*
1169                  * If there were errors on the buffer, and the buffer is not
1170                  * to be written, free the buffer. There is no point in caching
1171                  * invalid buffer.
1172                  */
1173                 if ((b->read_error || b->write_error) &&
1174                     !test_bit(B_READING, &b->state) &&
1175                     !test_bit(B_WRITING, &b->state) &&
1176                     !test_bit(B_DIRTY, &b->state)) {
1177                         __unlink_buffer(b);
1178                         __free_buffer_wake(b);
1179                 }
1180         }
1181
1182         dm_bufio_unlock(c);
1183 }
1184 EXPORT_SYMBOL_GPL(dm_bufio_release);
1185
1186 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1187 {
1188         struct dm_bufio_client *c = b->c;
1189
1190         dm_bufio_lock(c);
1191
1192         BUG_ON(test_bit(B_READING, &b->state));
1193
1194         if (!test_and_set_bit(B_DIRTY, &b->state))
1195                 __relink_lru(b, LIST_DIRTY);
1196
1197         dm_bufio_unlock(c);
1198 }
1199 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1200
1201 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1202 {
1203         LIST_HEAD(write_list);
1204
1205         BUG_ON(dm_bufio_in_request());
1206
1207         dm_bufio_lock(c);
1208         __write_dirty_buffers_async(c, 0, &write_list);
1209         dm_bufio_unlock(c);
1210         __flush_write_list(&write_list);
1211 }
1212 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1213
1214 /*
1215  * For performance, it is essential that the buffers are written asynchronously
1216  * and simultaneously (so that the block layer can merge the writes) and then
1217  * waited upon.
1218  *
1219  * Finally, we flush hardware disk cache.
1220  */
1221 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1222 {
1223         int a, f;
1224         unsigned long buffers_processed = 0;
1225         struct dm_buffer *b, *tmp;
1226
1227         LIST_HEAD(write_list);
1228
1229         dm_bufio_lock(c);
1230         __write_dirty_buffers_async(c, 0, &write_list);
1231         dm_bufio_unlock(c);
1232         __flush_write_list(&write_list);
1233         dm_bufio_lock(c);
1234
1235 again:
1236         list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1237                 int dropped_lock = 0;
1238
1239                 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1240                         buffers_processed++;
1241
1242                 BUG_ON(test_bit(B_READING, &b->state));
1243
1244                 if (test_bit(B_WRITING, &b->state)) {
1245                         if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1246                                 dropped_lock = 1;
1247                                 b->hold_count++;
1248                                 dm_bufio_unlock(c);
1249                                 wait_on_bit_io(&b->state, B_WRITING,
1250                                                TASK_UNINTERRUPTIBLE);
1251                                 dm_bufio_lock(c);
1252                                 b->hold_count--;
1253                         } else
1254                                 wait_on_bit_io(&b->state, B_WRITING,
1255                                                TASK_UNINTERRUPTIBLE);
1256                 }
1257
1258                 if (!test_bit(B_DIRTY, &b->state) &&
1259                     !test_bit(B_WRITING, &b->state))
1260                         __relink_lru(b, LIST_CLEAN);
1261
1262                 dm_bufio_cond_resched();
1263
1264                 /*
1265                  * If we dropped the lock, the list is no longer consistent,
1266                  * so we must restart the search.
1267                  *
1268                  * In the most common case, the buffer just processed is
1269                  * relinked to the clean list, so we won't loop scanning the
1270                  * same buffer again and again.
1271                  *
1272                  * This may livelock if there is another thread simultaneously
1273                  * dirtying buffers, so we count the number of buffers walked
1274                  * and if it exceeds the total number of buffers, it means that
1275                  * someone is doing some writes simultaneously with us.  In
1276                  * this case, stop, dropping the lock.
1277                  */
1278                 if (dropped_lock)
1279                         goto again;
1280         }
1281         wake_up(&c->free_buffer_wait);
1282         dm_bufio_unlock(c);
1283
1284         a = xchg(&c->async_write_error, 0);
1285         f = dm_bufio_issue_flush(c);
1286         if (a)
1287                 return a;
1288
1289         return f;
1290 }
1291 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1292
1293 /*
1294  * Use dm-io to send and empty barrier flush the device.
1295  */
1296 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1297 {
1298         struct dm_io_request io_req = {
1299                 .bi_rw = WRITE_FLUSH,
1300                 .mem.type = DM_IO_KMEM,
1301                 .mem.ptr.addr = NULL,
1302                 .client = c->dm_io,
1303         };
1304         struct dm_io_region io_reg = {
1305                 .bdev = c->bdev,
1306                 .sector = 0,
1307                 .count = 0,
1308         };
1309
1310         BUG_ON(dm_bufio_in_request());
1311
1312         return dm_io(&io_req, 1, &io_reg, NULL);
1313 }
1314 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1315
1316 /*
1317  * We first delete any other buffer that may be at that new location.
1318  *
1319  * Then, we write the buffer to the original location if it was dirty.
1320  *
1321  * Then, if we are the only one who is holding the buffer, relink the buffer
1322  * in the hash queue for the new location.
1323  *
1324  * If there was someone else holding the buffer, we write it to the new
1325  * location but not relink it, because that other user needs to have the buffer
1326  * at the same place.
1327  */
1328 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1329 {
1330         struct dm_bufio_client *c = b->c;
1331         struct dm_buffer *new;
1332
1333         BUG_ON(dm_bufio_in_request());
1334
1335         dm_bufio_lock(c);
1336
1337 retry:
1338         new = __find(c, new_block);
1339         if (new) {
1340                 if (new->hold_count) {
1341                         __wait_for_free_buffer(c);
1342                         goto retry;
1343                 }
1344
1345                 /*
1346                  * FIXME: Is there any point waiting for a write that's going
1347                  * to be overwritten in a bit?
1348                  */
1349                 __make_buffer_clean(new);
1350                 __unlink_buffer(new);
1351                 __free_buffer_wake(new);
1352         }
1353
1354         BUG_ON(!b->hold_count);
1355         BUG_ON(test_bit(B_READING, &b->state));
1356
1357         __write_dirty_buffer(b, NULL);
1358         if (b->hold_count == 1) {
1359                 wait_on_bit_io(&b->state, B_WRITING,
1360                                TASK_UNINTERRUPTIBLE);
1361                 set_bit(B_DIRTY, &b->state);
1362                 __unlink_buffer(b);
1363                 __link_buffer(b, new_block, LIST_DIRTY);
1364         } else {
1365                 sector_t old_block;
1366                 wait_on_bit_lock_io(&b->state, B_WRITING,
1367                                     TASK_UNINTERRUPTIBLE);
1368                 /*
1369                  * Relink buffer to "new_block" so that write_callback
1370                  * sees "new_block" as a block number.
1371                  * After the write, link the buffer back to old_block.
1372                  * All this must be done in bufio lock, so that block number
1373                  * change isn't visible to other threads.
1374                  */
1375                 old_block = b->block;
1376                 __unlink_buffer(b);
1377                 __link_buffer(b, new_block, b->list_mode);
1378                 submit_io(b, WRITE, new_block, write_endio);
1379                 wait_on_bit_io(&b->state, B_WRITING,
1380                                TASK_UNINTERRUPTIBLE);
1381                 __unlink_buffer(b);
1382                 __link_buffer(b, old_block, b->list_mode);
1383         }
1384
1385         dm_bufio_unlock(c);
1386         dm_bufio_release(b);
1387 }
1388 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1389
1390 /*
1391  * Free the given buffer.
1392  *
1393  * This is just a hint, if the buffer is in use or dirty, this function
1394  * does nothing.
1395  */
1396 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1397 {
1398         struct dm_buffer *b;
1399
1400         dm_bufio_lock(c);
1401
1402         b = __find(c, block);
1403         if (b && likely(!b->hold_count) && likely(!b->state)) {
1404                 __unlink_buffer(b);
1405                 __free_buffer_wake(b);
1406         }
1407
1408         dm_bufio_unlock(c);
1409 }
1410 EXPORT_SYMBOL(dm_bufio_forget);
1411
1412 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1413 {
1414         c->minimum_buffers = n;
1415 }
1416 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1417
1418 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1419 {
1420         return c->block_size;
1421 }
1422 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1423
1424 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1425 {
1426         return i_size_read(c->bdev->bd_inode) >>
1427                            (SECTOR_SHIFT + c->sectors_per_block_bits);
1428 }
1429 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1430
1431 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1432 {
1433         return b->block;
1434 }
1435 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1436
1437 void *dm_bufio_get_block_data(struct dm_buffer *b)
1438 {
1439         return b->data;
1440 }
1441 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1442
1443 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1444 {
1445         return b + 1;
1446 }
1447 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1448
1449 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1450 {
1451         return b->c;
1452 }
1453 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1454
1455 static void drop_buffers(struct dm_bufio_client *c)
1456 {
1457         struct dm_buffer *b;
1458         int i;
1459
1460         BUG_ON(dm_bufio_in_request());
1461
1462         /*
1463          * An optimization so that the buffers are not written one-by-one.
1464          */
1465         dm_bufio_write_dirty_buffers_async(c);
1466
1467         dm_bufio_lock(c);
1468
1469         while ((b = __get_unclaimed_buffer(c)))
1470                 __free_buffer_wake(b);
1471
1472         for (i = 0; i < LIST_SIZE; i++)
1473                 list_for_each_entry(b, &c->lru[i], lru_list)
1474                         DMERR("leaked buffer %llx, hold count %u, list %d",
1475                               (unsigned long long)b->block, b->hold_count, i);
1476
1477         for (i = 0; i < LIST_SIZE; i++)
1478                 BUG_ON(!list_empty(&c->lru[i]));
1479
1480         dm_bufio_unlock(c);
1481 }
1482
1483 /*
1484  * We may not be able to evict this buffer if IO pending or the client
1485  * is still using it.  Caller is expected to know buffer is too old.
1486  *
1487  * And if GFP_NOFS is used, we must not do any I/O because we hold
1488  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1489  * rerouted to different bufio client.
1490  */
1491 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1492 {
1493         if (!(gfp & __GFP_FS)) {
1494                 if (test_bit(B_READING, &b->state) ||
1495                     test_bit(B_WRITING, &b->state) ||
1496                     test_bit(B_DIRTY, &b->state))
1497                         return false;
1498         }
1499
1500         if (b->hold_count)
1501                 return false;
1502
1503         __make_buffer_clean(b);
1504         __unlink_buffer(b);
1505         __free_buffer_wake(b);
1506
1507         return true;
1508 }
1509
1510 static unsigned get_retain_buffers(struct dm_bufio_client *c)
1511 {
1512         unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1513         return retain_bytes / c->block_size;
1514 }
1515
1516 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1517                             gfp_t gfp_mask)
1518 {
1519         int l;
1520         struct dm_buffer *b, *tmp;
1521         unsigned long freed = 0;
1522         unsigned long count = nr_to_scan;
1523         unsigned retain_target = get_retain_buffers(c);
1524
1525         for (l = 0; l < LIST_SIZE; l++) {
1526                 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1527                         if (__try_evict_buffer(b, gfp_mask))
1528                                 freed++;
1529                         if (!--nr_to_scan || ((count - freed) <= retain_target))
1530                                 return freed;
1531                         dm_bufio_cond_resched();
1532                 }
1533         }
1534         return freed;
1535 }
1536
1537 static unsigned long
1538 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1539 {
1540         struct dm_bufio_client *c;
1541         unsigned long freed;
1542
1543         c = container_of(shrink, struct dm_bufio_client, shrinker);
1544         if (sc->gfp_mask & __GFP_FS)
1545                 dm_bufio_lock(c);
1546         else if (!dm_bufio_trylock(c))
1547                 return SHRINK_STOP;
1548
1549         freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1550         dm_bufio_unlock(c);
1551         return freed;
1552 }
1553
1554 static unsigned long
1555 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1556 {
1557         struct dm_bufio_client *c;
1558         unsigned long count;
1559
1560         c = container_of(shrink, struct dm_bufio_client, shrinker);
1561         if (sc->gfp_mask & __GFP_FS)
1562                 dm_bufio_lock(c);
1563         else if (!dm_bufio_trylock(c))
1564                 return 0;
1565
1566         count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1567         dm_bufio_unlock(c);
1568         return count;
1569 }
1570
1571 /*
1572  * Create the buffering interface
1573  */
1574 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1575                                                unsigned reserved_buffers, unsigned aux_size,
1576                                                void (*alloc_callback)(struct dm_buffer *),
1577                                                void (*write_callback)(struct dm_buffer *))
1578 {
1579         int r;
1580         struct dm_bufio_client *c;
1581         unsigned i;
1582
1583         BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1584                (block_size & (block_size - 1)));
1585
1586         c = kzalloc(sizeof(*c), GFP_KERNEL);
1587         if (!c) {
1588                 r = -ENOMEM;
1589                 goto bad_client;
1590         }
1591         c->buffer_tree = RB_ROOT;
1592
1593         c->bdev = bdev;
1594         c->block_size = block_size;
1595         c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1596         c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1597                                   ffs(block_size) - 1 - PAGE_SHIFT : 0;
1598         c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1599                                   PAGE_SHIFT - (ffs(block_size) - 1) : 0);
1600
1601         c->aux_size = aux_size;
1602         c->alloc_callback = alloc_callback;
1603         c->write_callback = write_callback;
1604
1605         for (i = 0; i < LIST_SIZE; i++) {
1606                 INIT_LIST_HEAD(&c->lru[i]);
1607                 c->n_buffers[i] = 0;
1608         }
1609
1610         mutex_init(&c->lock);
1611         INIT_LIST_HEAD(&c->reserved_buffers);
1612         c->need_reserved_buffers = reserved_buffers;
1613
1614         c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1615
1616         init_waitqueue_head(&c->free_buffer_wait);
1617         c->async_write_error = 0;
1618
1619         c->dm_io = dm_io_client_create();
1620         if (IS_ERR(c->dm_io)) {
1621                 r = PTR_ERR(c->dm_io);
1622                 goto bad_dm_io;
1623         }
1624
1625         mutex_lock(&dm_bufio_clients_lock);
1626         if (c->blocks_per_page_bits) {
1627                 if (!DM_BUFIO_CACHE_NAME(c)) {
1628                         DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1629                         if (!DM_BUFIO_CACHE_NAME(c)) {
1630                                 r = -ENOMEM;
1631                                 mutex_unlock(&dm_bufio_clients_lock);
1632                                 goto bad_cache;
1633                         }
1634                 }
1635
1636                 if (!DM_BUFIO_CACHE(c)) {
1637                         DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1638                                                               c->block_size,
1639                                                               c->block_size, 0, NULL);
1640                         if (!DM_BUFIO_CACHE(c)) {
1641                                 r = -ENOMEM;
1642                                 mutex_unlock(&dm_bufio_clients_lock);
1643                                 goto bad_cache;
1644                         }
1645                 }
1646         }
1647         mutex_unlock(&dm_bufio_clients_lock);
1648
1649         while (c->need_reserved_buffers) {
1650                 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1651
1652                 if (!b) {
1653                         r = -ENOMEM;
1654                         goto bad_buffer;
1655                 }
1656                 __free_buffer_wake(b);
1657         }
1658
1659         mutex_lock(&dm_bufio_clients_lock);
1660         dm_bufio_client_count++;
1661         list_add(&c->client_list, &dm_bufio_all_clients);
1662         __cache_size_refresh();
1663         mutex_unlock(&dm_bufio_clients_lock);
1664
1665         c->shrinker.count_objects = dm_bufio_shrink_count;
1666         c->shrinker.scan_objects = dm_bufio_shrink_scan;
1667         c->shrinker.seeks = 1;
1668         c->shrinker.batch = 0;
1669         register_shrinker(&c->shrinker);
1670
1671         return c;
1672
1673 bad_buffer:
1674 bad_cache:
1675         while (!list_empty(&c->reserved_buffers)) {
1676                 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1677                                                  struct dm_buffer, lru_list);
1678                 list_del(&b->lru_list);
1679                 free_buffer(b);
1680         }
1681         dm_io_client_destroy(c->dm_io);
1682 bad_dm_io:
1683         kfree(c);
1684 bad_client:
1685         return ERR_PTR(r);
1686 }
1687 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1688
1689 /*
1690  * Free the buffering interface.
1691  * It is required that there are no references on any buffers.
1692  */
1693 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1694 {
1695         unsigned i;
1696
1697         drop_buffers(c);
1698
1699         unregister_shrinker(&c->shrinker);
1700
1701         mutex_lock(&dm_bufio_clients_lock);
1702
1703         list_del(&c->client_list);
1704         dm_bufio_client_count--;
1705         __cache_size_refresh();
1706
1707         mutex_unlock(&dm_bufio_clients_lock);
1708
1709         BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1710         BUG_ON(c->need_reserved_buffers);
1711
1712         while (!list_empty(&c->reserved_buffers)) {
1713                 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1714                                                  struct dm_buffer, lru_list);
1715                 list_del(&b->lru_list);
1716                 free_buffer(b);
1717         }
1718
1719         for (i = 0; i < LIST_SIZE; i++)
1720                 if (c->n_buffers[i])
1721                         DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1722
1723         for (i = 0; i < LIST_SIZE; i++)
1724                 BUG_ON(c->n_buffers[i]);
1725
1726         dm_io_client_destroy(c->dm_io);
1727         kfree(c);
1728 }
1729 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1730
1731 static unsigned get_max_age_hz(void)
1732 {
1733         unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1734
1735         if (max_age > UINT_MAX / HZ)
1736                 max_age = UINT_MAX / HZ;
1737
1738         return max_age * HZ;
1739 }
1740
1741 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1742 {
1743         return time_after_eq(jiffies, b->last_accessed + age_hz);
1744 }
1745
1746 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1747 {
1748         struct dm_buffer *b, *tmp;
1749         unsigned retain_target = get_retain_buffers(c);
1750         unsigned count;
1751
1752         dm_bufio_lock(c);
1753
1754         count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1755         list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1756                 if (count <= retain_target)
1757                         break;
1758
1759                 if (!older_than(b, age_hz))
1760                         break;
1761
1762                 if (__try_evict_buffer(b, 0))
1763                         count--;
1764
1765                 dm_bufio_cond_resched();
1766         }
1767
1768         dm_bufio_unlock(c);
1769 }
1770
1771 static void cleanup_old_buffers(void)
1772 {
1773         unsigned long max_age_hz = get_max_age_hz();
1774         struct dm_bufio_client *c;
1775
1776         mutex_lock(&dm_bufio_clients_lock);
1777
1778         list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1779                 __evict_old_buffers(c, max_age_hz);
1780
1781         mutex_unlock(&dm_bufio_clients_lock);
1782 }
1783
1784 static struct workqueue_struct *dm_bufio_wq;
1785 static struct delayed_work dm_bufio_work;
1786
1787 static void work_fn(struct work_struct *w)
1788 {
1789         cleanup_old_buffers();
1790
1791         queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1792                            DM_BUFIO_WORK_TIMER_SECS * HZ);
1793 }
1794
1795 /*----------------------------------------------------------------
1796  * Module setup
1797  *--------------------------------------------------------------*/
1798
1799 /*
1800  * This is called only once for the whole dm_bufio module.
1801  * It initializes memory limit.
1802  */
1803 static int __init dm_bufio_init(void)
1804 {
1805         __u64 mem;
1806
1807         dm_bufio_allocated_kmem_cache = 0;
1808         dm_bufio_allocated_get_free_pages = 0;
1809         dm_bufio_allocated_vmalloc = 0;
1810         dm_bufio_current_allocated = 0;
1811
1812         memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1813         memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1814
1815         mem = (__u64)((totalram_pages - totalhigh_pages) *
1816                       DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1817
1818         if (mem > ULONG_MAX)
1819                 mem = ULONG_MAX;
1820
1821 #ifdef CONFIG_MMU
1822         /*
1823          * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1824          * in fs/proc/internal.h
1825          */
1826         if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1827                 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1828 #endif
1829
1830         dm_bufio_default_cache_size = mem;
1831
1832         mutex_lock(&dm_bufio_clients_lock);
1833         __cache_size_refresh();
1834         mutex_unlock(&dm_bufio_clients_lock);
1835
1836         dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1837         if (!dm_bufio_wq)
1838                 return -ENOMEM;
1839
1840         INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1841         queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1842                            DM_BUFIO_WORK_TIMER_SECS * HZ);
1843
1844         return 0;
1845 }
1846
1847 /*
1848  * This is called once when unloading the dm_bufio module.
1849  */
1850 static void __exit dm_bufio_exit(void)
1851 {
1852         int bug = 0;
1853         int i;
1854
1855         cancel_delayed_work_sync(&dm_bufio_work);
1856         destroy_workqueue(dm_bufio_wq);
1857
1858         for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1859                 struct kmem_cache *kc = dm_bufio_caches[i];
1860
1861                 if (kc)
1862                         kmem_cache_destroy(kc);
1863         }
1864
1865         for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1866                 kfree(dm_bufio_cache_names[i]);
1867
1868         if (dm_bufio_client_count) {
1869                 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1870                         __func__, dm_bufio_client_count);
1871                 bug = 1;
1872         }
1873
1874         if (dm_bufio_current_allocated) {
1875                 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1876                         __func__, dm_bufio_current_allocated);
1877                 bug = 1;
1878         }
1879
1880         if (dm_bufio_allocated_get_free_pages) {
1881                 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1882                        __func__, dm_bufio_allocated_get_free_pages);
1883                 bug = 1;
1884         }
1885
1886         if (dm_bufio_allocated_vmalloc) {
1887                 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1888                        __func__, dm_bufio_allocated_vmalloc);
1889                 bug = 1;
1890         }
1891
1892         if (bug)
1893                 BUG();
1894 }
1895
1896 module_init(dm_bufio_init)
1897 module_exit(dm_bufio_exit)
1898
1899 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1900 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1901
1902 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1903 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1904
1905 module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
1906 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1907
1908 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1909 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1910
1911 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1912 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1913
1914 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1915 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1916
1917 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1918 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1919
1920 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1921 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1922
1923 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1924 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1925 MODULE_LICENSE("GPL");