/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX   "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect,
 * and will continue to think that a data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.  Prison cells are keyed either by a physical data block
 * (virtual = 0) or by a thin device's virtual block (virtual = 1).
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered by increasing degradation so that
 * modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

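/*
 * Per-pool feature flags, configured via optional arguments on the
 * pool target's table line.
 */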
struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

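/*
 * Try to take ownership of the region described by @key.  On success a
 * new cell holding @bio is returned in @cell_result; a non-zero return
 * means the region is already owned and @bio has been queued in the
 * existing cell.
 */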
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

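/*
 * Atomically take all bios off @master and complete them with
 * DM_ENDIO_REQUEUE so that dm core requeues them for resubmission
 * after the device is resumed.
 */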
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

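/*
 * Map a bio's starting sector to the pool block that contains it.  For
 * power-of-two block sizes this is a shift; e.g. with 128-sector (64KiB)
 * blocks, a bio at sector 300 lands in block 300 >> 7 = 2.
 */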
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

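/*
 * Redirect @bio at @block of the pool's data device, preserving its
 * offset within the block; e.g. with 128-sector blocks, block 5 plus an
 * offset of 10 sectors maps to sector 5 * 128 + 10 = 650.
 */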
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

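/*
 * A mapping is handed to the worker only once it is both quiesced (no
 * in-flight io to the old block) and prepared (the copy or zero has
 * completed).  The completion callbacks below call this under pool->lock.
 */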
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

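/*
 * Error path for a prepared mapping: restore a hooked overwrite bio's
 * original endio, error every bio waiting in the cell and free the
 * mapping.
 */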
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

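/*
 * Mapping structs are preallocated here, in worker context, so the
 * processing functions never have to cope with an allocation failure
 * mid-bio: if the GFP_ATOMIC allocation fails the bio is simply
 * re-deferred until some prepared mappings have been processed.
 */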
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

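/*
 * Set up a mapping that copies @data_origin to @data_dest before the
 * new mapping for @virt_block is inserted into the btree.  This is
 * step (iii) of the break-sharing scheme described at the top of the
 * file.
 */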
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
			handle_unserviceable_bio(tc->pool, bio);
		else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio, 0);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
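/*
 * Returns non-zero roughly once per COMMIT_PERIOD.  The first test
 * handles wrap-around of the jiffies counter.
 */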
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;

	if (tc->requeue_mode) {
		requeue_bio_list(tc, &tc->deferred_bio_list);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);
	}
	blk_finish_plug(&plug);
}

static void thin_get(struct thin_c *tc);
static void thin_put(struct thin_c *tc);

/*
 * We can't hold rcu_read_lock() around code that can block.  So we
 * find a thin with the rcu lock held; bump a refcount; then drop
 * the lock.
 */
static struct thin_c *get_first_thin(struct pool *pool)
{
	struct thin_c *tc = NULL;

	rcu_read_lock();
	if (!list_empty(&pool->active_thins)) {
		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
		thin_get(tc);
	}
	rcu_read_unlock();

	return tc;
}

static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
{
	struct thin_c *old_tc = tc;

	rcu_read_lock();
	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
		thin_get(tc);
		thin_put(old_tc);
		rcu_read_unlock();
		return tc;
	}
	thin_put(old_tc);
	rcu_read_unlock();

	return NULL;
}

static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct thin_c *tc;

	tc = get_first_thin(pool);
	while (tc) {
		process_thin_deferred_bios(tc);
		tc = get_next_thin(pool, tc);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) &&
	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
		return;

	if (commit(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

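/*
 * Helpers to set or clear a thin's requeue_mode from the pool's worker
 * thread.  The caller blocks until the work has run, so the mode change
 * is ordered with respect to bio processing.
 */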
1595 struct noflush_work {
1596         struct work_struct worker;
1597         struct thin_c *tc;
1598
1599         atomic_t complete;
1600         wait_queue_head_t wait;
1601 };
1602
1603 static void complete_noflush_work(struct noflush_work *w)
1604 {
1605         atomic_set(&w->complete, 1);
1606         wake_up(&w->wait);
1607 }
1608
1609 static void do_noflush_start(struct work_struct *ws)
1610 {
1611         struct noflush_work *w = container_of(ws, struct noflush_work, worker);
1612         w->tc->requeue_mode = true;
1613         requeue_io(w->tc);
1614         complete_noflush_work(w);
1615 }
1616
1617 static void do_noflush_stop(struct work_struct *ws)
1618 {
1619         struct noflush_work *w = container_of(ws, struct noflush_work, worker);
1620         w->tc->requeue_mode = false;
1621         complete_noflush_work(w);
1622 }
1623
1624 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1625 {
1626         struct noflush_work w;
1627
1628         INIT_WORK_ONSTACK(&w.worker, fn);
1629         w.tc = tc;
1630         atomic_set(&w.complete, 0);
1631         init_waitqueue_head(&w.wait);
1632
1633         queue_work(tc->pool->wq, &w.worker);
1634
1635         wait_event(w.wait, atomic_read(&w.complete));
1636 }
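
/*
 * A minimal sketch (not the actual hooks) of how a suspend path might
 * drive this; dm_noflush_suspending() is the dm core helper that tells
 * a target whether a noflush suspend is in progress:
 *
 *	if (dm_noflush_suspending(ti))
 *		noflush_work(tc, do_noflush_start);
 *	...
 *	noflush_work(tc, do_noflush_stop);
 *
 * Queueing the flag flip on the pool's ordered workqueue serialises it
 * against the worker's bio processing without any extra locking.
 */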
1637
1638 /*----------------------------------------------------------------*/
1639
1640 static enum pool_mode get_pool_mode(struct pool *pool)
1641 {
1642         return pool->pf.mode;
1643 }
1644
1645 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
1646 {
1647         dm_table_event(pool->ti->table);
1648         DMINFO("%s: switching pool to %s mode",
1649                dm_device_name(pool->pool_md), new_mode);
1650 }
1651
1652 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1653 {
1654         struct pool_c *pt = pool->ti->private;
1655         bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1656         enum pool_mode old_mode = get_pool_mode(pool);
1657
1658         /*
1659          * Never allow the pool to transition to PM_WRITE mode if user
1660          * intervention is required to verify metadata and data consistency.
1661          */
1662         if (new_mode == PM_WRITE && needs_check) {
1663                 DMERR("%s: unable to switch pool to write mode until repaired.",
1664                       dm_device_name(pool->pool_md));
1665                 if (old_mode != new_mode)
1666                         new_mode = old_mode;
1667                 else
1668                         new_mode = PM_READ_ONLY;
1669         }
1670         /*
1671          * If we were in PM_FAIL mode, rollback of metadata failed.  We're
1672          * not going to recover without a thin_repair.  So we never let the
1673          * pool move out of the old mode.
1674          */
1675         if (old_mode == PM_FAIL)
1676                 new_mode = old_mode;
1677
1678         switch (new_mode) {
1679         case PM_FAIL:
1680                 if (old_mode != new_mode)
1681                         notify_of_pool_mode_change(pool, "failure");
1682                 dm_pool_metadata_read_only(pool->pmd);
1683                 pool->process_bio = process_bio_fail;
1684                 pool->process_discard = process_bio_fail;
1685                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1686                 pool->process_prepared_discard = process_prepared_discard_fail;
1687
1688                 error_retry_list(pool);
1689                 break;
1690
1691         case PM_READ_ONLY:
1692                 if (old_mode != new_mode)
1693                         notify_of_pool_mode_change(pool, "read-only");
1694                 dm_pool_metadata_read_only(pool->pmd);
1695                 pool->process_bio = process_bio_read_only;
1696                 pool->process_discard = process_bio_success;
1697                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1698                 pool->process_prepared_discard = process_prepared_discard_passdown;
1699
1700                 error_retry_list(pool);
1701                 break;
1702
1703         case PM_OUT_OF_DATA_SPACE:
1704                 /*
1705                  * Ideally we'd never hit this state; the low water mark
1706                  * would trigger userland to extend the pool before we
1707                  * completely run out of data space.  However, many small
1708                  * IOs to unprovisioned space can consume data space at an
1709                  * alarming rate.  Adjust your low water mark if you're
1710                  * frequently seeing this mode.
1711                  */
1712                 if (old_mode != new_mode)
1713                         notify_of_pool_mode_change(pool, "out-of-data-space");
1714                 pool->process_bio = process_bio_read_only;
1715                 pool->process_discard = process_discard;
1716                 pool->process_prepared_mapping = process_prepared_mapping;
1717                 pool->process_prepared_discard = process_prepared_discard_passdown;
1718                 break;
1719
1720         case PM_WRITE:
1721                 if (old_mode != new_mode)
1722                         notify_of_pool_mode_change(pool, "write");
1723                 dm_pool_metadata_read_write(pool->pmd);
1724                 pool->process_bio = process_bio;
1725                 pool->process_discard = process_discard;
1726                 pool->process_prepared_mapping = process_prepared_mapping;
1727                 pool->process_prepared_discard = process_prepared_discard;
1728                 break;
1729         }
1730
1731         pool->pf.mode = new_mode;
1732         /*
1733          * The pool mode may have changed; sync it so bind_control_target()
1734          * doesn't cause an unexpected mode transition on resume.
1735          */
1736         pt->adjusted_pf.mode = new_mode;
1737 }
1738
1739 static void abort_transaction(struct pool *pool)
1740 {
1741         const char *dev_name = dm_device_name(pool->pool_md);
1742
1743         DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1744         if (dm_pool_abort_metadata(pool->pmd)) {
1745                 DMERR("%s: failed to abort metadata transaction", dev_name);
1746                 set_pool_mode(pool, PM_FAIL);
1747         }
1748
1749         if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1750                 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1751                 set_pool_mode(pool, PM_FAIL);
1752         }
1753 }
1754
1755 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1756 {
1757         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1758                     dm_device_name(pool->pool_md), op, r);
1759
1760         abort_transaction(pool);
1761         set_pool_mode(pool, PM_READ_ONLY);
1762 }
1763
1764 /*----------------------------------------------------------------*/
1765
1766 /*
1767  * Mapping functions.
1768  */
1769
1770 /*
1771  * Called only while mapping a thin bio to hand it over to the workqueue.
1772  */
1773 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1774 {
1775         unsigned long flags;
1776         struct pool *pool = tc->pool;
1777
1778         spin_lock_irqsave(&tc->lock, flags);
1779         bio_list_add(&tc->deferred_bio_list, bio);
1780         spin_unlock_irqrestore(&tc->lock, flags);
1781
1782         wake_worker(pool);
1783 }
1784
1785 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1786 {
1787         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1788
1789         h->tc = tc;
1790         h->shared_read_entry = NULL;
1791         h->all_io_entry = NULL;
1792         h->overwrite_mapping = NULL;
1793 }
1794
1795 /*
1796  * Non-blocking function called from the thin target's map function.
1797  */
1798 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1799 {
1800         int r;
1801         struct thin_c *tc = ti->private;
1802         dm_block_t block = get_bio_block(tc, bio);
1803         struct dm_thin_device *td = tc->td;
1804         struct dm_thin_lookup_result result;
1805         struct dm_bio_prison_cell cell1, cell2;
1806         struct dm_bio_prison_cell *cell_result;
1807         struct dm_cell_key key;
1808
1809         thin_hook_bio(tc, bio);
1810
1811         if (tc->requeue_mode) {
1812                 bio_endio(bio, DM_ENDIO_REQUEUE);
1813                 return DM_MAPIO_SUBMITTED;
1814         }
1815
1816         if (get_pool_mode(tc->pool) == PM_FAIL) {
1817                 bio_io_error(bio);
1818                 return DM_MAPIO_SUBMITTED;
1819         }
1820
1821         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1822                 thin_defer_bio(tc, bio);
1823                 return DM_MAPIO_SUBMITTED;
1824         }
1825
1826         r = dm_thin_find_block(td, block, 0, &result);
1827
1828         /*
1829          * Note that we defer readahead too.
1830          */
1831         switch (r) {
1832         case 0:
1833                 if (unlikely(result.shared)) {
1834                         /*
1835                          * We have a race condition here between the
1836                          * result.shared value returned by the lookup and
1837                          * snapshot creation, which may cause new
1838                          * sharing.
1839                          *
1840                          * To avoid this, always quiesce the origin before
1841                          * taking the snap.  You want to do this anyway to
1842                          * ensure a consistent application view
1843                          * (i.e. lockfs).
1844                          *
1845                          * More distant ancestors are irrelevant. The
1846                          * shared flag will be set in their case.
1847                          */
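                        /*
                         * e.g. one hypothetical userland sequence that
                         * quiesces the origin around snapshot creation
                         * (dmsetup suspend flushes and locks the fs by
                         * default):
                         *
                         *   dmsetup suspend /dev/mapper/thin0
                         *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
                         *   dmsetup resume /dev/mapper/thin0
                         */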
1848                         thin_defer_bio(tc, bio);
1849                         return DM_MAPIO_SUBMITTED;
1850                 }
1851
1852                 build_virtual_key(tc->td, block, &key);
1853                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1854                         return DM_MAPIO_SUBMITTED;
1855
1856                 build_data_key(tc->td, result.block, &key);
1857                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1858                         cell_defer_no_holder_no_free(tc, &cell1);
1859                         return DM_MAPIO_SUBMITTED;
1860                 }
1861
1862                 inc_all_io_entry(tc->pool, bio);
1863                 cell_defer_no_holder_no_free(tc, &cell2);
1864                 cell_defer_no_holder_no_free(tc, &cell1);
1865
1866                 remap(tc, bio, result.block);
1867                 return DM_MAPIO_REMAPPED;
1868
1869         case -ENODATA:
1870                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1871                         /*
1872                          * This block isn't provisioned, and we have no way
1873                          * of provisioning it.
1874                          */
1875                         handle_unserviceable_bio(tc->pool, bio);
1876                         return DM_MAPIO_SUBMITTED;
1877                 }
1878                 /* fall through */
1879
1880         case -EWOULDBLOCK:
1881                 /*
1882                  * In the future, the failed dm_thin_find_block() above could
1883                  * provide a hint to load the metadata into the cache.
1884                  */
1885                 thin_defer_bio(tc, bio);
1886                 return DM_MAPIO_SUBMITTED;
1887
1888         default:
1889                 /*
1890                  * Must always call bio_io_error on failure.
1891                  * dm_thin_find_block can fail with -EINVAL if the
1892                  * pool is switched to fail-io mode.
1893                  */
1894                 bio_io_error(bio);
1895                 return DM_MAPIO_SUBMITTED;
1896         }
1897 }
1898
1899 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1900 {
1901         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1902         struct request_queue *q;
1903
1904         if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
1905                 return 1;
1906
1907         q = bdev_get_queue(pt->data_dev->bdev);
1908         return bdi_congested(&q->backing_dev_info, bdi_bits);
1909 }
1910
1911 static void requeue_bios(struct pool *pool)
1912 {
1913         unsigned long flags;
1914         struct thin_c *tc;
1915
1916         rcu_read_lock();
1917         list_for_each_entry_rcu(tc, &pool->active_thins, list) {
1918                 spin_lock_irqsave(&tc->lock, flags);
1919                 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
1920                 bio_list_init(&tc->retry_on_resume_list);
1921                 spin_unlock_irqrestore(&tc->lock, flags);
1922         }
1923         rcu_read_unlock();
1924 }
1925
1926 /*----------------------------------------------------------------
1927  * Binding of control targets to a pool object
1928  *--------------------------------------------------------------*/
1929 static bool data_dev_supports_discard(struct pool_c *pt)
1930 {
1931         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1932
1933         return q && blk_queue_discard(q);
1934 }
1935
1936 static bool is_factor(sector_t block_size, uint32_t n)
1937 {
1938         return !sector_div(block_size, n);
1939 }
1940
1941 /*
1942  * If discard_passdown was enabled, verify that the data device
1943  * supports discards; disable discard_passdown if not.
1944  */
1945 static void disable_passdown_if_not_supported(struct pool_c *pt)
1946 {
1947         struct pool *pool = pt->pool;
1948         struct block_device *data_bdev = pt->data_dev->bdev;
1949         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1950         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1951         const char *reason = NULL;
1952         char buf[BDEVNAME_SIZE];
1953
1954         if (!pt->adjusted_pf.discard_passdown)
1955                 return;
1956
1957         if (!data_dev_supports_discard(pt))
1958                 reason = "discard unsupported";
1959
1960         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1961                 reason = "max discard sectors smaller than a block";
1962
1963         else if (data_limits->discard_granularity > block_size)
1964                 reason = "discard granularity larger than a block";
1965
1966         else if (!is_factor(block_size, data_limits->discard_granularity))
1967                 reason = "discard granularity not a factor of block size";
1968
1969         if (reason) {
1970                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1971                 pt->adjusted_pf.discard_passdown = false;
1972         }
1973 }
1974
1975 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1976 {
1977         struct pool_c *pt = ti->private;
1978
1979         /*
1980          * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1981          */
1982         enum pool_mode old_mode = get_pool_mode(pool);
1983         enum pool_mode new_mode = pt->adjusted_pf.mode;
1984
1985         /*
1986          * Don't change the pool's mode until set_pool_mode() below.
1987          * Otherwise the pool's process_* function pointers may
1988          * not match the desired pool mode.
1989          */
1990         pt->adjusted_pf.mode = old_mode;
1991
1992         pool->ti = ti;
1993         pool->pf = pt->adjusted_pf;
1994         pool->low_water_blocks = pt->low_water_blocks;
1995
1996         set_pool_mode(pool, new_mode);
1997
1998         return 0;
1999 }
2000
2001 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2002 {
2003         if (pool->ti == ti)
2004                 pool->ti = NULL;
2005 }
2006
2007 /*----------------------------------------------------------------
2008  * Pool creation
2009  *--------------------------------------------------------------*/
2010 /* Initialize pool features. */
2011 static void pool_features_init(struct pool_features *pf)
2012 {
2013         pf->mode = PM_WRITE;
2014         pf->zero_new_blocks = true;
2015         pf->discard_enabled = true;
2016         pf->discard_passdown = true;
2017         pf->error_if_no_space = false;
2018 }
2019
2020 static void __pool_destroy(struct pool *pool)
2021 {
2022         __pool_table_remove(pool);
2023
2024         if (dm_pool_metadata_close(pool->pmd) < 0)
2025                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2026
2027         dm_bio_prison_destroy(pool->prison);
2028         dm_kcopyd_client_destroy(pool->copier);
2029
2030         if (pool->wq)
2031                 destroy_workqueue(pool->wq);
2032
2033         if (pool->next_mapping)
2034                 mempool_free(pool->next_mapping, pool->mapping_pool);
2035         mempool_destroy(pool->mapping_pool);
2036         dm_deferred_set_destroy(pool->shared_read_ds);
2037         dm_deferred_set_destroy(pool->all_io_ds);
2038         kfree(pool);
2039 }
2040
2041 static struct kmem_cache *_new_mapping_cache;
2042
2043 static struct pool *pool_create(struct mapped_device *pool_md,
2044                                 struct block_device *metadata_dev,
2045                                 unsigned long block_size,
2046                                 int read_only, char **error)
2047 {
2048         int r;
2049         void *err_p;
2050         struct pool *pool;
2051         struct dm_pool_metadata *pmd;
2052         bool format_device = !read_only;
2053
2054         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2055         if (IS_ERR(pmd)) {
2056                 *error = "Error creating metadata object";
2057                 return (struct pool *)pmd;
2058         }
2059
2060         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2061         if (!pool) {
2062                 *error = "Error allocating memory for pool";
2063                 err_p = ERR_PTR(-ENOMEM);
2064                 goto bad_pool;
2065         }
2066
2067         pool->pmd = pmd;
2068         pool->sectors_per_block = block_size;
2069         if (block_size & (block_size - 1))
2070                 pool->sectors_per_block_shift = -1;
2071         else
2072                 pool->sectors_per_block_shift = __ffs(block_size);
2073         pool->low_water_blocks = 0;
2074         pool_features_init(&pool->pf);
2075         pool->prison = dm_bio_prison_create(PRISON_CELLS);
2076         if (!pool->prison) {
2077                 *error = "Error creating pool's bio prison";
2078                 err_p = ERR_PTR(-ENOMEM);
2079                 goto bad_prison;
2080         }
2081
2082         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2083         if (IS_ERR(pool->copier)) {
2084                 r = PTR_ERR(pool->copier);
2085                 *error = "Error creating pool's kcopyd client";
2086                 err_p = ERR_PTR(r);
2087                 goto bad_kcopyd_client;
2088         }
2089
2090         /*
2091          * Create a single-threaded workqueue that will service all devices
2092          * that use this metadata.
2093          */
2094         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2095         if (!pool->wq) {
2096                 *error = "Error creating pool's workqueue";
2097                 err_p = ERR_PTR(-ENOMEM);
2098                 goto bad_wq;
2099         }
2100
2101         INIT_WORK(&pool->worker, do_worker);
2102         INIT_DELAYED_WORK(&pool->waker, do_waker);
2103         spin_lock_init(&pool->lock);
2104         bio_list_init(&pool->deferred_flush_bios);
2105         INIT_LIST_HEAD(&pool->prepared_mappings);
2106         INIT_LIST_HEAD(&pool->prepared_discards);
2107         INIT_LIST_HEAD(&pool->active_thins);
2108         pool->low_water_triggered = false;
2109
2110         pool->shared_read_ds = dm_deferred_set_create();
2111         if (!pool->shared_read_ds) {
2112                 *error = "Error creating pool's shared read deferred set";
2113                 err_p = ERR_PTR(-ENOMEM);
2114                 goto bad_shared_read_ds;
2115         }
2116
2117         pool->all_io_ds = dm_deferred_set_create();
2118         if (!pool->all_io_ds) {
2119                 *error = "Error creating pool's all io deferred set";
2120                 err_p = ERR_PTR(-ENOMEM);
2121                 goto bad_all_io_ds;
2122         }
2123
2124         pool->next_mapping = NULL;
2125         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2126                                                       _new_mapping_cache);
2127         if (!pool->mapping_pool) {
2128                 *error = "Error creating pool's mapping mempool";
2129                 err_p = ERR_PTR(-ENOMEM);
2130                 goto bad_mapping_pool;
2131         }
2132
2133         pool->ref_count = 1;
2134         pool->last_commit_jiffies = jiffies;
2135         pool->pool_md = pool_md;
2136         pool->md_dev = metadata_dev;
2137         __pool_table_insert(pool);
2138
2139         return pool;
2140
2141 bad_mapping_pool:
2142         dm_deferred_set_destroy(pool->all_io_ds);
2143 bad_all_io_ds:
2144         dm_deferred_set_destroy(pool->shared_read_ds);
2145 bad_shared_read_ds:
2146         destroy_workqueue(pool->wq);
2147 bad_wq:
2148         dm_kcopyd_client_destroy(pool->copier);
2149 bad_kcopyd_client:
2150         dm_bio_prison_destroy(pool->prison);
2151 bad_prison:
2152         kfree(pool);
2153 bad_pool:
2154         if (dm_pool_metadata_close(pmd))
2155                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2156
2157         return err_p;
2158 }
2159
2160 static void __pool_inc(struct pool *pool)
2161 {
2162         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2163         pool->ref_count++;
2164 }
2165
2166 static void __pool_dec(struct pool *pool)
2167 {
2168         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2169         BUG_ON(!pool->ref_count);
2170         if (!--pool->ref_count)
2171                 __pool_destroy(pool);
2172 }
2173
2174 static struct pool *__pool_find(struct mapped_device *pool_md,
2175                                 struct block_device *metadata_dev,
2176                                 unsigned long block_size, int read_only,
2177                                 char **error, int *created)
2178 {
2179         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2180
2181         if (pool) {
2182                 if (pool->pool_md != pool_md) {
2183                         *error = "metadata device already in use by a pool";
2184                         return ERR_PTR(-EBUSY);
2185                 }
2186                 __pool_inc(pool);
2187
2188         } else {
2189                 pool = __pool_table_lookup(pool_md);
2190                 if (pool) {
2191                         if (pool->md_dev != metadata_dev) {
2192                                 *error = "different pool cannot replace a pool";
2193                                 return ERR_PTR(-EINVAL);
2194                         }
2195                         __pool_inc(pool);
2196
2197                 } else {
2198                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2199                         *created = 1;
2200                 }
2201         }
2202
2203         return pool;
2204 }
2205
2206 /*----------------------------------------------------------------
2207  * Pool target methods
2208  *--------------------------------------------------------------*/
2209 static void pool_dtr(struct dm_target *ti)
2210 {
2211         struct pool_c *pt = ti->private;
2212
2213         mutex_lock(&dm_thin_pool_table.mutex);
2214
2215         unbind_control_target(pt->pool, ti);
2216         __pool_dec(pt->pool);
2217         dm_put_device(ti, pt->metadata_dev);
2218         dm_put_device(ti, pt->data_dev);
2219         kfree(pt);
2220
2221         mutex_unlock(&dm_thin_pool_table.mutex);
2222 }
2223
2224 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2225                                struct dm_target *ti)
2226 {
2227         int r;
2228         unsigned argc;
2229         const char *arg_name;
2230
2231         static struct dm_arg _args[] = {
2232                 {0, 4, "Invalid number of pool feature arguments"},
2233         };
2234
2235         /*
2236          * No feature arguments supplied.
2237          */
2238         if (!as->argc)
2239                 return 0;
2240
2241         r = dm_read_arg_group(_args, as, &argc, &ti->error);
2242         if (r)
2243                 return -EINVAL;
2244
2245         while (argc && !r) {
2246                 arg_name = dm_shift_arg(as);
2247                 argc--;
2248
2249                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
2250                         pf->zero_new_blocks = false;
2251
2252                 else if (!strcasecmp(arg_name, "ignore_discard"))
2253                         pf->discard_enabled = false;
2254
2255                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
2256                         pf->discard_passdown = false;
2257
2258                 else if (!strcasecmp(arg_name, "read_only"))
2259                         pf->mode = PM_READ_ONLY;
2260
2261                 else if (!strcasecmp(arg_name, "error_if_no_space"))
2262                         pf->error_if_no_space = true;
2263
2264                 else {
2265                         ti->error = "Unrecognised pool feature requested";
2266                         r = -EINVAL;
2267                         break;
2268                 }
2269         }
2270
2271         return r;
2272 }
2273
2274 static void metadata_low_callback(void *context)
2275 {
2276         struct pool *pool = context;
2277
2278         DMWARN("%s: reached low water mark for metadata device: sending event.",
2279                dm_device_name(pool->pool_md));
2280
2281         dm_table_event(pool->ti->table);
2282 }
2283
2284 static sector_t get_dev_size(struct block_device *bdev)
2285 {
2286         return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2287 }
2288
2289 static void warn_if_metadata_device_too_big(struct block_device *bdev)
2290 {
2291         sector_t metadata_dev_size = get_dev_size(bdev);
2292         char buffer[BDEVNAME_SIZE];
2293
2294         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2295                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2296                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2297 }
2298
2299 static sector_t get_metadata_dev_size(struct block_device *bdev)
2300 {
2301         sector_t metadata_dev_size = get_dev_size(bdev);
2302
2303         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2304                 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
2305
2306         return metadata_dev_size;
2307 }
2308
2309 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2310 {
2311         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2312
2313         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
2314
2315         return metadata_dev_size;
2316 }
2317
2318 /*
2319  * When a metadata threshold is crossed, a dm event is triggered, and
2320  * userland should respond by growing the metadata device.  We could let
2321  * userland set the threshold, like we do with the data threshold, but I'm
2322  * not sure they know enough to do this well.
2323  */
2324 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2325 {
2326         /*
2327          * 4M is ample for all ops with the possible exception of thin
2328          * device deletion, which is harmless if it fails (just retry the
2329          * delete after you've grown the device).
2330          */
2331         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2332         return min((dm_block_t)1024ULL /* 4M */, quarter);
2333 }
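
/*
 * A worked example (assuming the 4KiB metadata block size implied by
 * the "4M" comment above): a 1GiB metadata device holds 262144 blocks,
 * so quarter = 65536 and the threshold clamps to 1024 blocks (4M).
 * The quarter term only wins for metadata devices smaller than 16MiB.
 */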
2334
2335 /*
2336  * thin-pool <metadata dev> <data dev>
2337  *           <data block size (sectors)>
2338  *           <low water mark (blocks)>
2339  *           [<#feature args> [<arg>]*]
2340  *
2341  * Optional feature arguments are:
2342  *           skip_block_zeroing: skip the zeroing of newly-provisioned blocks
2343  *           ignore_discard: disable discard support
2344  *           no_discard_passdown: don't pass discards down to the data device
2345  *           read_only: don't allow any changes to be made to the pool metadata
2346  *           error_if_no_space: error IOs, instead of queueing, if no space
2347  */
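
/*
 * A minimal example table line (hypothetical devices; sizes in
 * 512-byte sectors):
 *
 *   dmsetup create pool --table \
 *     "0 20971520 thin-pool /dev/sdb /dev/sdc 128 32768 1 skip_block_zeroing"
 *
 * i.e. a 10GiB pool with 64KiB data blocks, a low water mark of 32768
 * blocks and block zeroing disabled.
 */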
2348 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2349 {
2350         int r, pool_created = 0;
2351         struct pool_c *pt;
2352         struct pool *pool;
2353         struct pool_features pf;
2354         struct dm_arg_set as;
2355         struct dm_dev *data_dev;
2356         unsigned long block_size;
2357         dm_block_t low_water_blocks;
2358         struct dm_dev *metadata_dev;
2359         fmode_t metadata_mode;
2360
2361         /*
2362          * FIXME Remove validation from scope of lock.
2363          */
2364         mutex_lock(&dm_thin_pool_table.mutex);
2365
2366         if (argc < 4) {
2367                 ti->error = "Invalid argument count";
2368                 r = -EINVAL;
2369                 goto out_unlock;
2370         }
2371
2372         as.argc = argc;
2373         as.argv = argv;
2374
2375         /*
2376          * Set default pool features.
2377          */
2378         pool_features_init(&pf);
2379
2380         dm_consume_args(&as, 4);
2381         r = parse_pool_features(&as, &pf, ti);
2382         if (r)
2383                 goto out_unlock;
2384
2385         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2386         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2387         if (r) {
2388                 ti->error = "Error opening metadata block device";
2389                 goto out_unlock;
2390         }
2391         warn_if_metadata_device_too_big(metadata_dev->bdev);
2392
2393         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2394         if (r) {
2395                 ti->error = "Error getting data device";
2396                 goto out_metadata;
2397         }
2398
2399         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2400             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2401             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2402             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2403                 ti->error = "Invalid block size";
2404                 r = -EINVAL;
2405                 goto out;
2406         }
2407
2408         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2409                 ti->error = "Invalid low water mark";
2410                 r = -EINVAL;
2411                 goto out;
2412         }
2413
2414         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2415         if (!pt) {
2416                 r = -ENOMEM;
2417                 goto out;
2418         }
2419
2420         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2421                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2422         if (IS_ERR(pool)) {
2423                 r = PTR_ERR(pool);
2424                 goto out_free_pt;
2425         }
2426
2427         /*
2428          * 'pool_created' reflects whether this is the first table load.
2429          * Top-level discard support is not allowed to be changed after the
2430          * initial load, since changing it would require a pool reload to
2431          * propagate to the thin devices.
2432          */
2433         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2434                 ti->error = "Discard support cannot be disabled once enabled";
2435                 r = -EINVAL;
2436                 goto out_flags_changed;
2437         }
2438
2439         pt->pool = pool;
2440         pt->ti = ti;
2441         pt->metadata_dev = metadata_dev;
2442         pt->data_dev = data_dev;
2443         pt->low_water_blocks = low_water_blocks;
2444         pt->adjusted_pf = pt->requested_pf = pf;
2445         ti->num_flush_bios = 1;
2446
2447         /*
2448          * Only need to enable discards if the pool should pass
2449          * them down to the data device.  The thin device's discard
2450          * processing will cause mappings to be removed from the btree.
2451          */
2452         ti->discard_zeroes_data_unsupported = true;
2453         if (pf.discard_enabled && pf.discard_passdown) {
2454                 ti->num_discard_bios = 1;
2455
2456                 /*
2457                  * Setting 'discards_supported' circumvents the normal
2458                  * stacking of discard limits (this keeps the pool and
2459                  * thin devices' discard limits consistent).
2460                  */
2461                 ti->discards_supported = true;
2462         }
2463         ti->private = pt;
2464
2465         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2466                                                 calc_metadata_threshold(pt),
2467                                                 metadata_low_callback,
2468                                                 pool);
2469         if (r)
2470                 goto out_free_pt;
2471
2472         pt->callbacks.congested_fn = pool_is_congested;
2473         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2474
2475         mutex_unlock(&dm_thin_pool_table.mutex);
2476
2477         return 0;
2478
2479 out_flags_changed:
2480         __pool_dec(pool);
2481 out_free_pt:
2482         kfree(pt);
2483 out:
2484         dm_put_device(ti, data_dev);
2485 out_metadata:
2486         dm_put_device(ti, metadata_dev);
2487 out_unlock:
2488         mutex_unlock(&dm_thin_pool_table.mutex);
2489
2490         return r;
2491 }
2492
2493 static int pool_map(struct dm_target *ti, struct bio *bio)
2494 {
2495         int r;
2496         struct pool_c *pt = ti->private;
2497         struct pool *pool = pt->pool;
2498         unsigned long flags;
2499
2500         /*
2501          * As this is a singleton target, ti->begin is always zero.
2502          */
2503         spin_lock_irqsave(&pool->lock, flags);
2504         bio->bi_bdev = pt->data_dev->bdev;
2505         r = DM_MAPIO_REMAPPED;
2506         spin_unlock_irqrestore(&pool->lock, flags);
2507
2508         return r;
2509 }
2510
2511 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2512 {
2513         int r;
2514         struct pool_c *pt = ti->private;
2515         struct pool *pool = pt->pool;
2516         sector_t data_size = ti->len;
2517         dm_block_t sb_data_size;
2518
2519         *need_commit = false;
2520
2521         (void) sector_div(data_size, pool->sectors_per_block);
2522
2523         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2524         if (r) {
2525                 DMERR("%s: failed to retrieve data device size",
2526                       dm_device_name(pool->pool_md));
2527                 return r;
2528         }
2529
2530         if (data_size < sb_data_size) {
2531                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2532                       dm_device_name(pool->pool_md),
2533                       (unsigned long long)data_size, (unsigned long long)sb_data_size);
2534                 return -EINVAL;
2535
2536         } else if (data_size > sb_data_size) {
2537                 if (dm_pool_metadata_needs_check(pool->pmd)) {
2538                         DMERR("%s: unable to grow the data device until repaired.",
2539                               dm_device_name(pool->pool_md));
2540                         return 0;
2541                 }
2542
2543                 if (sb_data_size)
2544                         DMINFO("%s: growing the data device from %llu to %llu blocks",
2545                                dm_device_name(pool->pool_md),
2546                                (unsigned long long)sb_data_size, (unsigned long long)data_size);
2547                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2548                 if (r) {
2549                         metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
2550                         return r;
2551                 }
2552
2553                 *need_commit = true;
2554         }
2555
2556         return 0;
2557 }
2558
2559 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2560 {
2561         int r;
2562         struct pool_c *pt = ti->private;
2563         struct pool *pool = pt->pool;
2564         dm_block_t metadata_dev_size, sb_metadata_dev_size;
2565
2566         *need_commit = false;
2567
2568         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2569
2570         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2571         if (r) {
2572                 DMERR("%s: failed to retrieve metadata device size",
2573                       dm_device_name(pool->pool_md));
2574                 return r;
2575         }
2576
2577         if (metadata_dev_size < sb_metadata_dev_size) {
2578                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2579                       dm_device_name(pool->pool_md),
2580                       (unsigned long long)metadata_dev_size, (unsigned long long)sb_metadata_dev_size);
2581                 return -EINVAL;
2582
2583         } else if (metadata_dev_size > sb_metadata_dev_size) {
2584                 if (dm_pool_metadata_needs_check(pool->pmd)) {
2585                         DMERR("%s: unable to grow the metadata device until repaired.",
2586                               dm_device_name(pool->pool_md));
2587                         return 0;
2588                 }
2589
2590                 warn_if_metadata_device_too_big(pool->md_dev);
2591                 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2592                        dm_device_name(pool->pool_md),
2593                        (unsigned long long)sb_metadata_dev_size, (unsigned long long)metadata_dev_size);
2594                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2595                 if (r) {
2596                         metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
2597                         return r;
2598                 }
2599
2600                 *need_commit = true;
2601         }
2602
2603         return 0;
2604 }
2605
2606 /*
2607  * Retrieves the number of blocks of the data device from
2608  * the superblock and compares it to the actual device size,
2609  * thus resizing the data device in case it has grown.
2610  *
2611  * This copes both with the ctr opening a preallocated data device
2612  * that is then brought online by a resume,
2613  * -and-
2614  * with the resume method being called on its own after userspace
2615  * has grown the data device in reaction to a table event.
2616  */
2617 static int pool_preresume(struct dm_target *ti)
2618 {
2619         int r;
2620         bool need_commit1, need_commit2;
2621         struct pool_c *pt = ti->private;
2622         struct pool *pool = pt->pool;
2623
2624         /*
2625          * Take control of the pool object.
2626          */
2627         r = bind_control_target(pool, ti);
2628         if (r)
2629                 return r;
2630
2631         r = maybe_resize_data_dev(ti, &need_commit1);
2632         if (r)
2633                 return r;
2634
2635         r = maybe_resize_metadata_dev(ti, &need_commit2);
2636         if (r)
2637                 return r;
2638
2639         if (need_commit1 || need_commit2)
2640                 (void) commit(pool);
2641
2642         return 0;
2643 }
2644
2645 static void pool_resume(struct dm_target *ti)
2646 {
2647         struct pool_c *pt = ti->private;
2648         struct pool *pool = pt->pool;
2649         unsigned long flags;
2650
2651         spin_lock_irqsave(&pool->lock, flags);
2652         pool->low_water_triggered = false;
2653         spin_unlock_irqrestore(&pool->lock, flags);
2654         requeue_bios(pool);
2655
2656         do_waker(&pool->waker.work);
2657 }
2658
2659 static void pool_postsuspend(struct dm_target *ti)
2660 {
2661         struct pool_c *pt = ti->private;
2662         struct pool *pool = pt->pool;
2663
2664         cancel_delayed_work(&pool->waker);
2665         flush_workqueue(pool->wq);
2666         (void) commit(pool);
2667 }
2668
2669 static int check_arg_count(unsigned argc, unsigned args_required)
2670 {
2671         if (argc != args_required) {
2672                 DMWARN("Message received with %u arguments instead of %u.",
2673                        argc, args_required);
2674                 return -EINVAL;
2675         }
2676
2677         return 0;
2678 }
2679
2680 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2681 {
2682         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2683             *dev_id <= MAX_DEV_ID)
2684                 return 0;
2685
2686         if (warning)
2687                 DMWARN("Message received with invalid device id: %s", arg);
2688
2689         return -EINVAL;
2690 }
2691
2692 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2693 {
2694         dm_thin_id dev_id;
2695         int r;
2696
2697         r = check_arg_count(argc, 2);
2698         if (r)
2699                 return r;
2700
2701         r = read_dev_id(argv[1], &dev_id, 1);
2702         if (r)
2703                 return r;
2704
2705         r = dm_pool_create_thin(pool->pmd, dev_id);
2706         if (r) {
2707                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2708                        argv[1]);
2709                 return r;
2710         }
2711
2712         return 0;
2713 }
2714
2715 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2716 {
2717         dm_thin_id dev_id;
2718         dm_thin_id origin_dev_id;
2719         int r;
2720
2721         r = check_arg_count(argc, 3);
2722         if (r)
2723                 return r;
2724
2725         r = read_dev_id(argv[1], &dev_id, 1);
2726         if (r)
2727                 return r;
2728
2729         r = read_dev_id(argv[2], &origin_dev_id, 1);
2730         if (r)
2731                 return r;
2732
2733         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2734         if (r) {
2735                 DMWARN("Creation of new snapshot %s of device %s failed.",
2736                        argv[1], argv[2]);
2737                 return r;
2738         }
2739
2740         return 0;
2741 }
2742
2743 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2744 {
2745         dm_thin_id dev_id;
2746         int r;
2747
2748         r = check_arg_count(argc, 2);
2749         if (r)
2750                 return r;
2751
2752         r = read_dev_id(argv[1], &dev_id, 1);
2753         if (r)
2754                 return r;
2755
2756         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2757         if (r)
2758                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2759
2760         return r;
2761 }
2762
2763 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2764 {
2765         dm_thin_id old_id, new_id;
2766         int r;
2767
2768         r = check_arg_count(argc, 3);
2769         if (r)
2770                 return r;
2771
2772         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2773                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2774                 return -EINVAL;
2775         }
2776
2777         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2778                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2779                 return -EINVAL;
2780         }
2781
2782         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2783         if (r) {
2784                 DMWARN("Failed to change transaction id from %s to %s.",
2785                        argv[1], argv[2]);
2786                 return r;
2787         }
2788
2789         return 0;
2790 }
2791
2792 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2793 {
2794         int r;
2795
2796         r = check_arg_count(argc, 1);
2797         if (r)
2798                 return r;
2799
2800         (void) commit(pool);
2801
2802         r = dm_pool_reserve_metadata_snap(pool->pmd);
2803         if (r)
2804                 DMWARN("reserve_metadata_snap message failed.");
2805
2806         return r;
2807 }
2808
2809 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2810 {
2811         int r;
2812
2813         r = check_arg_count(argc, 1);
2814         if (r)
2815                 return r;
2816
2817         r = dm_pool_release_metadata_snap(pool->pmd);
2818         if (r)
2819                 DMWARN("release_metadata_snap message failed.");
2820
2821         return r;
2822 }
2823
2824 /*
2825  * Messages supported:
2826  *   create_thin        <dev_id>
2827  *   create_snap        <dev_id> <origin_id>
2828  *   delete             <dev_id>
2830  *   set_transaction_id <current_trans_id> <new_trans_id>
2831  *   reserve_metadata_snap
2832  *   release_metadata_snap
2833  */
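
/*
 * e.g. taking an internal snapshot (new device 1) of thin device 0 via
 * a hypothetical pool device node:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 */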
2834 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2835 {
2836         int r = -EINVAL;
2837         struct pool_c *pt = ti->private;
2838         struct pool *pool = pt->pool;
2839
2840         if (!strcasecmp(argv[0], "create_thin"))
2841                 r = process_create_thin_mesg(argc, argv, pool);
2842
2843         else if (!strcasecmp(argv[0], "create_snap"))
2844                 r = process_create_snap_mesg(argc, argv, pool);
2845
2846         else if (!strcasecmp(argv[0], "delete"))
2847                 r = process_delete_mesg(argc, argv, pool);
2848
2849         else if (!strcasecmp(argv[0], "set_transaction_id"))
2850                 r = process_set_transaction_id_mesg(argc, argv, pool);
2851
2852         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2853                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2854
2855         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2856                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2857
2858         else
2859                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2860
2861         if (!r)
2862                 (void) commit(pool);
2863
2864         return r;
2865 }
2866
2867 static void emit_flags(struct pool_features *pf, char *result,
2868                        unsigned sz, unsigned maxlen)
2869 {
2870         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2871                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2872                 pf->error_if_no_space;
2873         DMEMIT("%u ", count);
2874
2875         if (!pf->zero_new_blocks)
2876                 DMEMIT("skip_block_zeroing ");
2877
2878         if (!pf->discard_enabled)
2879                 DMEMIT("ignore_discard ");
2880
2881         if (!pf->discard_passdown)
2882                 DMEMIT("no_discard_passdown ");
2883
2884         if (pf->mode == PM_READ_ONLY)
2885                 DMEMIT("read_only ");
2886
2887         if (pf->error_if_no_space)
2888                 DMEMIT("error_if_no_space ");
2889 }
2890
2891 /*
2892  * Status line is:
2893  *    <transaction id> <used metadata sectors>/<total metadata sectors>
2894  *    <used data sectors>/<total data sectors> <held metadata root>
2895  */
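
/*
 * e.g. (illustrative values only):
 *
 *   0 983/24576 21/163840 - rw discard_passdown queue_if_no_space
 */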
2896 static void pool_status(struct dm_target *ti, status_type_t type,
2897                         unsigned status_flags, char *result, unsigned maxlen)
2898 {
2899         int r;
2900         unsigned sz = 0;
2901         uint64_t transaction_id;
2902         dm_block_t nr_free_blocks_data;
2903         dm_block_t nr_free_blocks_metadata;
2904         dm_block_t nr_blocks_data;
2905         dm_block_t nr_blocks_metadata;
2906         dm_block_t held_root;
2907         char buf[BDEVNAME_SIZE];
2908         char buf2[BDEVNAME_SIZE];
2909         struct pool_c *pt = ti->private;
2910         struct pool *pool = pt->pool;
2911
2912         switch (type) {
2913         case STATUSTYPE_INFO:
2914                 if (get_pool_mode(pool) == PM_FAIL) {
2915                         DMEMIT("Fail");
2916                         break;
2917                 }
2918
2919                 /* Commit to ensure statistics aren't out-of-date */
2920                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2921                         (void) commit(pool);
2922
2923                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2924                 if (r) {
2925                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2926                               dm_device_name(pool->pool_md), r);
2927                         goto err;
2928                 }
2929
2930                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2931                 if (r) {
2932                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2933                               dm_device_name(pool->pool_md), r);
2934                         goto err;
2935                 }
2936
2937                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2938                 if (r) {
2939                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2940                               dm_device_name(pool->pool_md), r);
2941                         goto err;
2942                 }
2943
2944                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2945                 if (r) {
2946                         DMERR("%s: dm_pool_get_free_block_count returned %d",
2947                               dm_device_name(pool->pool_md), r);
2948                         goto err;
2949                 }
2950
2951                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2952                 if (r) {
2953                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
2954                               dm_device_name(pool->pool_md), r);
2955                         goto err;
2956                 }
2957
2958                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2959                 if (r) {
2960                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
2961                               dm_device_name(pool->pool_md), r);
2962                         goto err;
2963                 }
2964
2965                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2966                        (unsigned long long)transaction_id,
2967                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2968                        (unsigned long long)nr_blocks_metadata,
2969                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2970                        (unsigned long long)nr_blocks_data);
2971
2972                 if (held_root)
2973                         DMEMIT("%llu ", held_root);
2974                 else
2975                         DMEMIT("- ");
2976
2977                 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
2978                         DMEMIT("out_of_data_space ");
2979                 else if (pool->pf.mode == PM_READ_ONLY)
2980                         DMEMIT("ro ");
2981                 else
2982                         DMEMIT("rw ");
2983
2984                 if (!pool->pf.discard_enabled)
2985                         DMEMIT("ignore_discard ");
2986                 else if (pool->pf.discard_passdown)
2987                         DMEMIT("discard_passdown ");
2988                 else
2989                         DMEMIT("no_discard_passdown ");
2990
2991                 if (pool->pf.error_if_no_space)
2992                         DMEMIT("error_if_no_space ");
2993                 else
2994                         DMEMIT("queue_if_no_space ");
2995
2996                 break;
2997
2998         case STATUSTYPE_TABLE:
2999                 DMEMIT("%s %s %lu %llu ",
3000                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3001                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3002                        (unsigned long)pool->sectors_per_block,
3003                        (unsigned long long)pt->low_water_blocks);
3004                 emit_flags(&pt->requested_pf, result, sz, maxlen);
3005                 break;
3006         }
3007         return;
3008
3009 err:
3010         DMEMIT("Error");
3011 }
3012
3013 static int pool_iterate_devices(struct dm_target *ti,
3014                                 iterate_devices_callout_fn fn, void *data)
3015 {
3016         struct pool_c *pt = ti->private;
3017
3018         return fn(ti, pt->data_dev, 0, ti->len, data);
3019 }
3020
3021 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3022                       struct bio_vec *biovec, int max_size)
3023 {
3024         struct pool_c *pt = ti->private;
3025         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3026
3027         if (!q->merge_bvec_fn)
3028                 return max_size;
3029
3030         bvm->bi_bdev = pt->data_dev->bdev;
3031
3032         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3033 }
3034
3035 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
3036 {
3037         struct pool *pool = pt->pool;
3038         struct queue_limits *data_limits;
3039
3040         limits->max_discard_sectors = pool->sectors_per_block;
3041
3042         /*
3043          * discard_granularity is just a hint, and not enforced.
3044          */
3045         if (pt->adjusted_pf.discard_passdown) {
3046                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
3047                 limits->discard_granularity = data_limits->discard_granularity;
3048         } else
3049                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
3050 }
3051
3052 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3053 {
3054         struct pool_c *pt = ti->private;
3055         struct pool *pool = pt->pool;
3056         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3057
3058         /*
3059          * If the system-determined stacked limits are compatible with the
3060          * pool's blocksize (the blocksize is a factor of io_opt), do not override them.
3061          */
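        /*
         * e.g. (illustrative numbers): with 64KiB pool blocks, a stacked
         * io_opt of 512KiB is a multiple and is left alone, whereas a
         * stacked io_opt of 48KiB is overridden to 64KiB below.
         */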
3062         if (io_opt_sectors < pool->sectors_per_block ||
3063             do_div(io_opt_sectors, pool->sectors_per_block)) {
3064                 blk_limits_io_min(limits, 0);
3065                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3066         }
3067
3068         /*
3069          * pt->adjusted_pf is a staging area for the actual features to use.
3070          * They get transferred to the live pool in bind_control_target()
3071          * called from pool_preresume().
3072          */
3073         if (!pt->adjusted_pf.discard_enabled) {
3074                 /*
3075                  * Must explicitly disallow stacking discard limits; otherwise the
3076                  * block layer will stack them if the pool's data device has support.
3077                  * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3078                  * user to see that, so make sure to set all discard limits to 0.
3079                  */
3080                 limits->discard_granularity = 0;
3081                 return;
3082         }
3083
3084         disable_passdown_if_not_supported(pt);
3085
3086         set_discard_limits(pt, limits);
3087 }
3088
3089 static struct target_type pool_target = {
3090         .name = "thin-pool",
3091         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3092                     DM_TARGET_IMMUTABLE,
3093         .version = {1, 12, 0},
3094         .module = THIS_MODULE,
3095         .ctr = pool_ctr,
3096         .dtr = pool_dtr,
3097         .map = pool_map,
3098         .postsuspend = pool_postsuspend,
3099         .preresume = pool_preresume,
3100         .resume = pool_resume,
3101         .message = pool_message,
3102         .status = pool_status,
3103         .merge = pool_merge,
3104         .iterate_devices = pool_iterate_devices,
3105         .io_hints = pool_io_hints,
3106 };

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
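/*
 * Each thin_c is reference counted: thin_get()/thin_put() track in-flight
 * users, and the final thin_put() completes tc->can_destroy so that
 * thin_dtr() can safely tear the device down.
 */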
static void thin_get(struct thin_c *tc)
{
        atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
        if (atomic_dec_and_test(&tc->refcount))
                complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;
        unsigned long flags;

        thin_put(tc);
        wait_for_completion(&tc->can_destroy);

        spin_lock_irqsave(&tc->pool->lock, flags);
        list_del_rcu(&tc->list);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
        synchronize_rcu();

        mutex_lock(&dm_thin_pool_table.mutex);

        __pool_dec(tc->pool);
        dm_pool_close_thin_device(tc->td);
        dm_put_device(ti, tc->pool_dev);
        if (tc->origin_dev)
                dm_put_device(ti, tc->origin_dev);
        kfree(tc);

        mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
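/*
 * For illustration (identifiers below are made up): a thin device is first
 * created inside the pool via a message, then activated with a table that
 * matches the parameters above:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 *
 * giving a 1GiB (2097152-sector) thinly-provisioned volume backed by
 * internal device id 0.
 */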
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        struct thin_c *tc;
        struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
        unsigned long flags;

        mutex_lock(&dm_thin_pool_table.mutex);

        if (argc != 2 && argc != 3) {
                ti->error = "Invalid argument count";
                r = -EINVAL;
                goto out_unlock;
        }

        tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
        if (!tc) {
                ti->error = "Out of memory";
                r = -ENOMEM;
                goto out_unlock;
        }
        spin_lock_init(&tc->lock);
        bio_list_init(&tc->deferred_bio_list);
        bio_list_init(&tc->retry_on_resume_list);
        tc->sort_bio_list = RB_ROOT;

        if (argc == 3) {
                r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
                if (r) {
                        ti->error = "Error opening origin device";
                        goto bad_origin_dev;
                }
                tc->origin_dev = origin_dev;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
        if (r) {
                ti->error = "Error opening pool device";
                goto bad_pool_dev;
        }
        tc->pool_dev = pool_dev;

        if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
                ti->error = "Invalid device id";
                r = -EINVAL;
                goto bad_common;
        }

        pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
        if (!pool_md) {
                ti->error = "Couldn't get pool mapped device";
                r = -EINVAL;
                goto bad_common;
        }

        tc->pool = __pool_table_lookup(pool_md);
        if (!tc->pool) {
                ti->error = "Couldn't find pool object";
                r = -EINVAL;
                goto bad_pool_lookup;
        }
        __pool_inc(tc->pool);

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                ti->error = "Couldn't open thin device, pool is in fail mode";
                r = -EINVAL;
                goto bad_thin_open;
        }

        r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
        if (r) {
                ti->error = "Couldn't open thin internal device";
                goto bad_thin_open;
        }

        r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
        if (r)
                goto bad_target_max_io_len;

        ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

        /* In case the pool supports discards, pass them on. */
        ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }

        dm_put(pool_md);

        mutex_unlock(&dm_thin_pool_table.mutex);

        atomic_set(&tc->refcount, 1);
        init_completion(&tc->can_destroy);

        spin_lock_irqsave(&tc->pool->lock, flags);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
         * This synchronize_rcu() call is needed here, otherwise we risk a
         * wake_worker() call finding no bios to process because the newly
         * added tc isn't yet visible.  Waiting here reduces latency, since
         * we aren't then dependent on the periodic commit to wake_worker().
         */
        synchronize_rcu();

        return 0;

bad_target_max_io_len:
        dm_pool_close_thin_device(tc->td);
bad_thin_open:
        __pool_dec(tc->pool);
bad_pool_lookup:
        dm_put(pool_md);
bad_common:
        dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
        if (tc->origin_dev)
                dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
        kfree(tc);
out_unlock:
        mutex_unlock(&dm_thin_pool_table.mutex);

        return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

        return thin_bio_map(ti, bio);
}
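
/*
 * dm_target_offset() rebases the bio onto the target: it subtracts
 * ti->begin from the absolute sector.  For example (numbers invented), if
 * this target starts at sector 1024 of the mapped device and a bio
 * arrives for sector 1536, thin_bio_map() sees sector 512 within the
 * thin device.
 */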

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct list_head work;
        struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;

        if (h->shared_read_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->shared_read_entry, &work);

                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
                        list_del(&m->list);
                        m->quiesced = true;
                        __maybe_add_mapping(m);
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }

        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->all_io_entry, &work);
                if (!list_empty(&work)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        list_for_each_entry_safe(m, tmp, &work, list)
                                list_add_tail(&m->list, &pool->prepared_discards);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_worker(pool);
                }
        }

        return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        if (dm_noflush_suspending(ti))
                noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        /*
         * The dm_noflush_suspending flag has been cleared by now, so
         * unfortunately we must always run this.
         */
        noflush_work(tc, do_noflush_stop);
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
static void thin_status(struct dm_target *ti, status_type_t type,
                        unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        ssize_t sz = 0;
        dm_block_t mapped, highest;
        char buf[BDEVNAME_SIZE];
        struct thin_c *tc = ti->private;

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                DMEMIT("Fail");
                return;
        }

        if (!tc->td)
                DMEMIT("-");
        else {
                switch (type) {
                case STATUSTYPE_INFO:
                        r = dm_thin_get_mapped_count(tc->td, &mapped);
                        if (r) {
                                DMERR("dm_thin_get_mapped_count returned %d", r);
                                goto err;
                        }

                        r = dm_thin_get_highest_mapped_block(tc->td, &highest);
                        if (r < 0) {
                                DMERR("dm_thin_get_highest_mapped_block returned %d", r);
                                goto err;
                        }

                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
                        if (r)
                                DMEMIT("%llu", ((highest + 1) *
                                                tc->pool->sectors_per_block) - 1);
                        else
                                DMEMIT("-");
                        break;

                case STATUSTYPE_TABLE:
                        DMEMIT("%s %lu",
                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
                               (unsigned long) tc->dev_id);
                        if (tc->origin_dev)
                                DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
                        break;
                }
        }

        return;

err:
        DMEMIT("Error");
}
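
/*
 * Example STATUSTYPE_INFO output (values invented): with a 128-sector
 * block size, 8192 mapped blocks and highest mapped block 16383, this
 * emits "1048576 2097151" -- 8192 * 128 mapped sectors, and
 * (16383 + 1) * 128 - 1 as the highest mapped sector.
 */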

static int thin_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
{
        sector_t blocks;
        struct thin_c *tc = ti->private;
        struct pool *pool = tc->pool;

        /*
         * We can't call dm_pool_get_data_dev_size() since that blocks.  So
         * we follow a more convoluted path through to the pool's target.
         */
        if (!pool->ti)
                return 0;       /* nothing is bound */

        blocks = pool->ti->len;
        (void) sector_div(blocks, pool->sectors_per_block);
        if (blocks)
                return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

        return 0;
}
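
/*
 * The sector_div() above rounds the bound length down to a whole number
 * of blocks before scaling back up.  For example (invented numbers): a
 * pool target of 20971521 sectors with 128-sector blocks yields 163840
 * blocks, so the callout covers 20971520 sectors and the trailing
 * partial block is excluded.
 */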

static struct target_type thin_target = {
        .name = "thin",
        .version = {1, 12, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
        .map = thin_map,
        .end_io = thin_endio,
        .presuspend = thin_presuspend,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
        int r;

        pool_table_init();

        r = dm_register_target(&thin_target);
        if (r)
                return r;

        r = dm_register_target(&pool_target);
        if (r)
                goto bad_pool_target;

        r = -ENOMEM;

        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
        if (!_new_mapping_cache)
                goto bad_new_mapping_cache;

        return 0;

bad_new_mapping_cache:
        dm_unregister_target(&pool_target);
bad_pool_target:
        dm_unregister_target(&thin_target);

        return r;
}

static void dm_thin_exit(void)
{
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);

        kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");