drivers/md/dm-thin.c (pandora-kernel.git)
1 /*
2  * Copyright (C) 2011 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8
9 #include <linux/device-mapper.h>
10 #include <linux/dm-io.h>
11 #include <linux/dm-kcopyd.h>
12 #include <linux/list.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/vmalloc.h>
17
18 #define DM_MSG_PREFIX   "thin"
19
20 /*
21  * Tunable constants
22  */
23 #define ENDIO_HOOK_POOL_SIZE 1024
24 #define DEFERRED_SET_SIZE 64
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27
28 /*
29  * The block size of the device holding pool data must be
30  * between 64KB and 1GB.
31  */
32 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
33 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
34
35 /*
36  * The metadata device is currently limited in size.  The limitation is
37  * checked lower down in dm-space-map-metadata, but we also check it here
38  * so we can fail early.
39  *
40  * We have one block of index, which can hold 255 index entries.  Each
41  * index entry contains allocation info about 16k metadata blocks.
42  */
43 #define METADATA_DEV_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
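/*
 * Editor's note: a worked example of the limit above, assuming
 * THIN_METADATA_BLOCK_SIZE is 4096 bytes (8 sectors), as defined in
 * dm-thin-metadata.h:
 *
 *   255 entries * 16384 blocks/entry * 8 sectors/block = 33423360 sectors,
 *
 * i.e. just under 16GiB of metadata space.
 */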
44
45 /*
46  * Device id is restricted to 24 bits.
47  */
48 #define MAX_DEV_ID ((1 << 24) - 1)
49
50 /*
51  * How do we handle breaking sharing of data blocks?
52  * =================================================
53  *
54  * We use a standard copy-on-write btree to store the mappings for the
55  * devices (note I'm talking about copy-on-write of the metadata here, not
56  * the data).  When you take an internal snapshot you clone the root node
57  * of the origin btree.  After this there is no concept of an origin or a
58  * snapshot.  They are just two device trees that happen to point to the
59  * same data blocks.
60  *
61  * When we get a write in, we decide if it's to a shared data block using
62  * some timestamp magic.  If it is, we have to break sharing.
63  *
64  * Let's say we write to a shared block in what was the origin.  The
65  * steps are:
66  *
67  * i) plug further io to this physical block. (see bio_prison code).
68  *
69  * ii) quiesce any read io to that shared data block, including reads
70  * from all devices that share this block.  (see deferred_set code)
71  *
72  * iii) copy the data block to a newly allocated block.  This step can be
73  * skipped if the io covers the whole block. (schedule_copy).
74  *
75  * iv) insert the new mapping into the origin's btree
76  * (process_prepared_mappings).  This act of inserting breaks some
77  * sharing of btree nodes between the two devices.  Breaking sharing only
78  * affects the btree of that specific device.  Btrees for the other
79  * devices that share the block never change.  The btree for the origin
80  * device as it was after the last commit is untouched, i.e. we're using
81  * persistent data structures in the functional programming sense.
82  *
83  * v) unplug io to this physical block, including the io that triggered
84  * the breaking of sharing.
85  *
86  * Steps (ii) and (iii) occur in parallel.
87  *
88  * The metadata _doesn't_ need to be committed before the io continues.  We
89  * get away with this because the io is always written to a _new_ block.
90  * If there's a crash, then:
91  *
92  * - The origin mapping will point to the old origin block (the shared
93  * one).  This will contain the data as it was before the io that triggered
94  * the breaking of sharing came in.
95  *
96  * - The snap mapping still points to the old block, as it would after
97  * the commit.
98  *
99  * The downside of this scheme is that the timestamp magic isn't perfect, and
100  * will continue to think that the data block in the snapshot device is shared
101  * even after the write to the origin has broken sharing.  I suspect data
102  * blocks will typically be shared by many different devices, so we're
103  * breaking sharing n + 1 times, rather than n, where n is the number of
104  * devices that reference this data block.  At the moment I think the
105  * benefits far, far outweigh the disadvantages.
106  */
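/*
 * Editor's sketch of the sequence above, expressed with the helpers defined
 * later in this file (the real control flow is split across
 * process_shared_bio(), schedule_copy(), copy_complete() and
 * process_prepared_mapping()):
 *
 *   bio_detain(pool->prison, &key, bio, &cell);           i)   plug io to the block
 *   ds_add_work(&pool->ds, &m->list);                     ii)  wait for in-flight reads
 *   dm_kcopyd_copy(pool->copier, &from, 1, &to,
 *                  0, copy_complete, m);                  iii) copy old block to new
 *   dm_thin_insert_block(tc->td, virt_block, data_dest);  iv)  insert the new mapping
 *   cell_defer(tc, cell, data_dest);                      v)   unplug the held bios
 */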
107
108 /*----------------------------------------------------------------*/
109
110 /*
111  * Sometimes we can't deal with a bio straight away, so we put it in prison
112  * where it can't cause any mischief.  Bios are put in a cell identified
113  * by a key; multiple bios can be in the same cell.  When the cell is
114  * subsequently unlocked the bios become available.
115  */
116 struct bio_prison;
117
118 struct cell_key {
119         int virtual;
120         dm_thin_id dev;
121         dm_block_t block;
122 };
123
124 struct cell {
125         struct hlist_node list;
126         struct bio_prison *prison;
127         struct cell_key key;
128         struct bio *holder;
129         struct bio_list bios;
130 };
131
132 struct bio_prison {
133         spinlock_t lock;
134         mempool_t *cell_pool;
135
136         unsigned nr_buckets;
137         unsigned hash_mask;
138         struct hlist_head *cells;
139 };
140
141 static uint32_t calc_nr_buckets(unsigned nr_cells)
142 {
143         uint32_t n = 128;
144
145         nr_cells /= 4;
146         nr_cells = min(nr_cells, 8192u);
147
148         while (n < nr_cells)
149                 n <<= 1;
150
151         return n;
152 }
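/*
 * Editor's note: with the PRISON_CELLS value used below (1024), nr_cells
 * becomes 1024 / 4 = 256, and the smallest power of two >= 256 is 256, so
 * the prison gets 256 hash buckets.
 */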
153
154 /*
155  * @nr_cells should be the number of cells you want in use _concurrently_.
156  * Don't confuse it with the number of distinct keys.
157  */
158 static struct bio_prison *prison_create(unsigned nr_cells)
159 {
160         unsigned i;
161         uint32_t nr_buckets = calc_nr_buckets(nr_cells);
162         struct bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
163
164         if (!prison)
165                 return NULL;
166
167         spin_lock_init(&prison->lock);
168         prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
169                                                         sizeof(struct cell));
170         if (!prison->cell_pool) {
171                 kfree(prison);
172                 return NULL;
173         }
174
175         prison->cells = vmalloc(sizeof(*prison->cells) * nr_buckets);
176         if (!prison->cells) {
177                 mempool_destroy(prison->cell_pool);
178                 kfree(prison);
179                 return NULL;
180         }
181
182         prison->nr_buckets = nr_buckets;
183         prison->hash_mask = nr_buckets - 1;
184         for (i = 0; i < nr_buckets; i++)
185                 INIT_HLIST_HEAD(prison->cells + i);
186
187         return prison;
188 }
189
190 static void prison_destroy(struct bio_prison *prison)
191 {
192         vfree(prison->cells);
193         mempool_destroy(prison->cell_pool);
194         kfree(prison);
195 }
196
197 static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
198 {
199         const unsigned long BIG_PRIME = 4294967291UL;
200         uint64_t hash = key->block * BIG_PRIME;
201
202         return (uint32_t) (hash & prison->hash_mask);
203 }
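/*
 * Editor's note: only key->block feeds the hash; because nr_buckets is
 * always a power of two (see calc_nr_buckets), masking with hash_mask is
 * equivalent to taking the product modulo nr_buckets, e.g. with 256
 * buckets hash_mask is 255 and the result is (block * BIG_PRIME) % 256.
 */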
204
205 static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
206 {
207         return (lhs->virtual == rhs->virtual) &&
208                (lhs->dev == rhs->dev) &&
209                (lhs->block == rhs->block);
210 }
211
212 static struct cell *__search_bucket(struct hlist_head *bucket,
213                                     struct cell_key *key)
214 {
215         struct cell *cell;
216         struct hlist_node *tmp;
217
218         hlist_for_each_entry(cell, tmp, bucket, list)
219                 if (keys_equal(&cell->key, key))
220                         return cell;
221
222         return NULL;
223 }
224
225 /*
226  * This may block if a new cell needs allocating.  You must ensure that
227  * cells will be unlocked even if the calling thread is blocked.
228  *
229  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
230  */
231 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
232                       struct bio *inmate, struct cell **ref)
233 {
234         int r = 1;
235         unsigned long flags;
236         uint32_t hash = hash_key(prison, key);
237         struct cell *cell, *cell2;
238
239         BUG_ON(hash >= prison->nr_buckets);
240
241         spin_lock_irqsave(&prison->lock, flags);
242
243         cell = __search_bucket(prison->cells + hash, key);
244         if (cell) {
245                 bio_list_add(&cell->bios, inmate);
246                 goto out;
247         }
248
249         /*
250          * Allocate a new cell
251          */
252         spin_unlock_irqrestore(&prison->lock, flags);
253         cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
254         spin_lock_irqsave(&prison->lock, flags);
255
256         /*
257          * We've been unlocked, so we have to double check that
258          * nobody else has inserted this cell in the meantime.
259          */
260         cell = __search_bucket(prison->cells + hash, key);
261         if (cell) {
262                 mempool_free(cell2, prison->cell_pool);
263                 bio_list_add(&cell->bios, inmate);
264                 goto out;
265         }
266
267         /*
268          * Use new cell.
269          */
270         cell = cell2;
271
272         cell->prison = prison;
273         memcpy(&cell->key, key, sizeof(cell->key));
274         cell->holder = inmate;
275         bio_list_init(&cell->bios);
276         hlist_add_head(&cell->list, prison->cells + hash);
277
278         r = 0;
279
280 out:
281         spin_unlock_irqrestore(&prison->lock, flags);
282
283         *ref = cell;
284
285         return r;
286 }
287
288 /*
289  * @inmates must have been initialised prior to this call
290  */
291 static void __cell_release(struct cell *cell, struct bio_list *inmates)
292 {
293         struct bio_prison *prison = cell->prison;
294
295         hlist_del(&cell->list);
296
297         if (inmates) {
298                 bio_list_add(inmates, cell->holder);
299                 bio_list_merge(inmates, &cell->bios);
300         }
301
302         mempool_free(cell, prison->cell_pool);
303 }
304
305 static void cell_release(struct cell *cell, struct bio_list *bios)
306 {
307         unsigned long flags;
308         struct bio_prison *prison = cell->prison;
309
310         spin_lock_irqsave(&prison->lock, flags);
311         __cell_release(cell, bios);
312         spin_unlock_irqrestore(&prison->lock, flags);
313 }
314
315 /*
316  * There are a couple of places where we put a bio into a cell briefly
317  * before taking it out again.  In these situations we know that no other
318  * bio may be in the cell.  This function releases the cell, and also does
319  * a sanity check.
320  */
321 static void __cell_release_singleton(struct cell *cell, struct bio *bio)
322 {
323         BUG_ON(cell->holder != bio);
324         BUG_ON(!bio_list_empty(&cell->bios));
325
326         __cell_release(cell, NULL);
327 }
328
329 static void cell_release_singleton(struct cell *cell, struct bio *bio)
330 {
331         unsigned long flags;
332         struct bio_prison *prison = cell->prison;
333
334         spin_lock_irqsave(&prison->lock, flags);
335         __cell_release_singleton(cell, bio);
336         spin_unlock_irqrestore(&prison->lock, flags);
337 }
338
339 /*
340  * Sometimes we don't want the holder, just the additional bios.
341  */
342 static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
343 {
344         struct bio_prison *prison = cell->prison;
345
346         hlist_del(&cell->list);
347         bio_list_merge(inmates, &cell->bios);
348
349         mempool_free(cell, prison->cell_pool);
350 }
351
352 static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
353 {
354         unsigned long flags;
355         struct bio_prison *prison = cell->prison;
356
357         spin_lock_irqsave(&prison->lock, flags);
358         __cell_release_no_holder(cell, inmates);
359         spin_unlock_irqrestore(&prison->lock, flags);
360 }
361
362 static void cell_error(struct cell *cell)
363 {
364         struct bio_prison *prison = cell->prison;
365         struct bio_list bios;
366         struct bio *bio;
367         unsigned long flags;
368
369         bio_list_init(&bios);
370
371         spin_lock_irqsave(&prison->lock, flags);
372         __cell_release(cell, &bios);
373         spin_unlock_irqrestore(&prison->lock, flags);
374
375         while ((bio = bio_list_pop(&bios)))
376                 bio_io_error(bio);
377 }
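/*
 * Editor's sketch of the typical detain/release pattern (process_bio()
 * further down is the real user of this API):
 *
 *   struct cell *cell;
 *   struct cell_key key;
 *
 *   build_virtual_key(tc->td, block, &key);
 *   if (bio_detain(tc->pool->prison, &key, bio, &cell))
 *           return;                  the cell is already held; the bio waits in it
 *
 *   ... the first bio does the work (provision, copy, ...) ...
 *
 *   cell_defer(tc, cell, data_block);   later: requeue all the detained bios
 */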
378
379 /*----------------------------------------------------------------*/
380
381 /*
382  * We use the deferred set to keep track of pending reads to shared blocks.
383  * We do this to ensure the new mapping caused by a write isn't performed
384  * until these prior reads have completed.  Otherwise the insertion of the
385  * new mapping could free the old block that the read bios are mapped to.
386  */
387
388 struct deferred_set;
389 struct deferred_entry {
390         struct deferred_set *ds;
391         unsigned count;
392         struct list_head work_items;
393 };
394
395 struct deferred_set {
396         spinlock_t lock;
397         unsigned current_entry;
398         unsigned sweeper;
399         struct deferred_entry entries[DEFERRED_SET_SIZE];
400 };
401
402 static void ds_init(struct deferred_set *ds)
403 {
404         int i;
405
406         spin_lock_init(&ds->lock);
407         ds->current_entry = 0;
408         ds->sweeper = 0;
409         for (i = 0; i < DEFERRED_SET_SIZE; i++) {
410                 ds->entries[i].ds = ds;
411                 ds->entries[i].count = 0;
412                 INIT_LIST_HEAD(&ds->entries[i].work_items);
413         }
414 }
415
416 static struct deferred_entry *ds_inc(struct deferred_set *ds)
417 {
418         unsigned long flags;
419         struct deferred_entry *entry;
420
421         spin_lock_irqsave(&ds->lock, flags);
422         entry = ds->entries + ds->current_entry;
423         entry->count++;
424         spin_unlock_irqrestore(&ds->lock, flags);
425
426         return entry;
427 }
428
429 static unsigned ds_next(unsigned index)
430 {
431         return (index + 1) % DEFERRED_SET_SIZE;
432 }
433
434 static void __sweep(struct deferred_set *ds, struct list_head *head)
435 {
436         while ((ds->sweeper != ds->current_entry) &&
437                !ds->entries[ds->sweeper].count) {
438                 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
439                 ds->sweeper = ds_next(ds->sweeper);
440         }
441
442         if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
443                 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
444 }
445
446 static void ds_dec(struct deferred_entry *entry, struct list_head *head)
447 {
448         unsigned long flags;
449
450         spin_lock_irqsave(&entry->ds->lock, flags);
451         BUG_ON(!entry->count);
452         --entry->count;
453         __sweep(entry->ds, head);
454         spin_unlock_irqrestore(&entry->ds->lock, flags);
455 }
456
457 /*
458  * Returns 1 if the work was deferred, 0 if nothing is pending and the job can run now.
459  */
460 static int ds_add_work(struct deferred_set *ds, struct list_head *work)
461 {
462         int r = 1;
463         unsigned long flags;
464         unsigned next_entry;
465
466         spin_lock_irqsave(&ds->lock, flags);
467         if ((ds->sweeper == ds->current_entry) &&
468             !ds->entries[ds->current_entry].count)
469                 r = 0;
470         else {
471                 list_add(work, &ds->entries[ds->current_entry].work_items);
472                 next_entry = ds_next(ds->current_entry);
473                 if (!ds->entries[next_entry].count)
474                         ds->current_entry = next_entry;
475         }
476         spin_unlock_irqrestore(&ds->lock, flags);
477
478         return r;
479 }
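/*
 * Editor's note summarising how the deferred set is used further down
 * (no additional behaviour, just a map of the call sites):
 *
 *   - a read of a shared block does h->entry = ds_inc(&pool->ds) in
 *     process_shared_bio() and ds_dec(h->entry, &work) in shared_read_endio();
 *   - a write that breaks sharing calls ds_add_work(&pool->ds, &m->list) in
 *     schedule_copy(); the prepared mapping is only inserted once the reads
 *     counted against the earlier entries have drained.
 */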
480
481 /*----------------------------------------------------------------*/
482
483 /*
484  * Key building.
485  */
486 static void build_data_key(struct dm_thin_device *td,
487                            dm_block_t b, struct cell_key *key)
488 {
489         key->virtual = 0;
490         key->dev = dm_thin_dev_id(td);
491         key->block = b;
492 }
493
494 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
495                               struct cell_key *key)
496 {
497         key->virtual = 1;
498         key->dev = dm_thin_dev_id(td);
499         key->block = b;
500 }
501
502 /*----------------------------------------------------------------*/
503
504 /*
505  * A pool device ties together a metadata device and a data device.  It
506  * also provides the interface for creating and destroying internal
507  * devices.
508  */
509 struct new_mapping;
510 struct pool {
511         struct list_head list;
512         struct dm_target *ti;   /* Only set if a pool target is bound */
513
514         struct mapped_device *pool_md;
515         struct block_device *md_dev;
516         struct dm_pool_metadata *pmd;
517
518         uint32_t sectors_per_block;
519         unsigned block_shift;
520         dm_block_t offset_mask;
521         dm_block_t low_water_blocks;
522
523         unsigned zero_new_blocks:1;
524         unsigned low_water_triggered:1; /* A dm event has been sent */
525         unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
526
527         struct bio_prison *prison;
528         struct dm_kcopyd_client *copier;
529
530         struct workqueue_struct *wq;
531         struct work_struct worker;
532
533         unsigned ref_count;
534
535         spinlock_t lock;
536         struct bio_list deferred_bios;
537         struct bio_list deferred_flush_bios;
538         struct list_head prepared_mappings;
539
540         struct bio_list retry_on_resume_list;
541
542         struct deferred_set ds; /* FIXME: move to thin_c */
543
544         struct new_mapping *next_mapping;
545         mempool_t *mapping_pool;
546         mempool_t *endio_hook_pool;
547 };
548
549 /*
550  * Target context for a pool.
551  */
552 struct pool_c {
553         struct dm_target *ti;
554         struct pool *pool;
555         struct dm_dev *data_dev;
556         struct dm_dev *metadata_dev;
557         struct dm_target_callbacks callbacks;
558
559         dm_block_t low_water_blocks;
560         unsigned zero_new_blocks:1;
561 };
562
563 /*
564  * Target context for a thin.
565  */
566 struct thin_c {
567         struct dm_dev *pool_dev;
568         dm_thin_id dev_id;
569
570         struct pool *pool;
571         struct dm_thin_device *td;
572 };
573
574 /*----------------------------------------------------------------*/
575
576 /*
577  * A global list of pools that uses a struct mapped_device as a key.
578  */
579 static struct dm_thin_pool_table {
580         struct mutex mutex;
581         struct list_head pools;
582 } dm_thin_pool_table;
583
584 static void pool_table_init(void)
585 {
586         mutex_init(&dm_thin_pool_table.mutex);
587         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
588 }
589
590 static void __pool_table_insert(struct pool *pool)
591 {
592         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
593         list_add(&pool->list, &dm_thin_pool_table.pools);
594 }
595
596 static void __pool_table_remove(struct pool *pool)
597 {
598         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
599         list_del(&pool->list);
600 }
601
602 static struct pool *__pool_table_lookup(struct mapped_device *md)
603 {
604         struct pool *pool = NULL, *tmp;
605
606         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
607
608         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
609                 if (tmp->pool_md == md) {
610                         pool = tmp;
611                         break;
612                 }
613         }
614
615         return pool;
616 }
617
618 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
619 {
620         struct pool *pool = NULL, *tmp;
621
622         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
623
624         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
625                 if (tmp->md_dev == md_dev) {
626                         pool = tmp;
627                         break;
628                 }
629         }
630
631         return pool;
632 }
633
634 /*----------------------------------------------------------------*/
635
636 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
637 {
638         struct bio *bio;
639         struct bio_list bios;
640
641         bio_list_init(&bios);
642         bio_list_merge(&bios, master);
643         bio_list_init(master);
644
645         while ((bio = bio_list_pop(&bios))) {
646                 if (dm_get_mapinfo(bio)->ptr == tc)
647                         bio_endio(bio, DM_ENDIO_REQUEUE);
648                 else
649                         bio_list_add(master, bio);
650         }
651 }
652
653 static void requeue_io(struct thin_c *tc)
654 {
655         struct pool *pool = tc->pool;
656         unsigned long flags;
657
658         spin_lock_irqsave(&pool->lock, flags);
659         __requeue_bio_list(tc, &pool->deferred_bios);
660         __requeue_bio_list(tc, &pool->retry_on_resume_list);
661         spin_unlock_irqrestore(&pool->lock, flags);
662 }
663
664 /*
665  * This section of code contains the logic for processing a thin device's IO.
666  * Much of the code depends on pool object resources (lists, workqueues, etc)
667  * but most is exclusively called from the thin target rather than the thin-pool
668  * target.
669  */
670
671 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
672 {
673         return bio->bi_sector >> tc->pool->block_shift;
674 }
675
676 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
677 {
678         struct pool *pool = tc->pool;
679
680         bio->bi_bdev = tc->pool_dev->bdev;
681         bio->bi_sector = (block << pool->block_shift) +
682                 (bio->bi_sector & pool->offset_mask);
683 }
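/*
 * Editor's worked example: with a 128-sector (64KB) data block size,
 * block_shift is ffs(128) - 1 = 7 and offset_mask is 127.  A bio at
 * virtual sector 300 therefore lands in virtual block 2 (300 >> 7); if
 * that block is mapped to data block 5, remap() sends the bio to sector
 * (5 << 7) + (300 & 127) = 640 + 44 = 684 of the pool device.
 */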
684
685 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
686                             dm_block_t block)
687 {
688         struct pool *pool = tc->pool;
689         unsigned long flags;
690
691         remap(tc, bio, block);
692
693         /*
694          * Batch together any FUA/FLUSH bios we find and then issue
695          * a single commit for them in process_deferred_bios().
696          */
697         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
698                 spin_lock_irqsave(&pool->lock, flags);
699                 bio_list_add(&pool->deferred_flush_bios, bio);
700                 spin_unlock_irqrestore(&pool->lock, flags);
701         } else
702                 generic_make_request(bio);
703 }
704
705 /*
706  * wake_worker() is used when new work is queued and when pool_resume is
707  * ready to continue deferred IO processing.
708  */
709 static void wake_worker(struct pool *pool)
710 {
711         queue_work(pool->wq, &pool->worker);
712 }
713
714 /*----------------------------------------------------------------*/
715
716 /*
717  * Bio endio functions.
718  */
719 struct endio_hook {
720         struct thin_c *tc;
721         bio_end_io_t *saved_bi_end_io;
722         struct deferred_entry *entry;
723 };
724
725 struct new_mapping {
726         struct list_head list;
727
728         int prepared;
729
730         struct thin_c *tc;
731         dm_block_t virt_block;
732         dm_block_t data_block;
733         struct cell *cell;
734         int err;
735
736         /*
737          * If the bio covers the whole area of a block then we can avoid
738          * zeroing or copying.  Instead this bio is hooked.  The bio will
739          * still be in the cell, so care has to be taken to avoid issuing
740          * the bio twice.
741          */
742         struct bio *bio;
743         bio_end_io_t *saved_bi_end_io;
744 };
745
746 static void __maybe_add_mapping(struct new_mapping *m)
747 {
748         struct pool *pool = m->tc->pool;
749
750         if (list_empty(&m->list) && m->prepared) {
751                 list_add(&m->list, &pool->prepared_mappings);
752                 wake_worker(pool);
753         }
754 }
755
756 static void copy_complete(int read_err, unsigned long write_err, void *context)
757 {
758         unsigned long flags;
759         struct new_mapping *m = context;
760         struct pool *pool = m->tc->pool;
761
762         m->err = read_err || write_err ? -EIO : 0;
763
764         spin_lock_irqsave(&pool->lock, flags);
765         m->prepared = 1;
766         __maybe_add_mapping(m);
767         spin_unlock_irqrestore(&pool->lock, flags);
768 }
769
770 static void overwrite_endio(struct bio *bio, int err)
771 {
772         unsigned long flags;
773         struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
774         struct pool *pool = m->tc->pool;
775
776         m->err = err;
777
778         spin_lock_irqsave(&pool->lock, flags);
779         m->prepared = 1;
780         __maybe_add_mapping(m);
781         spin_unlock_irqrestore(&pool->lock, flags);
782 }
783
784 static void shared_read_endio(struct bio *bio, int err)
785 {
786         struct list_head mappings;
787         struct new_mapping *m, *tmp;
788         struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
789         unsigned long flags;
790         struct pool *pool = h->tc->pool;
791
792         bio->bi_end_io = h->saved_bi_end_io;
793         bio_endio(bio, err);
794
795         INIT_LIST_HEAD(&mappings);
796         ds_dec(h->entry, &mappings);
797
798         spin_lock_irqsave(&pool->lock, flags);
799         list_for_each_entry_safe(m, tmp, &mappings, list) {
800                 list_del(&m->list);
801                 INIT_LIST_HEAD(&m->list);
802                 __maybe_add_mapping(m);
803         }
804         spin_unlock_irqrestore(&pool->lock, flags);
805
806         mempool_free(h, pool->endio_hook_pool);
807 }
808
809 /*----------------------------------------------------------------*/
810
811 /*
812  * Workqueue.
813  */
814
815 /*
816  * Prepared mapping jobs.
817  */
818
819 /*
820  * This sends the bios in the cell back to the deferred_bios list.
821  */
822 static void cell_defer(struct thin_c *tc, struct cell *cell,
823                        dm_block_t data_block)
824 {
825         struct pool *pool = tc->pool;
826         unsigned long flags;
827
828         spin_lock_irqsave(&pool->lock, flags);
829         cell_release(cell, &pool->deferred_bios);
830         spin_unlock_irqrestore(&pool->lock, flags);
831
832         wake_worker(pool);
833 }
834
835 /*
836  * Same as cell_defer above, except it omits one particular detainee,
837  * a write bio that covers the block and has already been processed.
838  */
839 static void cell_defer_except(struct thin_c *tc, struct cell *cell)
840 {
841         struct bio_list bios;
842         struct pool *pool = tc->pool;
843         unsigned long flags;
844
845         bio_list_init(&bios);
846
847         spin_lock_irqsave(&pool->lock, flags);
848         cell_release_no_holder(cell, &pool->deferred_bios);
849         spin_unlock_irqrestore(&pool->lock, flags);
850
851         wake_worker(pool);
852 }
853
854 static void process_prepared_mapping(struct new_mapping *m)
855 {
856         struct thin_c *tc = m->tc;
857         struct bio *bio;
858         int r;
859
860         bio = m->bio;
861         if (bio)
862                 bio->bi_end_io = m->saved_bi_end_io;
863
864         if (m->err) {
865                 cell_error(m->cell);
866                 goto out;
867         }
868
869         /*
870          * Commit the prepared block into the mapping btree.
871          * Any I/O for this block arriving after this point will get
872          * remapped to it directly.
873          */
874         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
875         if (r) {
876                 DMERR("dm_thin_insert_block() failed");
877                 cell_error(m->cell);
878                 goto out;
879         }
880
881         /*
882          * Release any bios held while the block was being provisioned.
883          * If we are processing a write bio that completely covers the block,
884          * we already processed it so can ignore it now when processing
885          * we have already processed it, so we can ignore it now when processing
886          */
887         if (bio) {
888                 cell_defer_except(tc, m->cell);
889                 bio_endio(bio, 0);
890         } else
891                 cell_defer(tc, m->cell, m->data_block);
892
893 out:
894         list_del(&m->list);
895         mempool_free(m, tc->pool->mapping_pool);
896 }
897
898 static void process_prepared_mappings(struct pool *pool)
899 {
900         unsigned long flags;
901         struct list_head maps;
902         struct new_mapping *m, *tmp;
903
904         INIT_LIST_HEAD(&maps);
905         spin_lock_irqsave(&pool->lock, flags);
906         list_splice_init(&pool->prepared_mappings, &maps);
907         spin_unlock_irqrestore(&pool->lock, flags);
908
909         list_for_each_entry_safe(m, tmp, &maps, list)
910                 process_prepared_mapping(m);
911 }
912
913 /*
914  * Deferred bio jobs.
915  */
916 static int io_overwrites_block(struct pool *pool, struct bio *bio)
917 {
918         return ((bio_data_dir(bio) == WRITE) &&
919                 !(bio->bi_sector & pool->offset_mask)) &&
920                 (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
921 }
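/*
 * Editor's note: with 128-sector blocks, a WRITE whose bi_sector is a
 * multiple of 128 and whose bi_size is exactly 65536 bytes qualifies as
 * overwriting the block; anything smaller or misaligned will be copied or
 * zeroed first via kcopyd.
 */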
922
923 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
924                                bio_end_io_t *fn)
925 {
926         *save = bio->bi_end_io;
927         bio->bi_end_io = fn;
928 }
929
930 static int ensure_next_mapping(struct pool *pool)
931 {
932         if (pool->next_mapping)
933                 return 0;
934
935         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
936
937         return pool->next_mapping ? 0 : -ENOMEM;
938 }
939
940 static struct new_mapping *get_next_mapping(struct pool *pool)
941 {
942         struct new_mapping *r = pool->next_mapping;
943
944         BUG_ON(!pool->next_mapping);
945
946         pool->next_mapping = NULL;
947
948         return r;
949 }
950
951 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
952                           dm_block_t data_origin, dm_block_t data_dest,
953                           struct cell *cell, struct bio *bio)
954 {
955         int r;
956         struct pool *pool = tc->pool;
957         struct new_mapping *m = get_next_mapping(pool);
958
959         INIT_LIST_HEAD(&m->list);
960         m->prepared = 0;
961         m->tc = tc;
962         m->virt_block = virt_block;
963         m->data_block = data_dest;
964         m->cell = cell;
965         m->err = 0;
966         m->bio = NULL;
967
968         ds_add_work(&pool->ds, &m->list);
969
970         /*
971          * IO to pool_dev remaps to the pool target's data_dev.
972          *
973          * If the whole block of data is being overwritten, we can issue the
974          * bio immediately. Otherwise we use kcopyd to clone the data first.
975          */
976         if (io_overwrites_block(pool, bio)) {
977                 m->bio = bio;
978                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
979                 dm_get_mapinfo(bio)->ptr = m;
980                 remap_and_issue(tc, bio, data_dest);
981         } else {
982                 struct dm_io_region from, to;
983
984                 from.bdev = tc->pool_dev->bdev;
985                 from.sector = data_origin * pool->sectors_per_block;
986                 from.count = pool->sectors_per_block;
987
988                 to.bdev = tc->pool_dev->bdev;
989                 to.sector = data_dest * pool->sectors_per_block;
990                 to.count = pool->sectors_per_block;
991
992                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
993                                    0, copy_complete, m);
994                 if (r < 0) {
995                         mempool_free(m, pool->mapping_pool);
996                         DMERR("dm_kcopyd_copy() failed");
997                         cell_error(cell);
998                 }
999         }
1000 }
1001
1002 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1003                           dm_block_t data_block, struct cell *cell,
1004                           struct bio *bio)
1005 {
1006         struct pool *pool = tc->pool;
1007         struct new_mapping *m = get_next_mapping(pool);
1008
1009         INIT_LIST_HEAD(&m->list);
1010         m->prepared = 0;
1011         m->tc = tc;
1012         m->virt_block = virt_block;
1013         m->data_block = data_block;
1014         m->cell = cell;
1015         m->err = 0;
1016         m->bio = NULL;
1017
1018         /*
1019          * If the whole block of data is being overwritten or we are not
1020          * zeroing pre-existing data, we can issue the bio immediately.
1021          * Otherwise we use kcopyd to zero the data first.
1022          */
1023         if (!pool->zero_new_blocks)
1024                 process_prepared_mapping(m);
1025
1026         else if (io_overwrites_block(pool, bio)) {
1027                 m->bio = bio;
1028                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1029                 dm_get_mapinfo(bio)->ptr = m;
1030                 remap_and_issue(tc, bio, data_block);
1031
1032         } else {
1033                 int r;
1034                 struct dm_io_region to;
1035
1036                 to.bdev = tc->pool_dev->bdev;
1037                 to.sector = data_block * pool->sectors_per_block;
1038                 to.count = pool->sectors_per_block;
1039
1040                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
1041                 if (r < 0) {
1042                         mempool_free(m, pool->mapping_pool);
1043                         DMERR("dm_kcopyd_zero() failed");
1044                         cell_error(cell);
1045                 }
1046         }
1047 }
1048
1049 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1050 {
1051         int r;
1052         dm_block_t free_blocks;
1053         unsigned long flags;
1054         struct pool *pool = tc->pool;
1055
1056         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1057         if (r)
1058                 return r;
1059
1060         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1061                 DMWARN("%s: reached low water mark, sending event.",
1062                        dm_device_name(pool->pool_md));
1063                 spin_lock_irqsave(&pool->lock, flags);
1064                 pool->low_water_triggered = 1;
1065                 spin_unlock_irqrestore(&pool->lock, flags);
1066                 dm_table_event(pool->ti->table);
1067         }
1068
1069         if (!free_blocks) {
1070                 if (pool->no_free_space)
1071                         return -ENOSPC;
1072                 else {
1073                         /*
1074                          * Try to commit to see if that will free up some
1075                          * more space.
1076                          */
1077                         r = dm_pool_commit_metadata(pool->pmd);
1078                         if (r) {
1079                                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1080                                       __func__, r);
1081                                 return r;
1082                         }
1083
1084                         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1085                         if (r)
1086                                 return r;
1087
1088                         /*
1089                          * If we still have no space we set a flag to avoid
1090                          * doing all this checking and return -ENOSPC.
1091                          */
1092                         if (!free_blocks) {
1093                                 DMWARN("%s: no free space available.",
1094                                        dm_device_name(pool->pool_md));
1095                                 spin_lock_irqsave(&pool->lock, flags);
1096                                 pool->no_free_space = 1;
1097                                 spin_unlock_irqrestore(&pool->lock, flags);
1098                                 return -ENOSPC;
1099                         }
1100                 }
1101         }
1102
1103         r = dm_pool_alloc_data_block(pool->pmd, result);
1104         if (r)
1105                 return r;
1106
1107         return 0;
1108 }
1109
1110 /*
1111  * If we have run out of space, queue bios until the device is
1112  * resumed, presumably after having been reloaded with more space.
1113  */
1114 static void retry_on_resume(struct bio *bio)
1115 {
1116         struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1117         struct pool *pool = tc->pool;
1118         unsigned long flags;
1119
1120         spin_lock_irqsave(&pool->lock, flags);
1121         bio_list_add(&pool->retry_on_resume_list, bio);
1122         spin_unlock_irqrestore(&pool->lock, flags);
1123 }
1124
1125 static void no_space(struct cell *cell)
1126 {
1127         struct bio *bio;
1128         struct bio_list bios;
1129
1130         bio_list_init(&bios);
1131         cell_release(cell, &bios);
1132
1133         while ((bio = bio_list_pop(&bios)))
1134                 retry_on_resume(bio);
1135 }
1136
1137 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1138                           struct cell_key *key,
1139                           struct dm_thin_lookup_result *lookup_result,
1140                           struct cell *cell)
1141 {
1142         int r;
1143         dm_block_t data_block;
1144
1145         r = alloc_data_block(tc, &data_block);
1146         switch (r) {
1147         case 0:
1148                 schedule_copy(tc, block, lookup_result->block,
1149                               data_block, cell, bio);
1150                 break;
1151
1152         case -ENOSPC:
1153                 no_space(cell);
1154                 break;
1155
1156         default:
1157                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1158                 cell_error(cell);
1159                 break;
1160         }
1161 }
1162
1163 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1164                                dm_block_t block,
1165                                struct dm_thin_lookup_result *lookup_result)
1166 {
1167         struct cell *cell;
1168         struct pool *pool = tc->pool;
1169         struct cell_key key;
1170
1171         /*
1172          * If cell is already occupied, then sharing is already in the process
1173          * of being broken so we have nothing further to do here.
1174          */
1175         build_data_key(tc->td, lookup_result->block, &key);
1176         if (bio_detain(pool->prison, &key, bio, &cell))
1177                 return;
1178
1179         if (bio_data_dir(bio) == WRITE)
1180                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1181         else {
1182                 struct endio_hook *h;
1183                 h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1184
1185                 h->tc = tc;
1186                 h->entry = ds_inc(&pool->ds);
1187                 save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
1188                 dm_get_mapinfo(bio)->ptr = h;
1189
1190                 cell_release_singleton(cell, bio);
1191                 remap_and_issue(tc, bio, lookup_result->block);
1192         }
1193 }
1194
1195 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1196                             struct cell *cell)
1197 {
1198         int r;
1199         dm_block_t data_block;
1200
1201         /*
1202          * Remap empty bios (flushes) immediately, without provisioning.
1203          */
1204         if (!bio->bi_size) {
1205                 cell_release_singleton(cell, bio);
1206                 remap_and_issue(tc, bio, 0);
1207                 return;
1208         }
1209
1210         /*
1211          * Fill read bios with zeroes and complete them immediately.
1212          */
1213         if (bio_data_dir(bio) == READ) {
1214                 zero_fill_bio(bio);
1215                 cell_release_singleton(cell, bio);
1216                 bio_endio(bio, 0);
1217                 return;
1218         }
1219
1220         r = alloc_data_block(tc, &data_block);
1221         switch (r) {
1222         case 0:
1223                 schedule_zero(tc, block, data_block, cell, bio);
1224                 break;
1225
1226         case -ENOSPC:
1227                 no_space(cell);
1228                 break;
1229
1230         default:
1231                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1232                 cell_error(cell);
1233                 break;
1234         }
1235 }
1236
1237 static void process_bio(struct thin_c *tc, struct bio *bio)
1238 {
1239         int r;
1240         dm_block_t block = get_bio_block(tc, bio);
1241         struct cell *cell;
1242         struct cell_key key;
1243         struct dm_thin_lookup_result lookup_result;
1244
1245         /*
1246          * If cell is already occupied, then the block is already
1247          * being provisioned so we have nothing further to do here.
1248          */
1249         build_virtual_key(tc->td, block, &key);
1250         if (bio_detain(tc->pool->prison, &key, bio, &cell))
1251                 return;
1252
1253         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1254         switch (r) {
1255         case 0:
1256                 /*
1257                  * We can release this cell now.  This thread is the only
1258                  * one that puts bios into a cell, and we know there were
1259                  * no preceding bios.
1260                  */
1261                 /*
1262                  * TODO: this will probably have to change when discard goes
1263                  * back in.
1264                  */
1265                 cell_release_singleton(cell, bio);
1266
1267                 if (lookup_result.shared)
1268                         process_shared_bio(tc, bio, block, &lookup_result);
1269                 else
1270                         remap_and_issue(tc, bio, lookup_result.block);
1271                 break;
1272
1273         case -ENODATA:
1274                 provision_block(tc, bio, block, cell);
1275                 break;
1276
1277         default:
1278                 DMERR("dm_thin_find_block() failed, error = %d", r);
1279                 bio_io_error(bio);
1280                 break;
1281         }
1282 }
1283
1284 static void process_deferred_bios(struct pool *pool)
1285 {
1286         unsigned long flags;
1287         struct bio *bio;
1288         struct bio_list bios;
1289         int r;
1290
1291         bio_list_init(&bios);
1292
1293         spin_lock_irqsave(&pool->lock, flags);
1294         bio_list_merge(&bios, &pool->deferred_bios);
1295         bio_list_init(&pool->deferred_bios);
1296         spin_unlock_irqrestore(&pool->lock, flags);
1297
1298         while ((bio = bio_list_pop(&bios))) {
1299                 struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1300                 /*
1301                  * If we've got no free new_mapping structs, and processing
1302                  * this bio might require one, we pause until there are some
1303                  * prepared mappings to process.
1304                  */
1305                 if (ensure_next_mapping(pool)) {
1306                         spin_lock_irqsave(&pool->lock, flags);
1307                         bio_list_add(&pool->deferred_bios, bio);
1308                         bio_list_merge(&pool->deferred_bios, &bios);
1309                         spin_unlock_irqrestore(&pool->lock, flags);
1310                         break;
1311                 }
1312                 process_bio(tc, bio);
1313         }
1314
1315         /*
1316          * If there are any deferred flush bios, we must commit
1317          * the metadata before issuing them.
1318          */
1319         bio_list_init(&bios);
1320         spin_lock_irqsave(&pool->lock, flags);
1321         bio_list_merge(&bios, &pool->deferred_flush_bios);
1322         bio_list_init(&pool->deferred_flush_bios);
1323         spin_unlock_irqrestore(&pool->lock, flags);
1324
1325         if (bio_list_empty(&bios))
1326                 return;
1327
1328         r = dm_pool_commit_metadata(pool->pmd);
1329         if (r) {
1330                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1331                       __func__, r);
1332                 while ((bio = bio_list_pop(&bios)))
1333                         bio_io_error(bio);
1334                 return;
1335         }
1336
1337         while ((bio = bio_list_pop(&bios)))
1338                 generic_make_request(bio);
1339 }
1340
1341 static void do_worker(struct work_struct *ws)
1342 {
1343         struct pool *pool = container_of(ws, struct pool, worker);
1344
1345         process_prepared_mappings(pool);
1346         process_deferred_bios(pool);
1347 }
1348
1349 /*----------------------------------------------------------------*/
1350
1351 /*
1352  * Mapping functions.
1353  */
1354
1355 /*
1356  * Called only while mapping a thin bio to hand it over to the workqueue.
1357  */
1358 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1359 {
1360         unsigned long flags;
1361         struct pool *pool = tc->pool;
1362
1363         spin_lock_irqsave(&pool->lock, flags);
1364         bio_list_add(&pool->deferred_bios, bio);
1365         spin_unlock_irqrestore(&pool->lock, flags);
1366
1367         wake_worker(pool);
1368 }
1369
1370 /*
1371  * Non-blocking function called from the thin target's map function.
1372  */
1373 static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1374                         union map_info *map_context)
1375 {
1376         int r;
1377         struct thin_c *tc = ti->private;
1378         dm_block_t block = get_bio_block(tc, bio);
1379         struct dm_thin_device *td = tc->td;
1380         struct dm_thin_lookup_result result;
1381
1382         /*
1383          * Save the thin context for easy access from the deferred bio later.
1384          */
1385         map_context->ptr = tc;
1386
1387         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1388                 thin_defer_bio(tc, bio);
1389                 return DM_MAPIO_SUBMITTED;
1390         }
1391
1392         r = dm_thin_find_block(td, block, 0, &result);
1393
1394         /*
1395          * Note that we defer readahead too.
1396          */
1397         switch (r) {
1398         case 0:
1399                 if (unlikely(result.shared)) {
1400                         /*
1401                          * We have a race condition here between the
1402                          * result.shared value returned by the lookup and
1403                          * snapshot creation, which may cause new
1404                          * sharing.
1405                          *
1406                          * To avoid this, always quiesce the origin before
1407                          * taking the snap.  You want to do this anyway to
1408                          * ensure a consistent application view
1409                          * (i.e. lockfs).
1410                          *
1411                          * More distant ancestors are irrelevant. The
1412                          * shared flag will be set in their case.
1413                          */
1414                         thin_defer_bio(tc, bio);
1415                         r = DM_MAPIO_SUBMITTED;
1416                 } else {
1417                         remap(tc, bio, result.block);
1418                         r = DM_MAPIO_REMAPPED;
1419                 }
1420                 break;
1421
1422         case -ENODATA:
1423                 /*
1424                  * In future, the failed dm_thin_find_block above could
1425                  * provide the hint to load the metadata into cache.
1426                  */
1427         case -EWOULDBLOCK:
1428                 thin_defer_bio(tc, bio);
1429                 r = DM_MAPIO_SUBMITTED;
1430                 break;
1431         }
1432
1433         return r;
1434 }
1435
1436 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1437 {
1438         int r;
1439         unsigned long flags;
1440         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1441
1442         spin_lock_irqsave(&pt->pool->lock, flags);
1443         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1444         spin_unlock_irqrestore(&pt->pool->lock, flags);
1445
1446         if (!r) {
1447                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1448                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1449         }
1450
1451         return r;
1452 }
1453
1454 static void __requeue_bios(struct pool *pool)
1455 {
1456         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1457         bio_list_init(&pool->retry_on_resume_list);
1458 }
1459
1460 /*----------------------------------------------------------------
1461  * Binding of control targets to a pool object
1462  *--------------------------------------------------------------*/
1463 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1464 {
1465         struct pool_c *pt = ti->private;
1466
1467         pool->ti = ti;
1468         pool->low_water_blocks = pt->low_water_blocks;
1469         pool->zero_new_blocks = pt->zero_new_blocks;
1470
1471         return 0;
1472 }
1473
1474 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1475 {
1476         if (pool->ti == ti)
1477                 pool->ti = NULL;
1478 }
1479
1480 /*----------------------------------------------------------------
1481  * Pool creation
1482  *--------------------------------------------------------------*/
1483 static void __pool_destroy(struct pool *pool)
1484 {
1485         __pool_table_remove(pool);
1486
1487         if (dm_pool_metadata_close(pool->pmd) < 0)
1488                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1489
1490         prison_destroy(pool->prison);
1491         dm_kcopyd_client_destroy(pool->copier);
1492
1493         if (pool->wq)
1494                 destroy_workqueue(pool->wq);
1495
1496         if (pool->next_mapping)
1497                 mempool_free(pool->next_mapping, pool->mapping_pool);
1498         mempool_destroy(pool->mapping_pool);
1499         mempool_destroy(pool->endio_hook_pool);
1500         kfree(pool);
1501 }
1502
1503 static struct pool *pool_create(struct mapped_device *pool_md,
1504                                 struct block_device *metadata_dev,
1505                                 unsigned long block_size, char **error)
1506 {
1507         int r;
1508         void *err_p;
1509         struct pool *pool;
1510         struct dm_pool_metadata *pmd;
1511
1512         pmd = dm_pool_metadata_open(metadata_dev, block_size);
1513         if (IS_ERR(pmd)) {
1514                 *error = "Error creating metadata object";
1515                 return (struct pool *)pmd;
1516         }
1517
1518         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1519         if (!pool) {
1520                 *error = "Error allocating memory for pool";
1521                 err_p = ERR_PTR(-ENOMEM);
1522                 goto bad_pool;
1523         }
1524
1525         pool->pmd = pmd;
1526         pool->sectors_per_block = block_size;
1527         pool->block_shift = ffs(block_size) - 1;
1528         pool->offset_mask = block_size - 1;
1529         pool->low_water_blocks = 0;
1530         pool->zero_new_blocks = 1;
1531         pool->prison = prison_create(PRISON_CELLS);
1532         if (!pool->prison) {
1533                 *error = "Error creating pool's bio prison";
1534                 err_p = ERR_PTR(-ENOMEM);
1535                 goto bad_prison;
1536         }
1537
1538         pool->copier = dm_kcopyd_client_create();
1539         if (IS_ERR(pool->copier)) {
1540                 r = PTR_ERR(pool->copier);
1541                 *error = "Error creating pool's kcopyd client";
1542                 err_p = ERR_PTR(r);
1543                 goto bad_kcopyd_client;
1544         }
1545
1546         /*
1547          * Create a singlethreaded workqueue that will service all devices
1548          * that use this metadata.
1549          */
1550         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1551         if (!pool->wq) {
1552                 *error = "Error creating pool's workqueue";
1553                 err_p = ERR_PTR(-ENOMEM);
1554                 goto bad_wq;
1555         }
1556
1557         INIT_WORK(&pool->worker, do_worker);
1558         spin_lock_init(&pool->lock);
1559         bio_list_init(&pool->deferred_bios);
1560         bio_list_init(&pool->deferred_flush_bios);
1561         INIT_LIST_HEAD(&pool->prepared_mappings);
1562         pool->low_water_triggered = 0;
1563         pool->no_free_space = 0;
1564         bio_list_init(&pool->retry_on_resume_list);
1565         ds_init(&pool->ds);
1566
1567         pool->next_mapping = NULL;
1568         pool->mapping_pool =
1569                 mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
1570         if (!pool->mapping_pool) {
1571                 *error = "Error creating pool's mapping mempool";
1572                 err_p = ERR_PTR(-ENOMEM);
1573                 goto bad_mapping_pool;
1574         }
1575
1576         pool->endio_hook_pool =
1577                 mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
1578         if (!pool->endio_hook_pool) {
1579                 *error = "Error creating pool's endio_hook mempool";
1580                 err_p = ERR_PTR(-ENOMEM);
1581                 goto bad_endio_hook_pool;
1582         }
1583         pool->ref_count = 1;
1584         pool->pool_md = pool_md;
1585         pool->md_dev = metadata_dev;
1586         __pool_table_insert(pool);
1587
1588         return pool;
1589
1590 bad_endio_hook_pool:
1591         mempool_destroy(pool->mapping_pool);
1592 bad_mapping_pool:
1593         destroy_workqueue(pool->wq);
1594 bad_wq:
1595         dm_kcopyd_client_destroy(pool->copier);
1596 bad_kcopyd_client:
1597         prison_destroy(pool->prison);
1598 bad_prison:
1599         kfree(pool);
1600 bad_pool:
1601         if (dm_pool_metadata_close(pmd))
1602                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1603
1604         return err_p;
1605 }
1606
1607 static void __pool_inc(struct pool *pool)
1608 {
1609         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1610         pool->ref_count++;
1611 }
1612
1613 static void __pool_dec(struct pool *pool)
1614 {
1615         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1616         BUG_ON(!pool->ref_count);
1617         if (!--pool->ref_count)
1618                 __pool_destroy(pool);
1619 }
1620
1621 static struct pool *__pool_find(struct mapped_device *pool_md,
1622                                 struct block_device *metadata_dev,
1623                                 unsigned long block_size, char **error)
1624 {
1625         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1626
1627         if (pool) {
1628                 if (pool->pool_md != pool_md)
1629                         return ERR_PTR(-EBUSY);
1630                 __pool_inc(pool);
1631
1632         } else {
1633                 pool = __pool_table_lookup(pool_md);
1634                 if (pool) {
1635                         if (pool->md_dev != metadata_dev)
1636                                 return ERR_PTR(-EINVAL);
1637                         __pool_inc(pool);
1638
1639                 } else
1640                         pool = pool_create(pool_md, metadata_dev, block_size, error);
1641         }
1642
1643         return pool;
1644 }
1645
1646 /*----------------------------------------------------------------
1647  * Pool target methods
1648  *--------------------------------------------------------------*/
1649 static void pool_dtr(struct dm_target *ti)
1650 {
1651         struct pool_c *pt = ti->private;
1652
1653         mutex_lock(&dm_thin_pool_table.mutex);
1654
1655         unbind_control_target(pt->pool, ti);
1656         __pool_dec(pt->pool);
1657         dm_put_device(ti, pt->metadata_dev);
1658         dm_put_device(ti, pt->data_dev);
1659         kfree(pt);
1660
1661         mutex_unlock(&dm_thin_pool_table.mutex);
1662 }
1663
1664 struct pool_features {
1665         unsigned zero_new_blocks:1;
1666 };
1667
1668 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1669                                struct dm_target *ti)
1670 {
1671         int r;
1672         unsigned argc;
1673         const char *arg_name;
1674
1675         static struct dm_arg _args[] = {
1676                 {0, 1, "Invalid number of pool feature arguments"},
1677         };
1678
1679         /*
1680          * No feature arguments supplied.
1681          */
1682         if (!as->argc)
1683                 return 0;
1684
1685         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1686         if (r)
1687                 return -EINVAL;
1688
1689         while (argc && !r) {
1690                 arg_name = dm_shift_arg(as);
1691                 argc--;
1692
1693                 if (!strcasecmp(arg_name, "skip_block_zeroing")) {
1694                         pf->zero_new_blocks = 0;
1695                         continue;
1696                 }
1697
1698                 ti->error = "Unrecognised pool feature requested";
1699                 r = -EINVAL;
1700         }
1701
1702         return r;
1703 }
1704
1705 /*
1706  * thin-pool <metadata dev> <data dev>
1707  *           <data block size (sectors)>
1708  *           <low water mark (blocks)>
1709  *           [<#feature args> [<arg>]*]
1710  *
1711  * Optional feature arguments are:
1712  *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1713  */
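/*
 * For example, a pool table line might look like the following (the
 * device paths and sizes below are purely illustrative):
 *
 *   0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing
 *
 * i.e. a 10GiB pool built from the given metadata and data devices, with
 * 64KiB (128-sector) data blocks, a low water mark of 32768 blocks and
 * block zeroing disabled.
 */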
1714 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1715 {
1716         int r;
1717         struct pool_c *pt;
1718         struct pool *pool;
1719         struct pool_features pf;
1720         struct dm_arg_set as;
1721         struct dm_dev *data_dev;
1722         unsigned long block_size;
1723         dm_block_t low_water_blocks;
1724         struct dm_dev *metadata_dev;
1725         sector_t metadata_dev_size;
1726
1727         /*
1728          * FIXME Remove validation from scope of lock.
1729          */
1730         mutex_lock(&dm_thin_pool_table.mutex);
1731
1732         if (argc < 4) {
1733                 ti->error = "Invalid argument count";
1734                 r = -EINVAL;
1735                 goto out_unlock;
1736         }
1737         as.argc = argc;
1738         as.argv = argv;
1739
1740         r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1741         if (r) {
1742                 ti->error = "Error opening metadata block device";
1743                 goto out_unlock;
1744         }
1745
1746         metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1747         if (metadata_dev_size > METADATA_DEV_MAX_SECTORS) {
1748                 ti->error = "Metadata device is too large";
1749                 r = -EINVAL;
1750                 goto out_metadata;
1751         }
1752
1753         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1754         if (r) {
1755                 ti->error = "Error getting data device";
1756                 goto out_metadata;
1757         }
1758
1759         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1760             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1761             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1762             !is_power_of_2(block_size)) {
1763                 ti->error = "Invalid block size";
1764                 r = -EINVAL;
1765                 goto out;
1766         }
1767
1768         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1769                 ti->error = "Invalid low water mark";
1770                 r = -EINVAL;
1771                 goto out;
1772         }
1773
1774         /*
1775          * Set default pool features.
1776          */
1777         memset(&pf, 0, sizeof(pf));
1778         pf.zero_new_blocks = 1;
1779
1780         dm_consume_args(&as, 4);
1781         r = parse_pool_features(&as, &pf, ti);
1782         if (r)
1783                 goto out;
1784
1785         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1786         if (!pt) {
1787                 r = -ENOMEM;
1788                 goto out;
1789         }
1790
1791         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
1792                            block_size, &ti->error);
1793         if (IS_ERR(pool)) {
1794                 r = PTR_ERR(pool);
1795                 goto out_free_pt;
1796         }
1797
1798         pt->pool = pool;
1799         pt->ti = ti;
1800         pt->metadata_dev = metadata_dev;
1801         pt->data_dev = data_dev;
1802         pt->low_water_blocks = low_water_blocks;
1803         pt->zero_new_blocks = pf.zero_new_blocks;
1804         ti->num_flush_requests = 1;
1805         ti->num_discard_requests = 0;
1806         ti->private = pt;
1807
1808         pt->callbacks.congested_fn = pool_is_congested;
1809         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1810
1811         mutex_unlock(&dm_thin_pool_table.mutex);
1812
1813         return 0;
1814
1815 out_free_pt:
1816         kfree(pt);
1817 out:
1818         dm_put_device(ti, data_dev);
1819 out_metadata:
1820         dm_put_device(ti, metadata_dev);
1821 out_unlock:
1822         mutex_unlock(&dm_thin_pool_table.mutex);
1823
1824         return r;
1825 }
1826
1827 static int pool_map(struct dm_target *ti, struct bio *bio,
1828                     union map_info *map_context)
1829 {
1830         int r;
1831         struct pool_c *pt = ti->private;
1832         struct pool *pool = pt->pool;
1833         unsigned long flags;
1834
1835         /*
1836          * As this is a singleton target, ti->begin is always zero.
1837          */
1838         spin_lock_irqsave(&pool->lock, flags);
1839         bio->bi_bdev = pt->data_dev->bdev;
1840         r = DM_MAPIO_REMAPPED;
1841         spin_unlock_irqrestore(&pool->lock, flags);
1842
1843         return r;
1844 }
1845
1846 /*
1847  * Retrieves the number of blocks of the data device from
1848  * the superblock and compares it to the actual device size,
1849  * thus resizing the data device in case it has grown.
1850  *
1851  * This copes both with the ctr opening a preallocated data device
1852  * and then being followed by a resume,
1853  * and with the resume method being called on its own
1854  * after userspace has grown the data device
1855  * in response to a table event.
1856  */
1857 static int pool_preresume(struct dm_target *ti)
1858 {
1859         int r;
1860         struct pool_c *pt = ti->private;
1861         struct pool *pool = pt->pool;
1862         dm_block_t data_size, sb_data_size;
1863
1864         /*
1865          * Take control of the pool object.
1866          */
1867         r = bind_control_target(pool, ti);
1868         if (r)
1869                 return r;
1870
1871         data_size = ti->len >> pool->block_shift;
1872         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
1873         if (r) {
1874                 DMERR("failed to retrieve data device size");
1875                 return r;
1876         }
1877
1878         if (data_size < sb_data_size) {
1879                 DMERR("pool target too small, is %llu blocks (expected %llu)",
1880                       data_size, sb_data_size);
1881                 return -EINVAL;
1882
1883         } else if (data_size > sb_data_size) {
1884                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
1885                 if (r) {
1886                         DMERR("failed to resize data device");
1887                         return r;
1888                 }
1889
1890                 r = dm_pool_commit_metadata(pool->pmd);
1891                 if (r) {
1892                         DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1893                               __func__, r);
1894                         return r;
1895                 }
1896         }
1897
1898         return 0;
1899 }
1900
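/*
 * Clear the low-water and no-free-space flags, requeue any bios that
 * were parked waiting for the resume, then kick the worker.
 */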
1901 static void pool_resume(struct dm_target *ti)
1902 {
1903         struct pool_c *pt = ti->private;
1904         struct pool *pool = pt->pool;
1905         unsigned long flags;
1906
1907         spin_lock_irqsave(&pool->lock, flags);
1908         pool->low_water_triggered = 0;
1909         pool->no_free_space = 0;
1910         __requeue_bios(pool);
1911         spin_unlock_irqrestore(&pool->lock, flags);
1912
1913         wake_worker(pool);
1914 }
1915
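/*
 * Flush all outstanding pool work and commit the metadata so the
 * on-disk state is consistent before the device is suspended.
 */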
1916 static void pool_postsuspend(struct dm_target *ti)
1917 {
1918         int r;
1919         struct pool_c *pt = ti->private;
1920         struct pool *pool = pt->pool;
1921
1922         flush_workqueue(pool->wq);
1923
1924         r = dm_pool_commit_metadata(pool->pmd);
1925         if (r < 0) {
1926                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1927                       __func__, r);
1928                 /* FIXME: invalidate device? error the next FUA or FLUSH bio? */
1929         }
1930 }
1931
1932 static int check_arg_count(unsigned argc, unsigned args_required)
1933 {
1934         if (argc != args_required) {
1935                 DMWARN("Message received with %u arguments instead of %u.",
1936                        argc, args_required);
1937                 return -EINVAL;
1938         }
1939
1940         return 0;
1941 }
1942
1943 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
1944 {
1945         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
1946             *dev_id <= MAX_DEV_ID)
1947                 return 0;
1948
1949         if (warning)
1950                 DMWARN("Message received with invalid device id: %s", arg);
1951
1952         return -EINVAL;
1953 }
1954
1955 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
1956 {
1957         dm_thin_id dev_id;
1958         int r;
1959
1960         r = check_arg_count(argc, 2);
1961         if (r)
1962                 return r;
1963
1964         r = read_dev_id(argv[1], &dev_id, 1);
1965         if (r)
1966                 return r;
1967
1968         r = dm_pool_create_thin(pool->pmd, dev_id);
1969         if (r) {
1970                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
1971                        argv[1]);
1972                 return r;
1973         }
1974
1975         return 0;
1976 }
1977
1978 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
1979 {
1980         dm_thin_id dev_id;
1981         dm_thin_id origin_dev_id;
1982         int r;
1983
1984         r = check_arg_count(argc, 3);
1985         if (r)
1986                 return r;
1987
1988         r = read_dev_id(argv[1], &dev_id, 1);
1989         if (r)
1990                 return r;
1991
1992         r = read_dev_id(argv[2], &origin_dev_id, 1);
1993         if (r)
1994                 return r;
1995
1996         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
1997         if (r) {
1998                 DMWARN("Creation of new snapshot %s of device %s failed.",
1999                        argv[1], argv[2]);
2000                 return r;
2001         }
2002
2003         return 0;
2004 }
2005
2006 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2007 {
2008         dm_thin_id dev_id;
2009         int r;
2010
2011         r = check_arg_count(argc, 2);
2012         if (r)
2013                 return r;
2014
2015         r = read_dev_id(argv[1], &dev_id, 1);
2016         if (r)
2017                 return r;
2018
2019         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2020         if (r)
2021                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2022
2023         return r;
2024 }
2025
2026 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2027 {
2028         dm_thin_id old_id, new_id;
2029         int r;
2030
2031         r = check_arg_count(argc, 3);
2032         if (r)
2033                 return r;
2034
2035         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2036                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2037                 return -EINVAL;
2038         }
2039
2040         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2041                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2042                 return -EINVAL;
2043         }
2044
2045         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2046         if (r) {
2047                 DMWARN("Failed to change transaction id from %s to %s.",
2048                        argv[1], argv[2]);
2049                 return r;
2050         }
2051
2052         return 0;
2053 }
2054
2055 /*
2056  * Messages supported:
2057  *   create_thin        <dev_id>
2058  *   create_snap        <dev_id> <origin_id>
2059  *   delete             <dev_id>
2061  *   set_transaction_id <current_trans_id> <new_trans_id>
2062  */
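/*
 * These are typically sent with dmsetup, e.g. (the pool device name is
 * illustrative):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */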
2063 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2064 {
2065         int r = -EINVAL;
2066         struct pool_c *pt = ti->private;
2067         struct pool *pool = pt->pool;
2068
2069         if (!strcasecmp(argv[0], "create_thin"))
2070                 r = process_create_thin_mesg(argc, argv, pool);
2071
2072         else if (!strcasecmp(argv[0], "create_snap"))
2073                 r = process_create_snap_mesg(argc, argv, pool);
2074
2075         else if (!strcasecmp(argv[0], "delete"))
2076                 r = process_delete_mesg(argc, argv, pool);
2077
2078         else if (!strcasecmp(argv[0], "set_transaction_id"))
2079                 r = process_set_transaction_id_mesg(argc, argv, pool);
2080
2081         else
2082                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2083
2084         if (!r) {
2085                 r = dm_pool_commit_metadata(pool->pmd);
2086                 if (r)
2087                         DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
2088                               argv[0], r);
2089         }
2090
2091         return r;
2092 }
2093
2094 /*
2095  * Status line is:
2096  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2097  *    <used data blocks>/<total data blocks> <held metadata root>
2098  */
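/*
 * For example (values are illustrative):
 *
 *   5 20/4096 1536/163840 -
 */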
2099 static void pool_status(struct dm_target *ti, status_type_t type,
2100                         char *result, unsigned maxlen)
2101 {
2102         int r;
2103         unsigned sz = 0;
2104         uint64_t transaction_id;
2105         dm_block_t nr_free_blocks_data;
2106         dm_block_t nr_free_blocks_metadata;
2107         dm_block_t nr_blocks_data;
2108         dm_block_t nr_blocks_metadata;
2109         dm_block_t held_root;
2110         char buf[BDEVNAME_SIZE];
2111         char buf2[BDEVNAME_SIZE];
2112         struct pool_c *pt = ti->private;
2113         struct pool *pool = pt->pool;
2114
2115         switch (type) {
2116         case STATUSTYPE_INFO:
2117                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2118                 if (r) {
2119                         DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
2120                         goto err;
2121                 }
2122
2123                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2124                 if (r) {
2125                         DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
2126                         goto err;
2127                 }
2128
2129                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2130                 if (r) {
2131                         DMERR("dm_pool_get_metadata_dev_size returned %d", r);
2132                         goto err;
2133                 }
2134
2135                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2136                 if (r) {
2137                         DMERR("dm_pool_get_free_block_count returned %d", r);
2138                         goto err;
2139                 }
2140
2141                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2142                 if (r) {
2143                         DMERR("dm_pool_get_data_dev_size returned %d", r);
2144                         goto err;
2145                 }
2146
2147                 r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
2148                 if (r) {
2149                         DMERR("dm_pool_get_held_metadata_root returned %d", r);
2150                         goto err;
2151                 }
2152
2153                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2154                        (unsigned long long)transaction_id,
2155                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2156                        (unsigned long long)nr_blocks_metadata,
2157                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2158                        (unsigned long long)nr_blocks_data);
2159
2160                 if (held_root)
2161                         DMEMIT("%llu", held_root);
2162                 else
2163                         DMEMIT("-");
2164
2165                 break;
2166
2167         case STATUSTYPE_TABLE:
2168                 DMEMIT("%s %s %lu %llu ",
2169                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2170                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2171                        (unsigned long)pool->sectors_per_block,
2172                        (unsigned long long)pt->low_water_blocks);
2173
2174                 DMEMIT("%u ", !pool->zero_new_blocks);
2175
2176                 if (!pool->zero_new_blocks)
2177                         DMEMIT("skip_block_zeroing ");
2178                 break;
2179         }
2180         return;
2181
2182 err:
2183         DMEMIT("Error");
2184 }
2185
2186 static int pool_iterate_devices(struct dm_target *ti,
2187                                 iterate_devices_callout_fn fn, void *data)
2188 {
2189         struct pool_c *pt = ti->private;
2190
2191         return fn(ti, pt->data_dev, 0, ti->len, data);
2192 }
2193
2194 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2195                       struct bio_vec *biovec, int max_size)
2196 {
2197         struct pool_c *pt = ti->private;
2198         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2199
2200         if (!q->merge_bvec_fn)
2201                 return max_size;
2202
2203         bvm->bi_bdev = pt->data_dev->bdev;
2204
2205         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2206 }
2207
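/*
 * Advertise the pool's block size as the optimal I/O size so that
 * well-behaved upper layers issue block-aligned I/O.
 */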
2208 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2209 {
2210         struct pool_c *pt = ti->private;
2211         struct pool *pool = pt->pool;
2212
2213         blk_limits_io_min(limits, 0);
2214         blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2215 }
2216
2217 static struct target_type pool_target = {
2218         .name = "thin-pool",
2219         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2220                     DM_TARGET_IMMUTABLE,
2221         .version = {1, 0, 2},
2222         .module = THIS_MODULE,
2223         .ctr = pool_ctr,
2224         .dtr = pool_dtr,
2225         .map = pool_map,
2226         .postsuspend = pool_postsuspend,
2227         .preresume = pool_preresume,
2228         .resume = pool_resume,
2229         .message = pool_message,
2230         .status = pool_status,
2231         .merge = pool_merge,
2232         .iterate_devices = pool_iterate_devices,
2233         .io_hints = pool_io_hints,
2234 };
2235
2236 /*----------------------------------------------------------------
2237  * Thin target methods
2238  *--------------------------------------------------------------*/
2239 static void thin_dtr(struct dm_target *ti)
2240 {
2241         struct thin_c *tc = ti->private;
2242
2243         mutex_lock(&dm_thin_pool_table.mutex);
2244
2245         __pool_dec(tc->pool);
2246         dm_pool_close_thin_device(tc->td);
2247         dm_put_device(ti, tc->pool_dev);
2248         kfree(tc);
2249
2250         mutex_unlock(&dm_thin_pool_table.mutex);
2251 }
2252
2253 /*
2254  * Thin target parameters:
2255  *
2256  * <pool_dev> <dev_id>
2257  *
2258  * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2259  * dev_id: the internal device identifier
2260  */
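/*
 * For example (the pool path and size are illustrative):
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *
 * i.e. a 1GiB thin volume backed by device id 0 of the given pool.
 */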
2261 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2262 {
2263         int r;
2264         struct thin_c *tc;
2265         struct dm_dev *pool_dev;
2266         struct mapped_device *pool_md;
2267
2268         mutex_lock(&dm_thin_pool_table.mutex);
2269
2270         if (argc != 2) {
2271                 ti->error = "Invalid argument count";
2272                 r = -EINVAL;
2273                 goto out_unlock;
2274         }
2275
2276         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2277         if (!tc) {
2278                 ti->error = "Out of memory";
2279                 r = -ENOMEM;
2280                 goto out_unlock;
2281         }
2282
2283         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2284         if (r) {
2285                 ti->error = "Error opening pool device";
2286                 goto bad_pool_dev;
2287         }
2288         tc->pool_dev = pool_dev;
2289
2290         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2291                 ti->error = "Invalid device id";
2292                 r = -EINVAL;
2293                 goto bad_common;
2294         }
2295
2296         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2297         if (!pool_md) {
2298                 ti->error = "Couldn't get pool mapped device";
2299                 r = -EINVAL;
2300                 goto bad_common;
2301         }
2302
2303         tc->pool = __pool_table_lookup(pool_md);
2304         if (!tc->pool) {
2305                 ti->error = "Couldn't find pool object";
2306                 r = -EINVAL;
2307                 goto bad_pool_lookup;
2308         }
2309         __pool_inc(tc->pool);
2310
2311         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2312         if (r) {
2313                 ti->error = "Couldn't open thin internal device";
2314                 goto bad_thin_open;
2315         }
2316
2317         ti->split_io = tc->pool->sectors_per_block;
2318         ti->num_flush_requests = 1;
2319         ti->num_discard_requests = 0;
2320         ti->discards_supported = 0;
2321
2322         dm_put(pool_md);
2323
2324         mutex_unlock(&dm_thin_pool_table.mutex);
2325
2326         return 0;
2327
2328 bad_thin_open:
2329         __pool_dec(tc->pool);
2330 bad_pool_lookup:
2331         dm_put(pool_md);
2332 bad_common:
2333         dm_put_device(ti, tc->pool_dev);
2334 bad_pool_dev:
2335         kfree(tc);
2336 out_unlock:
2337         mutex_unlock(&dm_thin_pool_table.mutex);
2338
2339         return r;
2340 }
2341
2342 static int thin_map(struct dm_target *ti, struct bio *bio,
2343                     union map_info *map_context)
2344 {
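	/* Make the sector offset relative to the start of this target. */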
2345         bio->bi_sector -= ti->begin;
2346
2347         return thin_bio_map(ti, bio, map_context);
2348 }
2349
2350 static void thin_postsuspend(struct dm_target *ti)
2351 {
2352         if (dm_noflush_suspending(ti))
2353                 requeue_io((struct thin_c *)ti->private);
2354 }
2355
2356 /*
2357  * <nr mapped sectors> <highest mapped sector>
2358  */
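/*
 * For example, a freshly created (unmapped) device reports "0 -", while a
 * device with mappings reports something like "1048576 2097151"
 * (illustrative values).
 */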
2359 static void thin_status(struct dm_target *ti, status_type_t type,
2360                         char *result, unsigned maxlen)
2361 {
2362         int r;
2363         ssize_t sz = 0;
2364         dm_block_t mapped, highest;
2365         char buf[BDEVNAME_SIZE];
2366         struct thin_c *tc = ti->private;
2367
2368         if (!tc->td)
2369                 DMEMIT("-");
2370         else {
2371                 switch (type) {
2372                 case STATUSTYPE_INFO:
2373                         r = dm_thin_get_mapped_count(tc->td, &mapped);
2374                         if (r) {
2375                                 DMERR("dm_thin_get_mapped_count returned %d", r);
2376                                 goto err;
2377                         }
2378
2379                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2380                         if (r < 0) {
2381                                 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
2382                                 goto err;
2383                         }
2384
2385                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2386                         if (r)
2387                                 DMEMIT("%llu", ((highest + 1) *
2388                                                 tc->pool->sectors_per_block) - 1);
2389                         else
2390                                 DMEMIT("-");
2391                         break;
2392
2393                 case STATUSTYPE_TABLE:
2394                         DMEMIT("%s %lu",
2395                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2396                                (unsigned long) tc->dev_id);
2397                         break;
2398                 }
2399         }
2400
2401         return;
2402
2403 err:
2404         DMEMIT("Error");
2405 }
2406
2407 static int thin_iterate_devices(struct dm_target *ti,
2408                                 iterate_devices_callout_fn fn, void *data)
2409 {
2410         dm_block_t blocks;
2411         struct thin_c *tc = ti->private;
2412
2413         /*
2414          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2415          * we follow a more convoluted path through to the pool's target.
2416          */
2417         if (!tc->pool->ti)
2418                 return 0;       /* nothing is bound */
2419
2420         blocks = tc->pool->ti->len >> tc->pool->block_shift;
2421         if (blocks)
2422                 return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
2423
2424         return 0;
2425 }
2426
2427 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2428 {
2429         struct thin_c *tc = ti->private;
2430
2431         blk_limits_io_min(limits, 0);
2432         blk_limits_io_opt(limits, tc->pool->sectors_per_block << SECTOR_SHIFT);
2433 }
2434
2435 static struct target_type thin_target = {
2436         .name = "thin",
2437         .version = {1, 0, 2},
2438         .module = THIS_MODULE,
2439         .ctr = thin_ctr,
2440         .dtr = thin_dtr,
2441         .map = thin_map,
2442         .postsuspend = thin_postsuspend,
2443         .status = thin_status,
2444         .iterate_devices = thin_iterate_devices,
2445         .io_hints = thin_io_hints,
2446 };
2447
2448 /*----------------------------------------------------------------*/
2449
2450 static int __init dm_thin_init(void)
2451 {
2452         int r;
2453
2454         pool_table_init();
2455
2456         r = dm_register_target(&thin_target);
2457         if (r)
2458                 return r;
2459
2460         r = dm_register_target(&pool_target);
2461         if (r)
2462                 dm_unregister_target(&thin_target);
2463
2464         return r;
2465 }
2466
2467 static void dm_thin_exit(void)
2468 {
2469         dm_unregister_target(&thin_target);
2470         dm_unregister_target(&pool_target);
2471 }
2472
2473 module_init(dm_thin_init);
2474 module_exit(dm_thin_exit);
2475
2476 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2477 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2478 MODULE_LICENSE("GPL");