drivers/md/dm-snap.c
1 /*
2  * dm-snapshot.c
3  *
4  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5  *
6  * This file is released under the GPL.
7  */
8
9 #include <linux/blkdev.h>
10 #include <linux/device-mapper.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/init.h>
14 #include <linux/kdev_t.h>
15 #include <linux/list.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
22
23 #include "dm-exception-store.h"
24
25 #define DM_MSG_PREFIX "snapshots"
26
27 static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
28
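/*
 * The merge target type is registered with the single static string
 * above as its name, so a plain pointer comparison is enough to
 * identify a snapshot-merge target.
 */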
29 #define dm_target_is_snapshot_merge(ti) \
30         ((ti)->type->name == dm_snapshot_merge_target_name)
31
32 /*
33  * The size of the mempool used to track chunks in use.
34  */
35 #define MIN_IOS 256
36
37 #define DM_TRACKED_CHUNK_HASH_SIZE      16
38 #define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
39                                          (DM_TRACKED_CHUNK_HASH_SIZE - 1))
40
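/*
 * In-core hash table of exceptions.  Buckets are indexed by
 * (old_chunk >> hash_shift) & hash_mask - see exception_hash().
 */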
41 struct dm_exception_table {
42         uint32_t hash_mask;
43         unsigned hash_shift;
44         struct list_head *table;
45 };
46
47 struct dm_snapshot {
48         struct rw_semaphore lock;
49
50         struct dm_dev *origin;
51         struct dm_dev *cow;
52
53         struct dm_target *ti;
54
55         /* List of snapshots per Origin */
56         struct list_head list;
57
58         /*
59          * You can't use a snapshot if this is 0 (e.g. if full).
60          * A snapshot-merge target never clears this.
61          */
62         int valid;
63
64         /* Origin writes don't trigger exceptions until this is set */
65         int active;
66
67         atomic_t pending_exceptions_count;
68
69         mempool_t *pending_pool;
70
71         struct dm_exception_table pending;
72         struct dm_exception_table complete;
73
74         /*
75          * pe_lock protects all pending_exception operations and access
76          * as well as the snapshot_bios list.
77          */
78         spinlock_t pe_lock;
79
80         /* Chunks with outstanding reads */
81         spinlock_t tracked_chunk_lock;
82         mempool_t *tracked_chunk_pool;
83         struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
84
85         /* The on disk metadata handler */
86         struct dm_exception_store *store;
87
88         struct dm_kcopyd_client *kcopyd_client;
89
90         /* Wait for events based on state_bits */
91         unsigned long state_bits;
92
93         /* Range of chunks currently being merged. */
94         chunk_t first_merging_chunk;
95         int num_merging_chunks;
96
97         /*
98          * The merge operation failed if this flag is set.
99          * Failure modes are handled as follows:
100          * - I/O error reading the header
101          *      => don't load the target; abort.
102          * - Header does not have "valid" flag set
103          *      => use the origin; forget about the snapshot.
104          * - I/O error when reading exceptions
105          *      => don't load the target; abort.
106          *         (We can't use the intermediate origin state.)
107          * - I/O error while merging
108          *      => stop merging; set merge_failed; process I/O normally.
109          */
110         int merge_failed;
111
112         /*
113          * Incoming bios that overlap with chunks being merged must wait
114          * for them to be committed.
115          */
116         struct bio_list bios_queued_during_merge;
117 };
118
119 /*
120  * state_bits:
121  *   RUNNING_MERGE  - Merge operation is in progress.
122  *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
123  *                    cleared afterwards.
124  */
125 #define RUNNING_MERGE          0
126 #define SHUTDOWN_MERGE         1
127
128 struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
129 {
130         return s->origin;
131 }
132 EXPORT_SYMBOL(dm_snap_origin);
133
134 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
135 {
136         return s->cow;
137 }
138 EXPORT_SYMBOL(dm_snap_cow);
139
140 static sector_t chunk_to_sector(struct dm_exception_store *store,
141                                 chunk_t chunk)
142 {
143         return chunk << store->chunk_shift;
144 }
145
146 static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
147 {
148         /*
149          * There is only ever one instance of a particular block
150          * device so we can compare pointers safely.
151          */
152         return lhs == rhs;
153 }
154
155 struct dm_snap_pending_exception {
156         struct dm_exception e;
157
158         /*
159          * Origin buffers waiting for this to complete are held
160          * in a bio list
161          */
162         struct bio_list origin_bios;
163         struct bio_list snapshot_bios;
164
165         /* Pointer back to snapshot context */
166         struct dm_snapshot *snap;
167
168         /*
169          * 1 indicates the exception has already been sent to
170          * kcopyd.
171          */
172         int started;
173
174         /*
175          * For writing a complete chunk, bypassing the copy.
176          */
177         struct bio *full_bio;
178         bio_end_io_t *full_bio_end_io;
179         void *full_bio_private;
180 };
181
182 /*
183  * Slab caches for completed (dm_exception) and pending
184  * (dm_snap_pending_exception) exception objects.
185  */
186 static struct kmem_cache *exception_cache;
187 static struct kmem_cache *pending_cache;
188
189 struct dm_snap_tracked_chunk {
190         struct hlist_node node;
191         chunk_t chunk;
192 };
193
194 static struct kmem_cache *tracked_chunk_cache;
195
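/*
 * Record a chunk that has I/O in flight against it so that code
 * completing an exception or merging that chunk can wait for the
 * I/O to drain - see __check_for_conflicting_io().
 */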
196 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
197                                                  chunk_t chunk)
198 {
199         struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
200                                                         GFP_NOIO);
201         unsigned long flags;
202
203         c->chunk = chunk;
204
205         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
206         hlist_add_head(&c->node,
207                        &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
208         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
209
210         return c;
211 }
212
213 static void stop_tracking_chunk(struct dm_snapshot *s,
214                                 struct dm_snap_tracked_chunk *c)
215 {
216         unsigned long flags;
217
218         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
219         hlist_del(&c->node);
220         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
221
222         mempool_free(c, s->tracked_chunk_pool);
223 }
224
225 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
226 {
227         struct dm_snap_tracked_chunk *c;
228         struct hlist_node *hn;
229         int found = 0;
230
231         spin_lock_irq(&s->tracked_chunk_lock);
232
233         hlist_for_each_entry(c, hn,
234             &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
235                 if (c->chunk == chunk) {
236                         found = 1;
237                         break;
238                 }
239         }
240
241         spin_unlock_irq(&s->tracked_chunk_lock);
242
243         return found;
244 }
245
246 /*
247  * This conflicting I/O is extremely improbable in the caller,
248  * so msleep(1) is sufficient and there is no need for a wait queue.
249  */
250 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
251 {
252         while (__chunk_is_tracked(s, chunk))
253                 msleep(1);
254 }
255
256 /*
257  * One of these per registered origin, held in the snapshot_origins hash
258  */
259 struct origin {
260         /* The origin device */
261         struct block_device *bdev;
262
263         struct list_head hash_list;
264
265         /* List of snapshots for this origin */
266         struct list_head snapshots;
267 };
268
269 /*
270  * Size of the hash table for origin volumes. If we make this
271  * the size of the minors list then it should be nearly perfect
272  */
273 #define ORIGIN_HASH_SIZE 256
274 #define ORIGIN_MASK      0xFF
275 static struct list_head *_origins;
276 static struct rw_semaphore _origins_lock;
277
278 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
279 static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
280 static uint64_t _pending_exceptions_done_count;
281
282 static int init_origin_hash(void)
283 {
284         int i;
285
286         _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
287                            GFP_KERNEL);
288         if (!_origins) {
289                 DMERR("unable to allocate memory");
290                 return -ENOMEM;
291         }
292
293         for (i = 0; i < ORIGIN_HASH_SIZE; i++)
294                 INIT_LIST_HEAD(_origins + i);
295         init_rwsem(&_origins_lock);
296
297         return 0;
298 }
299
300 static void exit_origin_hash(void)
301 {
302         kfree(_origins);
303 }
304
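/* Hash an origin device by the low bits of its dev_t. */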
305 static unsigned origin_hash(struct block_device *bdev)
306 {
307         return bdev->bd_dev & ORIGIN_MASK;
308 }
309
310 static struct origin *__lookup_origin(struct block_device *origin)
311 {
312         struct list_head *ol;
313         struct origin *o;
314
315         ol = &_origins[origin_hash(origin)];
316         list_for_each_entry (o, ol, hash_list)
317                 if (bdev_equal(o->bdev, origin))
318                         return o;
319
320         return NULL;
321 }
322
323 static void __insert_origin(struct origin *o)
324 {
325         struct list_head *sl = &_origins[origin_hash(o->bdev)];
326         list_add_tail(&o->hash_list, sl);
327 }
328
329 /*
330  * _origins_lock must be held when calling this function.
331  * Returns number of snapshots registered using the supplied cow device, plus:
332  * snap_src - a snapshot suitable for use as a source of exception handover
333  * snap_dest - a snapshot capable of receiving exception handover.
334  * snap_merge - an existing snapshot-merge target linked to the same origin.
335  *   There can be at most one snapshot-merge target. The parameter is optional.
336  *
337  * Possible return values and states of snap_src and snap_dest.
338  *   0: NULL, NULL  - first new snapshot
339  *   1: snap_src, NULL - normal snapshot
340  *   2: snap_src, snap_dest  - waiting for handover
341  *   2: snap_src, NULL - handed over, waiting for old to be deleted
342  *   1: NULL, snap_dest - source got destroyed without handover
343  */
344 static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
345                                         struct dm_snapshot **snap_src,
346                                         struct dm_snapshot **snap_dest,
347                                         struct dm_snapshot **snap_merge)
348 {
349         struct dm_snapshot *s;
350         struct origin *o;
351         int count = 0;
352         int active;
353
354         o = __lookup_origin(snap->origin->bdev);
355         if (!o)
356                 goto out;
357
358         list_for_each_entry(s, &o->snapshots, list) {
359                 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
360                         *snap_merge = s;
361                 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
362                         continue;
363
364                 down_read(&s->lock);
365                 active = s->active;
366                 up_read(&s->lock);
367
368                 if (active) {
369                         if (snap_src)
370                                 *snap_src = s;
371                 } else if (snap_dest)
372                         *snap_dest = s;
373
374                 count++;
375         }
376
377 out:
378         return count;
379 }
380
381 /*
382  * On success, returns 1 if this snapshot is a handover destination,
383  * otherwise returns 0.
384  */
385 static int __validate_exception_handover(struct dm_snapshot *snap)
386 {
387         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
388         struct dm_snapshot *snap_merge = NULL;
389
390         /* Does snapshot need exceptions handed over to it? */
391         if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
392                                           &snap_merge) == 2) ||
393             snap_dest) {
394                 snap->ti->error = "Snapshot cow pairing for exception "
395                                   "table handover failed";
396                 return -EINVAL;
397         }
398
399         /*
400          * If no snap_src was found, snap cannot become a handover
401          * destination.
402          */
403         if (!snap_src)
404                 return 0;
405
406         /*
407          * Non-snapshot-merge handover?
408          */
409         if (!dm_target_is_snapshot_merge(snap->ti))
410                 return 1;
411
412         /*
413          * Do not allow more than one merging snapshot.
414          */
415         if (snap_merge) {
416                 snap->ti->error = "A snapshot is already merging.";
417                 return -EINVAL;
418         }
419
420         if (!snap_src->store->type->prepare_merge ||
421             !snap_src->store->type->commit_merge) {
422                 snap->ti->error = "Snapshot exception store does not "
423                                   "support snapshot-merge.";
424                 return -EINVAL;
425         }
426
427         return 1;
428 }
429
430 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
431 {
432         struct dm_snapshot *l;
433
434         /* Sort the list according to chunk size, largest-first smallest-last */
435         list_for_each_entry(l, &o->snapshots, list)
436                 if (l->store->chunk_size < s->store->chunk_size)
437                         break;
438         list_add_tail(&s->list, &l->list);
439 }
440
441 /*
442  * Make a note of the snapshot and its origin so we can look it
443  * up when the origin has a write on it.
444  *
445  * Also validate snapshot exception store handovers.
446  * On success, returns 1 if this registration is a handover destination,
447  * otherwise returns 0.
448  */
449 static int register_snapshot(struct dm_snapshot *snap)
450 {
451         struct origin *o, *new_o = NULL;
452         struct block_device *bdev = snap->origin->bdev;
453         int r = 0;
454
455         new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
456         if (!new_o)
457                 return -ENOMEM;
458
459         down_write(&_origins_lock);
460
461         r = __validate_exception_handover(snap);
462         if (r < 0) {
463                 kfree(new_o);
464                 goto out;
465         }
466
467         o = __lookup_origin(bdev);
468         if (o)
469                 kfree(new_o);
470         else {
471                 /* New origin */
472                 o = new_o;
473
474                 /* Initialise the struct */
475                 INIT_LIST_HEAD(&o->snapshots);
476                 o->bdev = bdev;
477
478                 __insert_origin(o);
479         }
480
481         __insert_snapshot(o, snap);
482
483 out:
484         up_write(&_origins_lock);
485
486         return r;
487 }
488
489 /*
490  * Move snapshot to correct place in list according to chunk size.
491  */
492 static void reregister_snapshot(struct dm_snapshot *s)
493 {
494         struct block_device *bdev = s->origin->bdev;
495
496         down_write(&_origins_lock);
497
498         list_del(&s->list);
499         __insert_snapshot(__lookup_origin(bdev), s);
500
501         up_write(&_origins_lock);
502 }
503
504 static void unregister_snapshot(struct dm_snapshot *s)
505 {
506         struct origin *o;
507
508         down_write(&_origins_lock);
509         o = __lookup_origin(s->origin->bdev);
510
511         list_del(&s->list);
512         if (o && list_empty(&o->snapshots)) {
513                 list_del(&o->hash_list);
514                 kfree(o);
515         }
516
517         up_write(&_origins_lock);
518 }
519
520 /*
521  * Implementation of the exception hash tables.
522  * The lowest hash_shift bits of the chunk number are ignored, allowing
523  * some consecutive chunks to be grouped together.
524  */
525 static int dm_exception_table_init(struct dm_exception_table *et,
526                                    uint32_t size, unsigned hash_shift)
527 {
528         unsigned int i;
529
530         et->hash_shift = hash_shift;
531         et->hash_mask = size - 1;
532         et->table = dm_vcalloc(size, sizeof(struct list_head));
533         if (!et->table)
534                 return -ENOMEM;
535
536         for (i = 0; i < size; i++)
537                 INIT_LIST_HEAD(et->table + i);
538
539         return 0;
540 }
541
542 static void dm_exception_table_exit(struct dm_exception_table *et,
543                                     struct kmem_cache *mem)
544 {
545         struct list_head *slot;
546         struct dm_exception *ex, *next;
547         int i, size;
548
549         size = et->hash_mask + 1;
550         for (i = 0; i < size; i++) {
551                 slot = et->table + i;
552
553                 list_for_each_entry_safe (ex, next, slot, hash_list)
554                         kmem_cache_free(mem, ex);
555         }
556
557         vfree(et->table);
558 }
559
560 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
561 {
562         return (chunk >> et->hash_shift) & et->hash_mask;
563 }
564
565 static void dm_remove_exception(struct dm_exception *e)
566 {
567         list_del(&e->hash_list);
568 }
569
570 /*
571  * Return the exception data for a sector, or NULL if not
572  * remapped.
573  */
574 static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
575                                                 chunk_t chunk)
576 {
577         struct list_head *slot;
578         struct dm_exception *e;
579
580         slot = &et->table[exception_hash(et, chunk)];
581         list_for_each_entry (e, slot, hash_list)
582                 if (chunk >= e->old_chunk &&
583                     chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
584                         return e;
585
586         return NULL;
587 }
588
589 static struct dm_exception *alloc_completed_exception(void)
590 {
591         struct dm_exception *e;
592
593         e = kmem_cache_alloc(exception_cache, GFP_NOIO);
594         if (!e)
595                 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
596
597         return e;
598 }
599
600 static void free_completed_exception(struct dm_exception *e)
601 {
602         kmem_cache_free(exception_cache, e);
603 }
604
605 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
606 {
607         struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
608                                                              GFP_NOIO);
609
610         atomic_inc(&s->pending_exceptions_count);
611         pe->snap = s;
612
613         return pe;
614 }
615
616 static void free_pending_exception(struct dm_snap_pending_exception *pe)
617 {
618         struct dm_snapshot *s = pe->snap;
619
620         mempool_free(pe, s->pending_pool);
621         smp_mb__before_atomic_dec();
622         atomic_dec(&s->pending_exceptions_count);
623 }
624
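/*
 * Insert an exception into a table.  If the table supports
 * consecutive chunks (hash_shift != 0) and the new mapping is
 * adjacent to an existing run, extend that run instead of adding
 * a separate entry.
 */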
625 static void dm_insert_exception(struct dm_exception_table *eh,
626                                 struct dm_exception *new_e)
627 {
628         struct list_head *l;
629         struct dm_exception *e = NULL;
630
631         l = &eh->table[exception_hash(eh, new_e->old_chunk)];
632
633         /* Add immediately if this table doesn't support consecutive chunks */
634         if (!eh->hash_shift)
635                 goto out;
636
637         /* List is ordered by old_chunk */
638         list_for_each_entry_reverse(e, l, hash_list) {
639                 /* Insert after an existing chunk? */
640                 if (new_e->old_chunk == (e->old_chunk +
641                                          dm_consecutive_chunk_count(e) + 1) &&
642                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
643                                          dm_consecutive_chunk_count(e) + 1)) {
644                         dm_consecutive_chunk_count_inc(e);
645                         free_completed_exception(new_e);
646                         return;
647                 }
648
649                 /* Insert before an existing chunk? */
650                 if (new_e->old_chunk == (e->old_chunk - 1) &&
651                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
652                         dm_consecutive_chunk_count_inc(e);
653                         e->old_chunk--;
654                         e->new_chunk--;
655                         free_completed_exception(new_e);
656                         return;
657                 }
658
659                 if (new_e->old_chunk > e->old_chunk)
660                         break;
661         }
662
663 out:
664         list_add(&new_e->hash_list, e ? &e->hash_list : l);
665 }
666
667 /*
668  * Callback used by the exception stores to load exceptions when
669  * initialising.
670  */
671 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
672 {
673         struct dm_snapshot *s = context;
674         struct dm_exception *e;
675
676         e = alloc_completed_exception();
677         if (!e)
678                 return -ENOMEM;
679
680         e->old_chunk = old;
681
682         /* Consecutive_count is implicitly initialised to zero */
683         e->new_chunk = new;
684
685         dm_insert_exception(&s->complete, e);
686
687         return 0;
688 }
689
690 /*
691  * Return a minimum chunk size of all snapshots that have the specified origin.
692  * Return zero if the origin has no snapshots.
693  */
694 static sector_t __minimum_chunk_size(struct origin *o)
695 {
696         struct dm_snapshot *snap;
697         unsigned chunk_size = 0;
698
699         if (o)
700                 list_for_each_entry(snap, &o->snapshots, list)
701                         chunk_size = min_not_zero(chunk_size,
702                                                   snap->store->chunk_size);
703
704         return chunk_size;
705 }
706
707 /*
708  * Hard coded magic.
709  */
710 static int calc_max_buckets(void)
711 {
712         /* use a fixed size of 2MB */
713         unsigned long mem = 2 * 1024 * 1024;
714         mem /= sizeof(struct list_head);
715
716         return mem;
717 }
718
719 /*
720  * Allocate room for a suitable hash table.
721  */
722 static int init_hash_tables(struct dm_snapshot *s)
723 {
724         sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
725
726         /*
727          * Calculate based on the size of the original volume or
728          * the COW volume...
729          */
730         cow_dev_size = get_dev_size(s->cow->bdev);
731         origin_dev_size = get_dev_size(s->origin->bdev);
732         max_buckets = calc_max_buckets();
733
734         hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
735         hash_size = min(hash_size, max_buckets);
736
737         if (hash_size < 64)
738                 hash_size = 64;
739         hash_size = rounddown_pow_of_two(hash_size);
740         if (dm_exception_table_init(&s->complete, hash_size,
741                                     DM_CHUNK_CONSECUTIVE_BITS))
742                 return -ENOMEM;
743
744         /*
745          * Allocate hash table for in-flight exceptions
746          * Make this smaller than the real hash table
747          */
748         hash_size >>= 3;
749         if (hash_size < 64)
750                 hash_size = 64;
751
752         if (dm_exception_table_init(&s->pending, hash_size, 0)) {
753                 dm_exception_table_exit(&s->complete, exception_cache);
754                 return -ENOMEM;
755         }
756
757         return 0;
758 }
759
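/*
 * Mark the merge as no longer running and wake up anyone waiting
 * for that in stop_merge().
 */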
760 static void merge_shutdown(struct dm_snapshot *s)
761 {
762         clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
763         smp_mb__after_clear_bit();
764         wake_up_bit(&s->state_bits, RUNNING_MERGE);
765 }
766
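/*
 * Reset the merging window and return the bios that were queued
 * while it was active.  Caller must hold s->lock.
 */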
767 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
768 {
769         s->first_merging_chunk = 0;
770         s->num_merging_chunks = 0;
771
772         return bio_list_get(&s->bios_queued_during_merge);
773 }
774
775 /*
776  * Remove one chunk from the index of completed exceptions.
777  */
778 static int __remove_single_exception_chunk(struct dm_snapshot *s,
779                                            chunk_t old_chunk)
780 {
781         struct dm_exception *e;
782
783         e = dm_lookup_exception(&s->complete, old_chunk);
784         if (!e) {
785                 DMERR("Corruption detected: exception for block %llu is "
786                       "on disk but not in memory",
787                       (unsigned long long)old_chunk);
788                 return -EINVAL;
789         }
790
791         /*
792          * If this is the only chunk using this exception, remove exception.
793          */
794         if (!dm_consecutive_chunk_count(e)) {
795                 dm_remove_exception(e);
796                 free_completed_exception(e);
797                 return 0;
798         }
799
800         /*
801          * The chunk may be either at the beginning or the end of a
802          * group of consecutive chunks - never in the middle.  We are
803          * removing chunks in the opposite order to that in which they
804          * were added, so this should always be true.
805          * Decrement the consecutive chunk counter and adjust the
806          * starting point if necessary.
807          */
808         if (old_chunk == e->old_chunk) {
809                 e->old_chunk++;
810                 e->new_chunk++;
811         } else if (old_chunk != e->old_chunk +
812                    dm_consecutive_chunk_count(e)) {
813                 DMERR("Attempt to merge block %llu from the "
814                       "middle of a chunk range [%llu - %llu]",
815                       (unsigned long long)old_chunk,
816                       (unsigned long long)e->old_chunk,
817                       (unsigned long long)
818                       e->old_chunk + dm_consecutive_chunk_count(e));
819                 return -EINVAL;
820         }
821
822         dm_consecutive_chunk_count_dec(e);
823
824         return 0;
825 }
826
827 static void flush_bios(struct bio *bio);
828
829 static int remove_single_exception_chunk(struct dm_snapshot *s)
830 {
831         struct bio *b = NULL;
832         int r;
833         chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
834
835         down_write(&s->lock);
836
837         /*
838          * Process chunks (and associated exceptions) in reverse order
839          * so that dm_consecutive_chunk_count_dec() accounting works.
840          */
841         do {
842                 r = __remove_single_exception_chunk(s, old_chunk);
843                 if (r)
844                         goto out;
845         } while (old_chunk-- > s->first_merging_chunk);
846
847         b = __release_queued_bios_after_merge(s);
848
849 out:
850         up_write(&s->lock);
851         if (b)
852                 flush_bios(b);
853
854         return r;
855 }
856
857 static int origin_write_extent(struct dm_snapshot *merging_snap,
858                                sector_t sector, unsigned chunk_size);
859
860 static void merge_callback(int read_err, unsigned long write_err,
861                            void *context);
862
863 static uint64_t read_pending_exceptions_done_count(void)
864 {
865         uint64_t pending_exceptions_done;
866
867         spin_lock(&_pending_exceptions_done_spinlock);
868         pending_exceptions_done = _pending_exceptions_done_count;
869         spin_unlock(&_pending_exceptions_done_spinlock);
870
871         return pending_exceptions_done;
872 }
873
874 static void increment_pending_exceptions_done_count(void)
875 {
876         spin_lock(&_pending_exceptions_done_spinlock);
877         _pending_exceptions_done_count++;
878         spin_unlock(&_pending_exceptions_done_spinlock);
879
880         wake_up_all(&_pending_exceptions_done);
881 }
882
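/*
 * Find the next run of chunks the exception store can merge, make
 * sure any other snapshots of the origin have copied them out and
 * that in-flight I/O to them has drained, then copy them back from
 * the COW device to the origin with a single kcopyd request.
 * merge_callback() finishes the job.
 */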
883 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
884 {
885         int i, linear_chunks;
886         chunk_t old_chunk, new_chunk;
887         struct dm_io_region src, dest;
888         sector_t io_size;
889         uint64_t previous_count;
890
891         BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
892         if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
893                 goto shut;
894
895         /*
896          * valid flag never changes during merge, so no lock required.
897          */
898         if (!s->valid) {
899                 DMERR("Snapshot is invalid: can't merge");
900                 goto shut;
901         }
902
903         linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
904                                                       &new_chunk);
905         if (linear_chunks <= 0) {
906                 if (linear_chunks < 0) {
907                         DMERR("Read error in exception store: "
908                               "shutting down merge");
909                         down_write(&s->lock);
910                         s->merge_failed = 1;
911                         up_write(&s->lock);
912                 }
913                 goto shut;
914         }
915
916         /* Adjust old_chunk and new_chunk to reflect start of linear region */
917         old_chunk = old_chunk + 1 - linear_chunks;
918         new_chunk = new_chunk + 1 - linear_chunks;
919
920         /*
921          * Use one (potentially large) I/O to copy all 'linear_chunks'
922          * from the exception store to the origin
923          */
924         io_size = linear_chunks * s->store->chunk_size;
925
926         dest.bdev = s->origin->bdev;
927         dest.sector = chunk_to_sector(s->store, old_chunk);
928         dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
929
930         src.bdev = s->cow->bdev;
931         src.sector = chunk_to_sector(s->store, new_chunk);
932         src.count = dest.count;
933
934         /*
935          * Reallocate any exceptions needed in other snapshots then
936          * wait for the pending exceptions to complete.
937          * Each time any pending exception (globally on the system)
938          * completes we are woken and repeat the process to find out
939          * if we can proceed.  While this may not seem a particularly
940          * efficient algorithm, it is not expected to have any
941          * significant impact on performance.
942          */
943         previous_count = read_pending_exceptions_done_count();
944         while (origin_write_extent(s, dest.sector, io_size)) {
945                 wait_event(_pending_exceptions_done,
946                            (read_pending_exceptions_done_count() !=
947                             previous_count));
948                 /* Retry after the wait, until all exceptions are done. */
949                 previous_count = read_pending_exceptions_done_count();
950         }
951
952         down_write(&s->lock);
953         s->first_merging_chunk = old_chunk;
954         s->num_merging_chunks = linear_chunks;
955         up_write(&s->lock);
956
957         /* Wait until writes to all 'linear_chunks' drain */
958         for (i = 0; i < linear_chunks; i++)
959                 __check_for_conflicting_io(s, old_chunk + i);
960
961         dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
962         return;
963
964 shut:
965         merge_shutdown(s);
966 }
967
968 static void error_bios(struct bio *bio);
969
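/*
 * kcopyd completion for a merge copy: commit the merged chunks to
 * the exception store, drop them from the in-core table, release
 * the queued bios and start on the next batch.  On any error the
 * merge is marked failed and shut down.
 */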
970 static void merge_callback(int read_err, unsigned long write_err, void *context)
971 {
972         struct dm_snapshot *s = context;
973         struct bio *b = NULL;
974
975         if (read_err || write_err) {
976                 if (read_err)
977                         DMERR("Read error: shutting down merge.");
978                 else
979                         DMERR("Write error: shutting down merge.");
980                 goto shut;
981         }
982
983         if (s->store->type->commit_merge(s->store,
984                                          s->num_merging_chunks) < 0) {
985                 DMERR("Write error in exception store: shutting down merge");
986                 goto shut;
987         }
988
989         if (remove_single_exception_chunk(s) < 0)
990                 goto shut;
991
992         snapshot_merge_next_chunks(s);
993
994         return;
995
996 shut:
997         down_write(&s->lock);
998         s->merge_failed = 1;
999         b = __release_queued_bios_after_merge(s);
1000         up_write(&s->lock);
1001         error_bios(b);
1002
1003         merge_shutdown(s);
1004 }
1005
1006 static void start_merge(struct dm_snapshot *s)
1007 {
1008         if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1009                 snapshot_merge_next_chunks(s);
1010 }
1011
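/* Action routine for wait_on_bit() in stop_merge(): just schedule. */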
1012 static int wait_schedule(void *ptr)
1013 {
1014         schedule();
1015
1016         return 0;
1017 }
1018
1019 /*
1020  * Stop the merging process and wait until it finishes.
1021  */
1022 static void stop_merge(struct dm_snapshot *s)
1023 {
1024         set_bit(SHUTDOWN_MERGE, &s->state_bits);
1025         wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
1026                     TASK_UNINTERRUPTIBLE);
1027         clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1028 }
1029
1030 /*
1031  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
1032  */
1033 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1034 {
1035         struct dm_snapshot *s;
1036         int i;
1037         int r = -EINVAL;
1038         char *origin_path, *cow_path;
1039         unsigned args_used, num_flush_requests = 1;
1040         fmode_t origin_mode = FMODE_READ;
1041
1042         if (argc != 4) {
1043                 ti->error = "requires exactly 4 arguments";
1044                 r = -EINVAL;
1045                 goto bad;
1046         }
1047
1048         if (dm_target_is_snapshot_merge(ti)) {
1049                 num_flush_requests = 2;
1050                 origin_mode = FMODE_WRITE;
1051         }
1052
1053         s = kmalloc(sizeof(*s), GFP_KERNEL);
1054         if (!s) {
1055                 ti->error = "Cannot allocate private snapshot structure";
1056                 r = -ENOMEM;
1057                 goto bad;
1058         }
1059
1060         origin_path = argv[0];
1061         argv++;
1062         argc--;
1063
1064         r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1065         if (r) {
1066                 ti->error = "Cannot get origin device";
1067                 goto bad_origin;
1068         }
1069
1070         cow_path = argv[0];
1071         argv++;
1072         argc--;
1073
1074         r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1075         if (r) {
1076                 ti->error = "Cannot get COW device";
1077                 goto bad_cow;
1078         }
1079
1080         r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1081         if (r) {
1082                 ti->error = "Couldn't create exception store";
1083                 r = -EINVAL;
1084                 goto bad_store;
1085         }
1086
1087         argv += args_used;
1088         argc -= args_used;
1089
1090         s->ti = ti;
1091         s->valid = 1;
1092         s->active = 0;
1093         atomic_set(&s->pending_exceptions_count, 0);
1094         init_rwsem(&s->lock);
1095         INIT_LIST_HEAD(&s->list);
1096         spin_lock_init(&s->pe_lock);
1097         s->state_bits = 0;
1098         s->merge_failed = 0;
1099         s->first_merging_chunk = 0;
1100         s->num_merging_chunks = 0;
1101         bio_list_init(&s->bios_queued_during_merge);
1102
1103         /* Allocate hash table for COW data */
1104         if (init_hash_tables(s)) {
1105                 ti->error = "Unable to allocate hash table space";
1106                 r = -ENOMEM;
1107                 goto bad_hash_tables;
1108         }
1109
1110         s->kcopyd_client = dm_kcopyd_client_create();
1111         if (IS_ERR(s->kcopyd_client)) {
1112                 r = PTR_ERR(s->kcopyd_client);
1113                 ti->error = "Could not create kcopyd client";
1114                 goto bad_kcopyd;
1115         }
1116
1117         s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1118         if (!s->pending_pool) {
1119                 ti->error = "Could not allocate mempool for pending exceptions";
1120                 r = -ENOMEM;
1121                 goto bad_pending_pool;
1122         }
1123
1124         s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
1125                                                          tracked_chunk_cache);
1126         if (!s->tracked_chunk_pool) {
1127                 ti->error = "Could not allocate tracked_chunk mempool for "
1128                             "tracking reads";
1129                 goto bad_tracked_chunk_pool;
1130         }
1131
1132         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1133                 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1134
1135         spin_lock_init(&s->tracked_chunk_lock);
1136
1137         ti->private = s;
1138         ti->num_flush_requests = num_flush_requests;
1139
1140         /* Add snapshot to the list of snapshots for this origin */
1141         /* Exceptions aren't triggered till snapshot_resume() is called */
1142         r = register_snapshot(s);
1143         if (r == -ENOMEM) {
1144                 ti->error = "Snapshot origin struct allocation failed";
1145                 goto bad_load_and_register;
1146         } else if (r < 0) {
1147                 /* invalid handover, register_snapshot has set ti->error */
1148                 goto bad_load_and_register;
1149         }
1150
1151         /*
1152          * Metadata must only be loaded into one table at once, so skip this
1153          * if metadata will be handed over during resume.
1154          * Chunk size will be set during the handover - set it to zero to
1155          * ensure it's ignored.
1156          */
1157         if (r > 0) {
1158                 s->store->chunk_size = 0;
1159                 return 0;
1160         }
1161
1162         r = s->store->type->read_metadata(s->store, dm_add_exception,
1163                                           (void *)s);
1164         if (r < 0) {
1165                 ti->error = "Failed to read snapshot metadata";
1166                 goto bad_read_metadata;
1167         } else if (r > 0) {
1168                 s->valid = 0;
1169                 DMWARN("Snapshot is marked invalid.");
1170         }
1171
1172         if (!s->store->chunk_size) {
1173                 ti->error = "Chunk size not set";
1174                 goto bad_read_metadata;
1175         }
1176         ti->split_io = s->store->chunk_size;
1177
1178         return 0;
1179
1180 bad_read_metadata:
1181         unregister_snapshot(s);
1182
1183 bad_load_and_register:
1184         mempool_destroy(s->tracked_chunk_pool);
1185
1186 bad_tracked_chunk_pool:
1187         mempool_destroy(s->pending_pool);
1188
1189 bad_pending_pool:
1190         dm_kcopyd_client_destroy(s->kcopyd_client);
1191
1192 bad_kcopyd:
1193         dm_exception_table_exit(&s->pending, pending_cache);
1194         dm_exception_table_exit(&s->complete, exception_cache);
1195
1196 bad_hash_tables:
1197         dm_exception_store_destroy(s->store);
1198
1199 bad_store:
1200         dm_put_device(ti, s->cow);
1201
1202 bad_cow:
1203         dm_put_device(ti, s->origin);
1204
1205 bad_origin:
1206         kfree(s);
1207
1208 bad:
1209         return r;
1210 }
1211
1212 static void __free_exceptions(struct dm_snapshot *s)
1213 {
1214         dm_kcopyd_client_destroy(s->kcopyd_client);
1215         s->kcopyd_client = NULL;
1216
1217         dm_exception_table_exit(&s->pending, pending_cache);
1218         dm_exception_table_exit(&s->complete, exception_cache);
1219 }
1220
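/*
 * Hand the exception table, exception store and validity over from
 * snap_src to snap_dest; the source is then invalidated so it
 * receives no further I/O.
 */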
1221 static void __handover_exceptions(struct dm_snapshot *snap_src,
1222                                   struct dm_snapshot *snap_dest)
1223 {
1224         union {
1225                 struct dm_exception_table table_swap;
1226                 struct dm_exception_store *store_swap;
1227         } u;
1228
1229         /*
1230          * Swap all snapshot context information between the two instances.
1231          */
1232         u.table_swap = snap_dest->complete;
1233         snap_dest->complete = snap_src->complete;
1234         snap_src->complete = u.table_swap;
1235
1236         u.store_swap = snap_dest->store;
1237         snap_dest->store = snap_src->store;
1238         snap_src->store = u.store_swap;
1239
1240         snap_dest->store->snap = snap_dest;
1241         snap_src->store->snap = snap_src;
1242
1243         snap_dest->ti->split_io = snap_dest->store->chunk_size;
1244         snap_dest->valid = snap_src->valid;
1245
1246         /*
1247          * Set source invalid to ensure it receives no further I/O.
1248          */
1249         snap_src->valid = 0;
1250 }
1251
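/*
 * Tear down a snapshot target: cancel any pending exception
 * handover, stop a running merge, wait for outstanding pending
 * exceptions and release the pools, exception store and devices.
 */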
1252 static void snapshot_dtr(struct dm_target *ti)
1253 {
1254 #ifdef CONFIG_DM_DEBUG
1255         int i;
1256 #endif
1257         struct dm_snapshot *s = ti->private;
1258         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1259
1260         down_read(&_origins_lock);
1261         /* Check whether exception handover must be cancelled */
1262         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1263         if (snap_src && snap_dest && (s == snap_src)) {
1264                 down_write(&snap_dest->lock);
1265                 snap_dest->valid = 0;
1266                 up_write(&snap_dest->lock);
1267                 DMERR("Cancelling snapshot handover.");
1268         }
1269         up_read(&_origins_lock);
1270
1271         if (dm_target_is_snapshot_merge(ti))
1272                 stop_merge(s);
1273
1274         /* Prevent further origin writes from using this snapshot. */
1275         /* After this returns there can be no new kcopyd jobs. */
1276         unregister_snapshot(s);
1277
1278         while (atomic_read(&s->pending_exceptions_count))
1279                 msleep(1);
1280         /*
1281          * Ensure instructions in mempool_destroy aren't reordered
1282          * before atomic_read.
1283          */
1284         smp_mb();
1285
1286 #ifdef CONFIG_DM_DEBUG
1287         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1288                 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1289 #endif
1290
1291         mempool_destroy(s->tracked_chunk_pool);
1292
1293         __free_exceptions(s);
1294
1295         mempool_destroy(s->pending_pool);
1296
1297         dm_exception_store_destroy(s->store);
1298
1299         dm_put_device(ti, s->cow);
1300
1301         dm_put_device(ti, s->origin);
1302
1303         kfree(s);
1304 }
1305
1306 /*
1307  * Flush a list of buffers.
1308  */
1309 static void flush_bios(struct bio *bio)
1310 {
1311         struct bio *n;
1312
1313         while (bio) {
1314                 n = bio->bi_next;
1315                 bio->bi_next = NULL;
1316                 generic_make_request(bio);
1317                 bio = n;
1318         }
1319 }
1320
1321 static int do_origin(struct dm_dev *origin, struct bio *bio);
1322
1323 /*
1324  * Flush a list of buffers.
1325  */
1326 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1327 {
1328         struct bio *n;
1329         int r;
1330
1331         while (bio) {
1332                 n = bio->bi_next;
1333                 bio->bi_next = NULL;
1334                 r = do_origin(s->origin, bio);
1335                 if (r == DM_MAPIO_REMAPPED)
1336                         generic_make_request(bio);
1337                 bio = n;
1338         }
1339 }
1340
1341 /*
1342  * Error a list of buffers.
1343  */
1344 static void error_bios(struct bio *bio)
1345 {
1346         struct bio *n;
1347
1348         while (bio) {
1349                 n = bio->bi_next;
1350                 bio->bi_next = NULL;
1351                 bio_io_error(bio);
1352                 bio = n;
1353         }
1354 }
1355
1356 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1357 {
1358         if (!s->valid)
1359                 return;
1360
1361         if (err == -EIO)
1362                 DMERR("Invalidating snapshot: Error reading/writing.");
1363         else if (err == -ENOMEM)
1364                 DMERR("Invalidating snapshot: Unable to allocate exception.");
1365
1366         if (s->store->type->drop_snapshot)
1367                 s->store->type->drop_snapshot(s->store);
1368
1369         s->valid = 0;
1370
1371         dm_table_event(s->ti->table);
1372 }
1373
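/*
 * Complete a pending exception.  On success the completed exception
 * is inserted into the table once conflicting reads have drained;
 * on failure the snapshot is invalidated.  Any bios queued on the
 * pending exception are then submitted or errored accordingly.
 */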
1374 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1375 {
1376         struct dm_exception *e;
1377         struct dm_snapshot *s = pe->snap;
1378         struct bio *origin_bios = NULL;
1379         struct bio *snapshot_bios = NULL;
1380         struct bio *full_bio = NULL;
1381         int error = 0;
1382
1383         if (!success) {
1384                 /* Read/write error - snapshot is unusable */
1385                 down_write(&s->lock);
1386                 __invalidate_snapshot(s, -EIO);
1387                 error = 1;
1388                 goto out;
1389         }
1390
1391         e = alloc_completed_exception();
1392         if (!e) {
1393                 down_write(&s->lock);
1394                 __invalidate_snapshot(s, -ENOMEM);
1395                 error = 1;
1396                 goto out;
1397         }
1398         *e = pe->e;
1399
1400         down_write(&s->lock);
1401         if (!s->valid) {
1402                 free_completed_exception(e);
1403                 error = 1;
1404                 goto out;
1405         }
1406
1407         /* Check for conflicting reads */
1408         __check_for_conflicting_io(s, pe->e.old_chunk);
1409
1410         /*
1411          * Add a proper exception, and remove the
1412          * in-flight exception from the list.
1413          */
1414         dm_insert_exception(&s->complete, e);
1415
1416 out:
1417         dm_remove_exception(&pe->e);
1418         snapshot_bios = bio_list_get(&pe->snapshot_bios);
1419         origin_bios = bio_list_get(&pe->origin_bios);
1420         full_bio = pe->full_bio;
1421         if (full_bio) {
1422                 full_bio->bi_end_io = pe->full_bio_end_io;
1423                 full_bio->bi_private = pe->full_bio_private;
1424         }
1425         free_pending_exception(pe);
1426
1427         increment_pending_exceptions_done_count();
1428
1429         up_write(&s->lock);
1430
1431         /* Submit any pending write bios */
1432         if (error) {
1433                 if (full_bio)
1434                         bio_io_error(full_bio);
1435                 error_bios(snapshot_bios);
1436         } else {
1437                 if (full_bio)
1438                         bio_endio(full_bio, 0);
1439                 flush_bios(snapshot_bios);
1440         }
1441
1442         retry_origin_bios(s, origin_bios);
1443 }
1444
1445 static void commit_callback(void *context, int success)
1446 {
1447         struct dm_snap_pending_exception *pe = context;
1448
1449         pending_complete(pe, success);
1450 }
1451
1452 /*
1453  * Called when the copy I/O has finished.  kcopyd actually runs
1454  * this code so don't block.
1455  */
1456 static void copy_callback(int read_err, unsigned long write_err, void *context)
1457 {
1458         struct dm_snap_pending_exception *pe = context;
1459         struct dm_snapshot *s = pe->snap;
1460
1461         if (read_err || write_err)
1462                 pending_complete(pe, 0);
1463
1464         else
1465                 /* Update the metadata if we are persistent */
1466                 s->store->type->commit_exception(s->store, &pe->e,
1467                                                  commit_callback, pe);
1468 }
1469
1470 /*
1471  * Dispatches the copy operation to kcopyd.
1472  */
1473 static void start_copy(struct dm_snap_pending_exception *pe)
1474 {
1475         struct dm_snapshot *s = pe->snap;
1476         struct dm_io_region src, dest;
1477         struct block_device *bdev = s->origin->bdev;
1478         sector_t dev_size;
1479
1480         dev_size = get_dev_size(bdev);
1481
1482         src.bdev = bdev;
1483         src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1484         src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1485
1486         dest.bdev = s->cow->bdev;
1487         dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1488         dest.count = src.count;
1489
1490         /* Hand over to kcopyd */
1491         dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1492 }
1493
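/*
 * Completion for a full-chunk write submitted by start_full_bio():
 * forward the result to kcopyd's callback machinery.
 */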
1494 static void full_bio_end_io(struct bio *bio, int error)
1495 {
1496         void *callback_data = bio->bi_private;
1497
1498         dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
1499 }
1500
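/*
 * A write that covers a whole chunk is sent straight to the COW
 * device instead of copying the chunk first; completion is routed
 * through the kcopyd callback so it is handled like a normal copy.
 */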
1501 static void start_full_bio(struct dm_snap_pending_exception *pe,
1502                            struct bio *bio)
1503 {
1504         struct dm_snapshot *s = pe->snap;
1505         void *callback_data;
1506
1507         pe->full_bio = bio;
1508         pe->full_bio_end_io = bio->bi_end_io;
1509         pe->full_bio_private = bio->bi_private;
1510
1511         callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1512                                                    copy_callback, pe);
1513
1514         bio->bi_end_io = full_bio_end_io;
1515         bio->bi_private = callback_data;
1516
1517         generic_make_request(bio);
1518 }
1519
1520 static struct dm_snap_pending_exception *
1521 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1522 {
1523         struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1524
1525         if (!e)
1526                 return NULL;
1527
1528         return container_of(e, struct dm_snap_pending_exception, e);
1529 }
1530
1531 /*
1532  * Looks to see if this snapshot already has a pending exception
1533  * for this chunk, otherwise it allocates a new one and inserts
1534  * it into the pending table.
1535  *
1536  * NOTE: a write lock must be held on snap->lock before calling
1537  * this.
1538  */
1539 static struct dm_snap_pending_exception *
1540 __find_pending_exception(struct dm_snapshot *s,
1541                          struct dm_snap_pending_exception *pe, chunk_t chunk)
1542 {
1543         struct dm_snap_pending_exception *pe2;
1544
1545         pe2 = __lookup_pending_exception(s, chunk);
1546         if (pe2) {
1547                 free_pending_exception(pe);
1548                 return pe2;
1549         }
1550
1551         pe->e.old_chunk = chunk;
1552         bio_list_init(&pe->origin_bios);
1553         bio_list_init(&pe->snapshot_bios);
1554         pe->started = 0;
1555         pe->full_bio = NULL;
1556
1557         if (s->store->type->prepare_exception(s->store, &pe->e)) {
1558                 free_pending_exception(pe);
1559                 return NULL;
1560         }
1561
1562         dm_insert_exception(&s->pending, &pe->e);
1563
1564         return pe;
1565 }
1566
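/*
 * Redirect a bio to the chunk's location on the COW device,
 * preserving its offset within the chunk.
 */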
1567 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1568                             struct bio *bio, chunk_t chunk)
1569 {
1570         bio->bi_bdev = s->cow->bdev;
1571         bio->bi_sector = chunk_to_sector(s->store,
1572                                          dm_chunk_number(e->new_chunk) +
1573                                          (chunk - e->old_chunk)) +
1574                                          (bio->bi_sector &
1575                                           s->store->chunk_mask);
1576 }
1577
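/*
 * Map I/O for the snapshot target.  Reads of unremapped chunks go
 * to the origin and are tracked; writes allocate a pending
 * exception and are held until the chunk has been copied (or are
 * written directly via start_full_bio() when the bio covers the
 * whole chunk).
 */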
1578 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1579                         union map_info *map_context)
1580 {
1581         struct dm_exception *e;
1582         struct dm_snapshot *s = ti->private;
1583         int r = DM_MAPIO_REMAPPED;
1584         chunk_t chunk;
1585         struct dm_snap_pending_exception *pe = NULL;
1586
1587         if (bio->bi_rw & REQ_FLUSH) {
1588                 bio->bi_bdev = s->cow->bdev;
1589                 return DM_MAPIO_REMAPPED;
1590         }
1591
1592         chunk = sector_to_chunk(s->store, bio->bi_sector);
1593
1594         /* Full snapshots are not usable */
1595         /* To get here the table must be live so s->active is always set. */
1596         if (!s->valid)
1597                 return -EIO;
1598
1599         /* FIXME: should only take write lock if we need
1600          * to copy an exception */
1601         down_write(&s->lock);
1602
1603         if (!s->valid) {
1604                 r = -EIO;
1605                 goto out_unlock;
1606         }
1607
1608         /* If the block is already remapped - use that, else remap it */
1609         e = dm_lookup_exception(&s->complete, chunk);
1610         if (e) {
1611                 remap_exception(s, e, bio, chunk);
1612                 goto out_unlock;
1613         }
1614
1615         /*
1616          * Write to snapshot - higher level takes care of RW/RO
1617          * flags so we should only get this if we are
1618          * writeable.
1619          */
1620         if (bio_rw(bio) == WRITE) {
1621                 pe = __lookup_pending_exception(s, chunk);
1622                 if (!pe) {
1623                         up_write(&s->lock);
1624                         pe = alloc_pending_exception(s);
1625                         down_write(&s->lock);
1626
1627                         if (!s->valid) {
1628                                 free_pending_exception(pe);
1629                                 r = -EIO;
1630                                 goto out_unlock;
1631                         }
1632
1633                         e = dm_lookup_exception(&s->complete, chunk);
1634                         if (e) {
1635                                 free_pending_exception(pe);
1636                                 remap_exception(s, e, bio, chunk);
1637                                 goto out_unlock;
1638                         }
1639
1640                         pe = __find_pending_exception(s, pe, chunk);
1641                         if (!pe) {
1642                                 __invalidate_snapshot(s, -ENOMEM);
1643                                 r = -EIO;
1644                                 goto out_unlock;
1645                         }
1646                 }
1647
1648                 remap_exception(s, &pe->e, bio, chunk);
1649
1650                 r = DM_MAPIO_SUBMITTED;
1651
1652                 if (!pe->started &&
1653                     bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1654                         pe->started = 1;
1655                         up_write(&s->lock);
1656                         start_full_bio(pe, bio);
1657                         goto out;
1658                 }
1659
1660                 bio_list_add(&pe->snapshot_bios, bio);
1661
1662                 if (!pe->started) {
1663                         /* this is protected by snap->lock */
1664                         pe->started = 1;
1665                         up_write(&s->lock);
1666                         start_copy(pe);
1667                         goto out;
1668                 }
1669         } else {
1670                 bio->bi_bdev = s->origin->bdev;
1671                 map_context->ptr = track_chunk(s, chunk);
1672         }
1673
1674 out_unlock:
1675         up_write(&s->lock);
1676 out:
1677         return r;
1678 }
1679
1680 /*
1681  * A snapshot-merge target behaves like a combination of a snapshot
1682  * target and a snapshot-origin target.  It only generates new
1683  * exceptions in other snapshots and not in the one that is being
1684  * merged.
1685  *
1686  * For each chunk, if there is an existing exception, it is used to
1687  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1688  * which in turn might generate exceptions in other snapshots.
1689  * If merging is currently taking place on the chunk in question, the
1690  * I/O is deferred by adding it to s->bios_queued_during_merge.
1691  */
1692 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1693                               union map_info *map_context)
1694 {
1695         struct dm_exception *e;
1696         struct dm_snapshot *s = ti->private;
1697         int r = DM_MAPIO_REMAPPED;
1698         chunk_t chunk;
1699
1700         if (bio->bi_rw & REQ_FLUSH) {
1701                 if (!map_context->target_request_nr)
1702                         bio->bi_bdev = s->origin->bdev;
1703                 else
1704                         bio->bi_bdev = s->cow->bdev;
1705                 map_context->ptr = NULL;
1706                 return DM_MAPIO_REMAPPED;
1707         }
1708
1709         chunk = sector_to_chunk(s->store, bio->bi_sector);
1710
1711         down_write(&s->lock);
1712
1713         /* An invalidated (e.g. full) merging snapshot is redirected to the origin */
1714         if (!s->valid)
1715                 goto redirect_to_origin;
1716
1717         /* If the block is already remapped - use that */
1718         e = dm_lookup_exception(&s->complete, chunk);
1719         if (e) {
1720                 /* Queue writes overlapping with chunks being merged */
1721                 if (bio_rw(bio) == WRITE &&
1722                     chunk >= s->first_merging_chunk &&
1723                     chunk < (s->first_merging_chunk +
1724                              s->num_merging_chunks)) {
1725                         bio->bi_bdev = s->origin->bdev;
1726                         bio_list_add(&s->bios_queued_during_merge, bio);
1727                         r = DM_MAPIO_SUBMITTED;
1728                         goto out_unlock;
1729                 }
1730
1731                 remap_exception(s, e, bio, chunk);
1732
1733                 if (bio_rw(bio) == WRITE)
1734                         map_context->ptr = track_chunk(s, chunk);
1735                 goto out_unlock;
1736         }
1737
1738 redirect_to_origin:
1739         bio->bi_bdev = s->origin->bdev;
1740
1741         if (bio_rw(bio) == WRITE) {
1742                 up_write(&s->lock);
1743                 return do_origin(s->origin, bio);
1744         }
1745
1746 out_unlock:
1747         up_write(&s->lock);
1748
1749         return r;
1750 }
1751
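/*
 * Drop the chunk-tracking entry (if any) taken in snapshot_map() or
 * snapshot_merge_map(), so that code waiting for in-flight I/O to a
 * chunk (e.g. before an exception is committed) can make progress.
 */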
1752 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1753                            int error, union map_info *map_context)
1754 {
1755         struct dm_snapshot *s = ti->private;
1756         struct dm_snap_tracked_chunk *c = map_context->ptr;
1757
1758         if (c)
1759                 stop_tracking_chunk(s, c);
1760
1761         return 0;
1762 }
1763
1764 static void snapshot_merge_presuspend(struct dm_target *ti)
1765 {
1766         struct dm_snapshot *s = ti->private;
1767
1768         stop_merge(s);
1769 }
1770
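/*
 * If this snapshot shares its COW device with another (i.e. an exception
 * handover is pending), refuse to resume the handover source at all, and
 * refuse to resume the destination until the source has been suspended.
 * The handover itself is performed in snapshot_resume().
 */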
1771 static int snapshot_preresume(struct dm_target *ti)
1772 {
1773         int r = 0;
1774         struct dm_snapshot *s = ti->private;
1775         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1776
1777         down_read(&_origins_lock);
1778         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1779         if (snap_src && snap_dest) {
1780                 down_read(&snap_src->lock);
1781                 if (s == snap_src) {
1782                         DMERR("Unable to resume snapshot source until "
1783                               "handover completes.");
1784                         r = -EINVAL;
1785                 } else if (!dm_suspended(snap_src->ti)) {
1786                         DMERR("Unable to perform snapshot handover until "
1787                               "source is suspended.");
1788                         r = -EINVAL;
1789                 }
1790                 up_read(&snap_src->lock);
1791         }
1792         up_read(&_origins_lock);
1793
1794         return r;
1795 }
1796
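/*
 * Complete a pending exception handover, if one was set up: with both
 * snapshot locks held (down_write_nested() with SINGLE_DEPTH_NESTING
 * keeps lockdep happy about taking two locks of the same class) the
 * exceptions are handed over from snap_src to snap_dest.  The snapshot
 * is then reregistered so the origin sees the correct chunk size, and
 * finally marked active.
 */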
1797 static void snapshot_resume(struct dm_target *ti)
1798 {
1799         struct dm_snapshot *s = ti->private;
1800         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1801
1802         down_read(&_origins_lock);
1803         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1804         if (snap_src && snap_dest) {
1805                 down_write(&snap_src->lock);
1806                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1807                 __handover_exceptions(snap_src, snap_dest);
1808                 up_write(&snap_dest->lock);
1809                 up_write(&snap_src->lock);
1810         }
1811         up_read(&_origins_lock);
1812
1813         /* Now we have correct chunk size, reregister */
1814         reregister_snapshot(s);
1815
1816         down_write(&s->lock);
1817         s->active = 1;
1818         up_write(&s->lock);
1819 }
1820
1821 static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1822 {
1823         sector_t min_chunksize;
1824
1825         down_read(&_origins_lock);
1826         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1827         up_read(&_origins_lock);
1828
1829         return min_chunksize;
1830 }
1831
1832 static void snapshot_merge_resume(struct dm_target *ti)
1833 {
1834         struct dm_snapshot *s = ti->private;
1835
1836         /*
1837          * Handover exceptions from existing snapshot.
1838          */
1839         snapshot_resume(ti);
1840
1841         /*
1842          * snapshot-merge acts as an origin, so set ti->split_io
1843          */
1844         ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1845
1846         start_merge(s);
1847 }
1848
1849 static void snapshot_status(struct dm_target *ti, status_type_t type,
1850                             char *result, unsigned maxlen)
1851 {
1852         unsigned sz = 0;
1853         struct dm_snapshot *snap = ti->private;
1854
1855         switch (type) {
1856         case STATUSTYPE_INFO:
1857
1858                 down_write(&snap->lock);
1859
1860                 if (!snap->valid)
1861                         DMEMIT("Invalid");
1862                 else if (snap->merge_failed)
1863                         DMEMIT("Merge failed");
1864                 else {
1865                         if (snap->store->type->usage) {
1866                                 sector_t total_sectors, sectors_allocated,
1867                                          metadata_sectors;
1868                                 snap->store->type->usage(snap->store,
1869                                                          &total_sectors,
1870                                                          &sectors_allocated,
1871                                                          &metadata_sectors);
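                                /*
                                 * Example INFO output (values purely
                                 * illustrative): "16384/1048576 24", i.e.
                                 * sectors allocated / total sectors,
                                 * followed by metadata sectors.
                                 */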
1872                                 DMEMIT("%llu/%llu %llu",
1873                                        (unsigned long long)sectors_allocated,
1874                                        (unsigned long long)total_sectors,
1875                                        (unsigned long long)metadata_sectors);
1876                         } else
1877                                 DMEMIT("Unknown");
1879                 }
1880
1881                 up_write(&snap->lock);
1882
1883                 break;
1884
1885         case STATUSTYPE_TABLE:
1886                 /*
1887                  * kdevname returns a static pointer so we need
1888                  * to make private copies if the output is to
1889                  * make sense.
1890                  */
1891                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1892                 snap->store->type->status(snap->store, type, result + sz,
1893                                           maxlen - sz);
1894                 break;
1895         }
1896 }
1897
1898 static int snapshot_iterate_devices(struct dm_target *ti,
1899                                     iterate_devices_callout_fn fn, void *data)
1900 {
1901         struct dm_snapshot *snap = ti->private;
1902         int r;
1903
1904         r = fn(ti, snap->origin, 0, ti->len, data);
1905
1906         if (!r)
1907                 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1908
1909         return r;
1910 }
1911
1912
1913 /*-----------------------------------------------------------------
1914  * Origin methods
1915  *---------------------------------------------------------------*/
1916
1917 /*
1918  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1919  * supplied bio is ignored.  The caller may submit it immediately.
1920  * (No remapping actually occurs as the origin is always a direct linear
1921  * map.)
1922  *
1923  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1924  * and any supplied bio is added to a list to be submitted once all
1925  * the necessary exceptions exist.
1926  */
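/*
 * Callers: do_origin() passes the bio that triggered the copy-out;
 * origin_write_extent() (the merge path) passes bio == NULL and only
 * uses the return value to decide whether it must wait.
 */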
1927 static int __origin_write(struct list_head *snapshots, sector_t sector,
1928                           struct bio *bio)
1929 {
1930         int r = DM_MAPIO_REMAPPED;
1931         struct dm_snapshot *snap;
1932         struct dm_exception *e;
1933         struct dm_snap_pending_exception *pe;
1934         struct dm_snap_pending_exception *pe_to_start_now = NULL;
1935         struct dm_snap_pending_exception *pe_to_start_last = NULL;
1936         chunk_t chunk;
1937
1938         /* Do all the snapshots on this origin */
1939         list_for_each_entry (snap, snapshots, list) {
1940                 /*
1941                  * Don't make new exceptions in a merging snapshot
1942                  * because it has effectively been deleted
1943                  */
1944                 if (dm_target_is_snapshot_merge(snap->ti))
1945                         continue;
1946
1947                 down_write(&snap->lock);
1948
1949                 /* Only deal with valid and active snapshots */
1950                 if (!snap->valid || !snap->active)
1951                         goto next_snapshot;
1952
1953                 /* Nothing to do if writing beyond end of snapshot */
1954                 if (sector >= dm_table_get_size(snap->ti->table))
1955                         goto next_snapshot;
1956
1957                 /*
1958                  * Remember, different snapshots can have
1959                  * different chunk sizes.
1960                  */
1961                 chunk = sector_to_chunk(snap->store, sector);
1962
1963                 /*
1964                  * Check exception table to see if block
1965                  * is already remapped in this snapshot
1966                  * and trigger an exception if not.
1967                  */
1968                 e = dm_lookup_exception(&snap->complete, chunk);
1969                 if (e)
1970                         goto next_snapshot;
1971
1972                 pe = __lookup_pending_exception(snap, chunk);
1973                 if (!pe) {
1974                         up_write(&snap->lock);
1975                         pe = alloc_pending_exception(snap);
1976                         down_write(&snap->lock);
1977
1978                         if (!snap->valid) {
1979                                 free_pending_exception(pe);
1980                                 goto next_snapshot;
1981                         }
1982
1983                         e = dm_lookup_exception(&snap->complete, chunk);
1984                         if (e) {
1985                                 free_pending_exception(pe);
1986                                 goto next_snapshot;
1987                         }
1988
1989                         pe = __find_pending_exception(snap, pe, chunk);
1990                         if (!pe) {
1991                                 __invalidate_snapshot(snap, -ENOMEM);
1992                                 goto next_snapshot;
1993                         }
1994                 }
1995
1996                 r = DM_MAPIO_SUBMITTED;
1997
1998                 /*
1999                  * If an origin bio was supplied, queue it to wait for the
2000                  * completion of this exception, and start this one last,
2001                  * at the end of the function.
2002                  */
2003                 if (bio) {
2004                         bio_list_add(&pe->origin_bios, bio);
2005                         bio = NULL;
2006
2007                         if (!pe->started) {
2008                                 pe->started = 1;
2009                                 pe_to_start_last = pe;
2010                         }
2011                 }
2012
2013                 if (!pe->started) {
2014                         pe->started = 1;
2015                         pe_to_start_now = pe;
2016                 }
2017
2018 next_snapshot:
2019                 up_write(&snap->lock);
2020
2021                 if (pe_to_start_now) {
2022                         start_copy(pe_to_start_now);
2023                         pe_to_start_now = NULL;
2024                 }
2025         }
2026
2027         /*
2028          * Submit the exception against which the bio is queued last,
2029          * to give the other exceptions a head start.
2030          */
2031         if (pe_to_start_last)
2032                 start_copy(pe_to_start_last);
2033
2034         return r;
2035 }
2036
2037 /*
2038  * Called on a write from the origin driver.
2039  */
2040 static int do_origin(struct dm_dev *origin, struct bio *bio)
2041 {
2042         struct origin *o;
2043         int r = DM_MAPIO_REMAPPED;
2044
2045         down_read(&_origins_lock);
2046         o = __lookup_origin(origin->bdev);
2047         if (o)
2048                 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2049         up_read(&_origins_lock);
2050
2051         return r;
2052 }
2053
2054 /*
2055  * Trigger exceptions in all non-merging snapshots.
2056  *
2057  * The chunk size of the merging snapshot may be larger than the chunk
2058  * size of some other snapshot so we may need to reallocate multiple
2059  * chunks in other snapshots.
2060  *
2061  * We scan all the overlapping exceptions in the other snapshots.
2062  * Returns 1 if anything was reallocated and must be waited for,
2063  * otherwise returns 0.
2064  *
2065  * size must be a multiple of merging_snap's chunk_size.
2066  */
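/*
 * For illustration (chunk sizes purely hypothetical): if the merging
 * snapshot uses 16-sector chunks and another snapshot of the same origin
 * uses 8-sector chunks, split_io is 8 (the minimum), so merging one
 * 16-sector chunk calls __origin_write() at offsets 0 and 8 and may
 * reallocate two chunks in the other snapshot.
 */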
2067 static int origin_write_extent(struct dm_snapshot *merging_snap,
2068                                sector_t sector, unsigned size)
2069 {
2070         int must_wait = 0;
2071         sector_t n;
2072         struct origin *o;
2073
2074         /*
2075          * The origin's __minimum_chunk_size() got stored in split_io
2076          * The origin's __minimum_chunk_size() was stored in split_io
2077          */
2078         down_read(&_origins_lock);
2079         o = __lookup_origin(merging_snap->origin->bdev);
2080         for (n = 0; n < size; n += merging_snap->ti->split_io)
2081                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2082                     DM_MAPIO_SUBMITTED)
2083                         must_wait = 1;
2084         up_read(&_origins_lock);
2085
2086         return must_wait;
2087 }
2088
2089 /*
2090  * Origin: maps a linear range of a device, with hooks for snapshotting.
2091  */
2092
2093 /*
2094  * Construct an origin mapping: <dev_path>
2095  * The context for an origin is merely a 'struct dm_dev *'
2096  * pointing to the real device.
2097  */
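/*
 * Illustrative table line (start, length and device name are
 * hypothetical):
 *
 *     0 2097152 snapshot-origin /dev/vg0/base
 */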
2098 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2099 {
2100         int r;
2101         struct dm_dev *dev;
2102
2103         if (argc != 1) {
2104                 ti->error = "origin: incorrect number of arguments";
2105                 return -EINVAL;
2106         }
2107
2108         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
2109         if (r) {
2110                 ti->error = "Cannot get target device";
2111                 return r;
2112         }
2113
2114         ti->private = dev;
2115         ti->num_flush_requests = 1;
2116
2117         return 0;
2118 }
2119
2120 static void origin_dtr(struct dm_target *ti)
2121 {
2122         struct dm_dev *dev = ti->private;
2123         dm_put_device(ti, dev);
2124 }
2125
2126 static int origin_map(struct dm_target *ti, struct bio *bio,
2127                       union map_info *map_context)
2128 {
2129         struct dm_dev *dev = ti->private;
2130         bio->bi_bdev = dev->bdev;
2131
2132         if (bio->bi_rw & REQ_FLUSH)
2133                 return DM_MAPIO_REMAPPED;
2134
2135         /* Only tell snapshots if this is a write */
2136         return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2137 }
2138
2139 /*
2140  * Set the target "split_io" field to the minimum of all the snapshots'
2141  * chunk sizes.
2142  */
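/*
 * Since chunk sizes are powers of two, splitting I/O at the minimum
 * chunk size guarantees that a bio reaching origin_map() never spans a
 * chunk boundary in any snapshot, so __origin_write() deals with at
 * most one chunk per snapshot per bio.
 */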
2143 static void origin_resume(struct dm_target *ti)
2144 {
2145         struct dm_dev *dev = ti->private;
2146
2147         ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2148 }
2149
2150 static void origin_status(struct dm_target *ti, status_type_t type,
2151                           char *result, unsigned maxlen)
2152 {
2153         struct dm_dev *dev = ti->private;
2154
2155         switch (type) {
2156         case STATUSTYPE_INFO:
2157                 result[0] = '\0';
2158                 break;
2159
2160         case STATUSTYPE_TABLE:
2161                 snprintf(result, maxlen, "%s", dev->name);
2162                 break;
2163         }
2164 }
2165
2166 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2167                         struct bio_vec *biovec, int max_size)
2168 {
2169         struct dm_dev *dev = ti->private;
2170         struct request_queue *q = bdev_get_queue(dev->bdev);
2171
2172         if (!q->merge_bvec_fn)
2173                 return max_size;
2174
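        /*
         * The origin target maps its device 1:1 from sector 0, so no
         * sector translation is needed here; the self-assignment below
         * simply mirrors targets (e.g. dm-linear) that do remap
         * bvm->bi_sector before calling the lower merge_bvec_fn.
         */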
2175         bvm->bi_bdev = dev->bdev;
2176         bvm->bi_sector = bvm->bi_sector;
2177
2178         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2179 }
2180
2181 static int origin_iterate_devices(struct dm_target *ti,
2182                                   iterate_devices_callout_fn fn, void *data)
2183 {
2184         struct dm_dev *dev = ti->private;
2185
2186         return fn(ti, dev, 0, ti->len, data);
2187 }
2188
2189 static struct target_type origin_target = {
2190         .name    = "snapshot-origin",
2191         .version = {1, 7, 2},
2192         .module  = THIS_MODULE,
2193         .ctr     = origin_ctr,
2194         .dtr     = origin_dtr,
2195         .map     = origin_map,
2196         .resume  = origin_resume,
2197         .status  = origin_status,
2198         .merge   = origin_merge,
2199         .iterate_devices = origin_iterate_devices,
2200 };
2201
2202 static struct target_type snapshot_target = {
2203         .name    = "snapshot",
2204         .version = {1, 10, 1},
2205         .module  = THIS_MODULE,
2206         .ctr     = snapshot_ctr,
2207         .dtr     = snapshot_dtr,
2208         .map     = snapshot_map,
2209         .end_io  = snapshot_end_io,
2210         .preresume  = snapshot_preresume,
2211         .resume  = snapshot_resume,
2212         .status  = snapshot_status,
2213         .iterate_devices = snapshot_iterate_devices,
2214 };
2215
2216 static struct target_type merge_target = {
2217         .name    = dm_snapshot_merge_target_name,
2218         .version = {1, 1, 0},
2219         .module  = THIS_MODULE,
2220         .ctr     = snapshot_ctr,
2221         .dtr     = snapshot_dtr,
2222         .map     = snapshot_merge_map,
2223         .end_io  = snapshot_end_io,
2224         .presuspend = snapshot_merge_presuspend,
2225         .preresume  = snapshot_preresume,
2226         .resume  = snapshot_merge_resume,
2227         .status  = snapshot_status,
2228         .iterate_devices = snapshot_iterate_devices,
2229 };
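/*
 * Illustrative dmsetup table lines for the three targets registered in
 * dm_snapshot_init() below (device names and sizes are hypothetical; see
 * Documentation/device-mapper/snapshot.txt for the authoritative syntax):
 *
 *     0 2097152 snapshot-origin /dev/vg0/base
 *     0 2097152 snapshot        /dev/vg0/base /dev/vg0/cow P 16
 *     0 2097152 snapshot-merge  /dev/vg0/base /dev/vg0/cow P 16
 */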
2230
2231 static int __init dm_snapshot_init(void)
2232 {
2233         int r;
2234
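        /*
         * Set-up below is strictly ordered; on failure each bad_* label
         * at the bottom unwinds everything initialised before the failing
         * step, in reverse order.
         */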
2235         r = dm_exception_store_init();
2236         if (r) {
2237                 DMERR("Failed to initialize exception stores");
2238                 return r;
2239         }
2240
2241         r = dm_register_target(&snapshot_target);
2242         if (r < 0) {
2243                 DMERR("snapshot target register failed %d", r);
2244                 goto bad_register_snapshot_target;
2245         }
2246
2247         r = dm_register_target(&origin_target);
2248         if (r < 0) {
2249                 DMERR("Origin target register failed %d", r);
2250                 goto bad_register_origin_target;
2251         }
2252
2253         r = dm_register_target(&merge_target);
2254         if (r < 0) {
2255                 DMERR("Merge target register failed %d", r);
2256                 goto bad_register_merge_target;
2257         }
2258
2259         r = init_origin_hash();
2260         if (r) {
2261                 DMERR("init_origin_hash failed.");
2262                 goto bad_origin_hash;
2263         }
2264
2265         exception_cache = KMEM_CACHE(dm_exception, 0);
2266         if (!exception_cache) {
2267                 DMERR("Couldn't create exception cache.");
2268                 r = -ENOMEM;
2269                 goto bad_exception_cache;
2270         }
2271
2272         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2273         if (!pending_cache) {
2274                 DMERR("Couldn't create pending cache.");
2275                 r = -ENOMEM;
2276                 goto bad_pending_cache;
2277         }
2278
2279         tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2280         if (!tracked_chunk_cache) {
2281                 DMERR("Couldn't create cache to track chunks in use.");
2282                 r = -ENOMEM;
2283                 goto bad_tracked_chunk_cache;
2284         }
2285
2286         return 0;
2287
2288 bad_tracked_chunk_cache:
2289         kmem_cache_destroy(pending_cache);
2290 bad_pending_cache:
2291         kmem_cache_destroy(exception_cache);
2292 bad_exception_cache:
2293         exit_origin_hash();
2294 bad_origin_hash:
2295         dm_unregister_target(&merge_target);
2296 bad_register_merge_target:
2297         dm_unregister_target(&origin_target);
2298 bad_register_origin_target:
2299         dm_unregister_target(&snapshot_target);
2300 bad_register_snapshot_target:
2301         dm_exception_store_exit();
2302
2303         return r;
2304 }
2305
2306 static void __exit dm_snapshot_exit(void)
2307 {
2308         dm_unregister_target(&snapshot_target);
2309         dm_unregister_target(&origin_target);
2310         dm_unregister_target(&merge_target);
2311
2312         exit_origin_hash();
2313         kmem_cache_destroy(pending_cache);
2314         kmem_cache_destroy(exception_cache);
2315         kmem_cache_destroy(tracked_chunk_cache);
2316
2317         dm_exception_store_exit();
2318 }
2319
2320 /* Module hooks */
2321 module_init(dm_snapshot_init);
2322 module_exit(dm_snapshot_exit);
2323
2324 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2325 MODULE_AUTHOR("Joe Thornber");
2326 MODULE_LICENSE("GPL");
2327 MODULE_ALIAS("dm-snapshot-origin");
2328 MODULE_ALIAS("dm-snapshot-merge");