1 /*
2  * dm-snapshot.c
3  *
4  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5  *
6  * This file is released under the GPL.
7  */
8
9 #include <linux/blkdev.h>
10 #include <linux/device-mapper.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/init.h>
14 #include <linux/kdev_t.h>
15 #include <linux/list.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
22
23 #include "dm-exception-store.h"
24
25 #define DM_MSG_PREFIX "snapshots"
26
27 static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
28
29 #define dm_target_is_snapshot_merge(ti) \
30         ((ti)->type->name == dm_snapshot_merge_target_name)
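
/*
 * Comparing name pointers is sufficient here: the snapshot-merge
 * target_type is registered with this very string, so no strcmp()
 * is needed.
 */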
31
32 /*
33  * The percentage increment at which we will wake up users
34  */
35 #define WAKE_UP_PERCENT 5
36
37 /*
38  * kcopyd priority of snapshot operations
39  */
40 #define SNAPSHOT_COPY_PRIORITY 2
41
42 /*
43  * The size of the mempool used to track chunks in use.
44  */
45 #define MIN_IOS 256
46
47 #define DM_TRACKED_CHUNK_HASH_SIZE      16
48 #define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
49                                          (DM_TRACKED_CHUNK_HASH_SIZE - 1))
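
/*
 * For example, with DM_TRACKED_CHUNK_HASH_SIZE of 16 only the low four
 * bits of the chunk number select a bucket: chunks 0x1234 and 0x5678
 * hash to buckets 4 and 8 respectively.
 */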
50
51 struct dm_exception_table {
52         uint32_t hash_mask;
53         unsigned hash_shift;
54         struct list_head *table;
55 };
56
57 struct dm_snapshot {
58         struct rw_semaphore lock;
59
60         struct dm_dev *origin;
61         struct dm_dev *cow;
62
63         struct dm_target *ti;
64
65         /* List of snapshots per Origin */
66         struct list_head list;
67
68         /*
69          * You can't use a snapshot if this is 0 (e.g. if full).
70          * A snapshot-merge target never clears this.
71          */
72         int valid;
73
74         /* Origin writes don't trigger exceptions until this is set */
75         int active;
76
77         atomic_t pending_exceptions_count;
78
79         mempool_t *pending_pool;
80
81         struct dm_exception_table pending;
82         struct dm_exception_table complete;
83
84         /*
85  * pe_lock protects all pending_exception operations and accesses,
86  * as well as the snapshot_bios list.
87          */
88         spinlock_t pe_lock;
89
90         /* Chunks with outstanding reads */
91         spinlock_t tracked_chunk_lock;
92         mempool_t *tracked_chunk_pool;
93         struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
94
95         /* The on disk metadata handler */
96         struct dm_exception_store *store;
97
98         struct dm_kcopyd_client *kcopyd_client;
99
100         /* Wait for events based on state_bits */
101         unsigned long state_bits;
102
103         /* Range of chunks currently being merged. */
104         chunk_t first_merging_chunk;
105         int num_merging_chunks;
106
107         /*
108          * The merge operation failed if this flag is set.
109          * Failure modes are handled as follows:
110          * - I/O error reading the header
111          *      => don't load the target; abort.
112          * - Header does not have "valid" flag set
113          *      => use the origin; forget about the snapshot.
114          * - I/O error when reading exceptions
115          *      => don't load the target; abort.
116          *         (We can't use the intermediate origin state.)
117          * - I/O error while merging
118          *      => stop merging; set merge_failed; process I/O normally.
119          */
120         int merge_failed;
121
122         /*
123          * Incoming bios that overlap with chunks being merged must wait
124          * for them to be committed.
125          */
126         struct bio_list bios_queued_during_merge;
127 };
128
129 /*
130  * state_bits:
131  *   RUNNING_MERGE  - Merge operation is in progress.
132  *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
133  *                    cleared afterwards.
134  */
135 #define RUNNING_MERGE          0
136 #define SHUTDOWN_MERGE         1
137
138 struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
139 {
140         return s->origin;
141 }
142 EXPORT_SYMBOL(dm_snap_origin);
143
144 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
145 {
146         return s->cow;
147 }
148 EXPORT_SYMBOL(dm_snap_cow);
149
150 static sector_t chunk_to_sector(struct dm_exception_store *store,
151                                 chunk_t chunk)
152 {
153         return chunk << store->chunk_shift;
154 }
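
/*
 * For example, with an 8KiB chunk size (16 sectors of 512 bytes, so
 * chunk_shift == 4), chunk 3 starts at sector 3 << 4 == 48.
 */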
155
156 static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
157 {
158         /*
159          * There is only ever one instance of a particular block
160          * device so we can compare pointers safely.
161          */
162         return lhs == rhs;
163 }
164
165 struct dm_snap_pending_exception {
166         struct dm_exception e;
167
168         /*
169          * Origin buffers waiting for this to complete are held
170          * in a bio list
171          */
172         struct bio_list origin_bios;
173         struct bio_list snapshot_bios;
174
175         /* Pointer back to snapshot context */
176         struct dm_snapshot *snap;
177
178         /*
179          * 1 indicates the exception has already been sent to
180          * kcopyd.
181          */
182         int started;
183 };
184
185 /*
186  * Hash table mapping origin volumes to lists of snapshots and
187  * a lock to protect it
188  */
189 static struct kmem_cache *exception_cache;
190 static struct kmem_cache *pending_cache;
191
192 struct dm_snap_tracked_chunk {
193         struct hlist_node node;
194         chunk_t chunk;
195 };
196
197 static struct kmem_cache *tracked_chunk_cache;
198
199 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
200                                                  chunk_t chunk)
201 {
202         struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
203                                                         GFP_NOIO);
204         unsigned long flags;
205
206         c->chunk = chunk;
207
208         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
209         hlist_add_head(&c->node,
210                        &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
211         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
212
213         return c;
214 }
215
216 static void stop_tracking_chunk(struct dm_snapshot *s,
217                                 struct dm_snap_tracked_chunk *c)
218 {
219         unsigned long flags;
220
221         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
222         hlist_del(&c->node);
223         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
224
225         mempool_free(c, s->tracked_chunk_pool);
226 }
227
228 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
229 {
230         struct dm_snap_tracked_chunk *c;
231         struct hlist_node *hn;
232         int found = 0;
233
234         spin_lock_irq(&s->tracked_chunk_lock);
235
236         hlist_for_each_entry(c, hn,
237             &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
238                 if (c->chunk == chunk) {
239                         found = 1;
240                         break;
241                 }
242         }
243
244         spin_unlock_irq(&s->tracked_chunk_lock);
245
246         return found;
247 }
248
249 /*
250  * This conflicting I/O is extremely improbable in the caller,
251  * so msleep(1) is sufficient and there is no need for a wait queue.
252  */
253 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
254 {
255         while (__chunk_is_tracked(s, chunk))
256                 msleep(1);
257 }
258
259 /*
260  * One of these per registered origin, held in the snapshot_origins hash
261  */
262 struct origin {
263         /* The origin device */
264         struct block_device *bdev;
265
266         struct list_head hash_list;
267
268         /* List of snapshots for this origin */
269         struct list_head snapshots;
270 };
271
272 /*
273  * Size of the hash table for origin volumes. If we make this
274  * the size of the minors list then it should be nearly perfect
275  */
276 #define ORIGIN_HASH_SIZE 256
277 #define ORIGIN_MASK      0xFF
278 static struct list_head *_origins;
279 static struct rw_semaphore _origins_lock;
280
281 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
282 static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
283 static uint64_t _pending_exceptions_done_count;
284
285 static int init_origin_hash(void)
286 {
287         int i;
288
289         _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
290                            GFP_KERNEL);
291         if (!_origins) {
292                 DMERR("unable to allocate memory");
293                 return -ENOMEM;
294         }
295
296         for (i = 0; i < ORIGIN_HASH_SIZE; i++)
297                 INIT_LIST_HEAD(_origins + i);
298         init_rwsem(&_origins_lock);
299
300         return 0;
301 }
302
303 static void exit_origin_hash(void)
304 {
305         kfree(_origins);
306 }
307
308 static unsigned origin_hash(struct block_device *bdev)
309 {
310         return bdev->bd_dev & ORIGIN_MASK;
311 }
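
/*
 * For example, a device numbered 8:17 has bd_dev 0x00800011, so it
 * hashes to bucket 0x11; only the low byte of the device number
 * selects the bucket.
 */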
312
313 static struct origin *__lookup_origin(struct block_device *origin)
314 {
315         struct list_head *ol;
316         struct origin *o;
317
318         ol = &_origins[origin_hash(origin)];
319         list_for_each_entry (o, ol, hash_list)
320                 if (bdev_equal(o->bdev, origin))
321                         return o;
322
323         return NULL;
324 }
325
326 static void __insert_origin(struct origin *o)
327 {
328         struct list_head *sl = &_origins[origin_hash(o->bdev)];
329         list_add_tail(&o->hash_list, sl);
330 }
331
332 /*
333  * _origins_lock must be held when calling this function.
334  * Returns number of snapshots registered using the supplied cow device, plus:
335  * snap_src - a snapshot suitable for use as a source of exception handover
336  * snap_dest - a snapshot capable of receiving exception handover.
337  * snap_merge - an existing snapshot-merge target linked to the same origin.
338  *   There can be at most one snapshot-merge target. The parameter is optional.
339  *
340  * Possible return values and states of snap_src and snap_dest.
341  *   0: NULL, NULL  - first new snapshot
342  *   1: snap_src, NULL - normal snapshot
343  *   2: snap_src, snap_dest  - waiting for handover
344  *   2: snap_src, NULL - handed over, waiting for old to be deleted
345  *   1: NULL, snap_dest - source got destroyed without handover
346  */
347 static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
348                                         struct dm_snapshot **snap_src,
349                                         struct dm_snapshot **snap_dest,
350                                         struct dm_snapshot **snap_merge)
351 {
352         struct dm_snapshot *s;
353         struct origin *o;
354         int count = 0;
355         int active;
356
357         o = __lookup_origin(snap->origin->bdev);
358         if (!o)
359                 goto out;
360
361         list_for_each_entry(s, &o->snapshots, list) {
362                 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
363                         *snap_merge = s;
364                 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
365                         continue;
366
367                 down_read(&s->lock);
368                 active = s->active;
369                 up_read(&s->lock);
370
371                 if (active) {
372                         if (snap_src)
373                                 *snap_src = s;
374                 } else if (snap_dest)
375                         *snap_dest = s;
376
377                 count++;
378         }
379
380 out:
381         return count;
382 }
383
384 /*
385  * On success, returns 1 if this snapshot is a handover destination,
386  * otherwise returns 0.
387  */
388 static int __validate_exception_handover(struct dm_snapshot *snap)
389 {
390         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
391         struct dm_snapshot *snap_merge = NULL;
392
393         /* Does snapshot need exceptions handed over to it? */
394         if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
395                                           &snap_merge) == 2) ||
396             snap_dest) {
397                 snap->ti->error = "Snapshot cow pairing for exception "
398                                   "table handover failed";
399                 return -EINVAL;
400         }
401
402         /*
403          * If no snap_src was found, snap cannot become a handover
404          * destination.
405          */
406         if (!snap_src)
407                 return 0;
408
409         /*
410          * Non-snapshot-merge handover?
411          */
412         if (!dm_target_is_snapshot_merge(snap->ti))
413                 return 1;
414
415         /*
416          * Do not allow more than one merging snapshot.
417          */
418         if (snap_merge) {
419                 snap->ti->error = "A snapshot is already merging.";
420                 return -EINVAL;
421         }
422
423         if (!snap_src->store->type->prepare_merge ||
424             !snap_src->store->type->commit_merge) {
425                 snap->ti->error = "Snapshot exception store does not "
426                                   "support snapshot-merge.";
427                 return -EINVAL;
428         }
429
430         return 1;
431 }
432
433 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
434 {
435         struct dm_snapshot *l;
436
437         /* Sort the list according to chunk size, largest-first smallest-last */
438         list_for_each_entry(l, &o->snapshots, list)
439                 if (l->store->chunk_size < s->store->chunk_size)
440                         break;
441         list_add_tail(&s->list, &l->list);
442 }
443
444 /*
445  * Make a note of the snapshot and its origin so we can look it
446  * up when the origin has a write on it.
447  *
448  * Also validate snapshot exception store handovers.
449  * On success, returns 1 if this registration is a handover destination,
450  * otherwise returns 0.
451  */
452 static int register_snapshot(struct dm_snapshot *snap)
453 {
454         struct origin *o, *new_o = NULL;
455         struct block_device *bdev = snap->origin->bdev;
456         int r = 0;
457
458         new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
459         if (!new_o)
460                 return -ENOMEM;
461
462         down_write(&_origins_lock);
463
464         r = __validate_exception_handover(snap);
465         if (r < 0) {
466                 kfree(new_o);
467                 goto out;
468         }
469
470         o = __lookup_origin(bdev);
471         if (o)
472                 kfree(new_o);
473         else {
474                 /* New origin */
475                 o = new_o;
476
477                 /* Initialise the struct */
478                 INIT_LIST_HEAD(&o->snapshots);
479                 o->bdev = bdev;
480
481                 __insert_origin(o);
482         }
483
484         __insert_snapshot(o, snap);
485
486 out:
487         up_write(&_origins_lock);
488
489         return r;
490 }
491
492 /*
493  * Move snapshot to correct place in list according to chunk size.
494  */
495 static void reregister_snapshot(struct dm_snapshot *s)
496 {
497         struct block_device *bdev = s->origin->bdev;
498
499         down_write(&_origins_lock);
500
501         list_del(&s->list);
502         __insert_snapshot(__lookup_origin(bdev), s);
503
504         up_write(&_origins_lock);
505 }
506
507 static void unregister_snapshot(struct dm_snapshot *s)
508 {
509         struct origin *o;
510
511         down_write(&_origins_lock);
512         o = __lookup_origin(s->origin->bdev);
513
514         list_del(&s->list);
515         if (o && list_empty(&o->snapshots)) {
516                 list_del(&o->hash_list);
517                 kfree(o);
518         }
519
520         up_write(&_origins_lock);
521 }
522
523 /*
524  * Implementation of the exception hash tables.
525  * The lowest hash_shift bits of the chunk number are ignored, allowing
526  * some consecutive chunks to be grouped together.
527  */
528 static int dm_exception_table_init(struct dm_exception_table *et,
529                                    uint32_t size, unsigned hash_shift)
530 {
531         unsigned int i;
532
533         et->hash_shift = hash_shift;
534         et->hash_mask = size - 1;
535         et->table = dm_vcalloc(size, sizeof(struct list_head));
536         if (!et->table)
537                 return -ENOMEM;
538
539         for (i = 0; i < size; i++)
540                 INIT_LIST_HEAD(et->table + i);
541
542         return 0;
543 }
544
545 static void dm_exception_table_exit(struct dm_exception_table *et,
546                                     struct kmem_cache *mem)
547 {
548         struct list_head *slot;
549         struct dm_exception *ex, *next;
550         int i, size;
551
552         size = et->hash_mask + 1;
553         for (i = 0; i < size; i++) {
554                 slot = et->table + i;
555
556                 list_for_each_entry_safe (ex, next, slot, hash_list)
557                         kmem_cache_free(mem, ex);
558         }
559
560         vfree(et->table);
561 }
562
563 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
564 {
565         return (chunk >> et->hash_shift) & et->hash_mask;
566 }
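
/*
 * For example, with hash_shift == 8 the chunks 0x100 through 0x1ff all
 * hash to the same bucket, so a run of consecutive chunks stays on a
 * single list.
 */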
567
568 static void dm_remove_exception(struct dm_exception *e)
569 {
570         list_del(&e->hash_list);
571 }
572
573 /*
574  * Return the exception data for a chunk, or NULL if it is not
575  * remapped.
576  */
577 static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
578                                                 chunk_t chunk)
579 {
580         struct list_head *slot;
581         struct dm_exception *e;
582
583         slot = &et->table[exception_hash(et, chunk)];
584         list_for_each_entry (e, slot, hash_list)
585                 if (chunk >= e->old_chunk &&
586                     chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
587                         return e;
588
589         return NULL;
590 }
591
592 static struct dm_exception *alloc_completed_exception(void)
593 {
594         struct dm_exception *e;
595
596         e = kmem_cache_alloc(exception_cache, GFP_NOIO);
597         if (!e)
598                 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
599
600         return e;
601 }
602
603 static void free_completed_exception(struct dm_exception *e)
604 {
605         kmem_cache_free(exception_cache, e);
606 }
607
608 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
609 {
610         struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
611                                                              GFP_NOIO);
612
613         atomic_inc(&s->pending_exceptions_count);
614         pe->snap = s;
615
616         return pe;
617 }
618
619 static void free_pending_exception(struct dm_snap_pending_exception *pe)
620 {
621         struct dm_snapshot *s = pe->snap;
622
623         mempool_free(pe, s->pending_pool);
624         smp_mb__before_atomic_dec();
625         atomic_dec(&s->pending_exceptions_count);
626 }
627
628 static void dm_insert_exception(struct dm_exception_table *eh,
629                                 struct dm_exception *new_e)
630 {
631         struct list_head *l;
632         struct dm_exception *e = NULL;
633
634         l = &eh->table[exception_hash(eh, new_e->old_chunk)];
635
636         /* Add immediately if this table doesn't support consecutive chunks */
637         if (!eh->hash_shift)
638                 goto out;
639
640         /* List is ordered by old_chunk */
641         list_for_each_entry_reverse(e, l, hash_list) {
642                 /* Insert after an existing chunk? */
643                 if (new_e->old_chunk == (e->old_chunk +
644                                          dm_consecutive_chunk_count(e) + 1) &&
645                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
646                                          dm_consecutive_chunk_count(e) + 1)) {
647                         dm_consecutive_chunk_count_inc(e);
648                         free_completed_exception(new_e);
649                         return;
650                 }
651
652                 /* Insert before an existing chunk? */
653                 if (new_e->old_chunk == (e->old_chunk - 1) &&
654                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
655                         dm_consecutive_chunk_count_inc(e);
656                         e->old_chunk--;
657                         e->new_chunk--;
658                         free_completed_exception(new_e);
659                         return;
660                 }
661
662                 if (new_e->old_chunk > e->old_chunk)
663                         break;
664         }
665
666 out:
667         list_add(&new_e->hash_list, e ? &e->hash_list : l);
668 }
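
/*
 * Illustration of the coalescing above: suppose an exception maps old
 * chunks 10-11 to new chunks 20-21 (consecutive count 1).  Inserting
 * old 12 -> new 22 matches the "insert after" test (12 == 10 + 1 + 1
 * and 22 == 20 + 1 + 1), so the count is bumped to 2 and the new entry
 * is freed.  Inserting old 9 -> new 19 instead takes the "insert
 * before" path and the range becomes 9-11 -> 19-21.
 */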
669
670 /*
671  * Callback used by the exception stores to load exceptions when
672  * initialising.
673  */
674 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
675 {
676         struct dm_snapshot *s = context;
677         struct dm_exception *e;
678
679         e = alloc_completed_exception();
680         if (!e)
681                 return -ENOMEM;
682
683         e->old_chunk = old;
684
685         /* Consecutive_count is implicitly initialised to zero */
686         e->new_chunk = new;
687
688         dm_insert_exception(&s->complete, e);
689
690         return 0;
691 }
692
693 /*
694  * Return a minimum chunk size of all snapshots that have the specified origin.
695  * Return zero if the origin has no snapshots.
696  */
697 static sector_t __minimum_chunk_size(struct origin *o)
698 {
699         struct dm_snapshot *snap;
700         unsigned chunk_size = 0;
701
702         if (o)
703                 list_for_each_entry(snap, &o->snapshots, list)
704                         chunk_size = min_not_zero(chunk_size,
705                                                   snap->store->chunk_size);
706
707         return chunk_size;
708 }
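
/*
 * For example, if the origin has snapshots with chunk sizes of 32 and
 * 16 sectors, this returns 16.  min_not_zero() skips any store whose
 * chunk_size is still zero, e.g. one awaiting exception handover.
 */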
709
710 /*
711  * Hard coded magic.
712  */
713 static int calc_max_buckets(void)
714 {
715         /* use a fixed size of 2MB */
716         unsigned long mem = 2 * 1024 * 1024;
717         mem /= sizeof(struct list_head);
718
719         return mem;
720 }
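
/*
 * On a 64-bit machine sizeof(struct list_head) is 16 bytes, so the 2MB
 * budget above allows at most 2097152 / 16 == 131072 buckets.
 */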
721
722 /*
723  * Allocate room for a suitable hash table.
724  */
725 static int init_hash_tables(struct dm_snapshot *s)
726 {
727         sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
728
729         /*
730          * Calculate based on the size of the original volume or
731          * the COW volume...
732          */
733         cow_dev_size = get_dev_size(s->cow->bdev);
734         origin_dev_size = get_dev_size(s->origin->bdev);
735         max_buckets = calc_max_buckets();
736
737         hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
738         hash_size = min(hash_size, max_buckets);
739
740         if (hash_size < 64)
741                 hash_size = 64;
742         hash_size = rounddown_pow_of_two(hash_size);
743         if (dm_exception_table_init(&s->complete, hash_size,
744                                     DM_CHUNK_CONSECUTIVE_BITS))
745                 return -ENOMEM;
746
747         /*
748          * Allocate hash table for in-flight exceptions
749          * Make this smaller than the real hash table
750          */
751         hash_size >>= 3;
752         if (hash_size < 64)
753                 hash_size = 64;
754
755         if (dm_exception_table_init(&s->pending, hash_size, 0)) {
756                 dm_exception_table_exit(&s->complete, exception_cache);
757                 return -ENOMEM;
758         }
759
760         return 0;
761 }
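
/*
 * For example, a 1GiB origin (2097152 sectors, with a COW device at
 * least as large) and 8KiB chunks (chunk_shift == 4) give
 * 2097152 >> 4 == 131072 buckets for the completed table, which is
 * already a power of two; the pending table is then sized at
 * 131072 >> 3 == 16384 buckets.
 */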
762
763 static void merge_shutdown(struct dm_snapshot *s)
764 {
765         clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
766         smp_mb__after_clear_bit();
767         wake_up_bit(&s->state_bits, RUNNING_MERGE);
768 }
769
770 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
771 {
772         s->first_merging_chunk = 0;
773         s->num_merging_chunks = 0;
774
775         return bio_list_get(&s->bios_queued_during_merge);
776 }
777
778 /*
779  * Remove one chunk from the index of completed exceptions.
780  */
781 static int __remove_single_exception_chunk(struct dm_snapshot *s,
782                                            chunk_t old_chunk)
783 {
784         struct dm_exception *e;
785
786         e = dm_lookup_exception(&s->complete, old_chunk);
787         if (!e) {
788                 DMERR("Corruption detected: exception for block %llu is "
789                       "on disk but not in memory",
790                       (unsigned long long)old_chunk);
791                 return -EINVAL;
792         }
793
794         /*
795          * If this is the only chunk using this exception, remove exception.
796          */
797         if (!dm_consecutive_chunk_count(e)) {
798                 dm_remove_exception(e);
799                 free_completed_exception(e);
800                 return 0;
801         }
802
803         /*
804          * The chunk may be either at the beginning or the end of a
805          * group of consecutive chunks - never in the middle.  We are
806          * removing chunks in the opposite order to that in which they
807          * were added, so this should always be true.
808          * Decrement the consecutive chunk counter and adjust the
809          * starting point if necessary.
810          */
811         if (old_chunk == e->old_chunk) {
812                 e->old_chunk++;
813                 e->new_chunk++;
814         } else if (old_chunk != e->old_chunk +
815                    dm_consecutive_chunk_count(e)) {
816                 DMERR("Attempt to merge block %llu from the "
817                       "middle of a chunk range [%llu - %llu]",
818                       (unsigned long long)old_chunk,
819                       (unsigned long long)e->old_chunk,
820                       (unsigned long long)
821                       e->old_chunk + dm_consecutive_chunk_count(e));
822                 return -EINVAL;
823         }
824
825         dm_consecutive_chunk_count_dec(e);
826
827         return 0;
828 }
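
/*
 * For example, with an exception covering old chunks 10-12 (consecutive
 * count 2), merging removes chunk 12 first (the count drops to 1), then
 * chunk 11, and finally chunk 10, at which point the count is zero and
 * the exception itself is freed above.
 */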
829
830 static void flush_bios(struct bio *bio);
831
832 static int remove_single_exception_chunk(struct dm_snapshot *s)
833 {
834         struct bio *b = NULL;
835         int r;
836         chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
837
838         down_write(&s->lock);
839
840         /*
841          * Process chunks (and associated exceptions) in reverse order
842          * so that dm_consecutive_chunk_count_dec() accounting works.
843          */
844         do {
845                 r = __remove_single_exception_chunk(s, old_chunk);
846                 if (r)
847                         goto out;
848         } while (old_chunk-- > s->first_merging_chunk);
849
850         b = __release_queued_bios_after_merge(s);
851
852 out:
853         up_write(&s->lock);
854         if (b)
855                 flush_bios(b);
856
857         return r;
858 }
859
860 static int origin_write_extent(struct dm_snapshot *merging_snap,
861                                sector_t sector, unsigned chunk_size);
862
863 static void merge_callback(int read_err, unsigned long write_err,
864                            void *context);
865
866 static uint64_t read_pending_exceptions_done_count(void)
867 {
868         uint64_t pending_exceptions_done;
869
870         spin_lock(&_pending_exceptions_done_spinlock);
871         pending_exceptions_done = _pending_exceptions_done_count;
872         spin_unlock(&_pending_exceptions_done_spinlock);
873
874         return pending_exceptions_done;
875 }
876
877 static void increment_pending_exceptions_done_count(void)
878 {
879         spin_lock(&_pending_exceptions_done_spinlock);
880         _pending_exceptions_done_count++;
881         spin_unlock(&_pending_exceptions_done_spinlock);
882
883         wake_up_all(&_pending_exceptions_done);
884 }
885
886 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
887 {
888         int i, linear_chunks;
889         chunk_t old_chunk, new_chunk;
890         struct dm_io_region src, dest;
891         sector_t io_size;
892         uint64_t previous_count;
893
894         BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
895         if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
896                 goto shut;
897
898         /*
899          * valid flag never changes during merge, so no lock required.
900          */
901         if (!s->valid) {
902                 DMERR("Snapshot is invalid: can't merge");
903                 goto shut;
904         }
905
906         linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
907                                                       &new_chunk);
908         if (linear_chunks <= 0) {
909                 if (linear_chunks < 0) {
910                         DMERR("Read error in exception store: "
911                               "shutting down merge");
912                         down_write(&s->lock);
913                         s->merge_failed = 1;
914                         up_write(&s->lock);
915                 }
916                 goto shut;
917         }
918
919         /* Adjust old_chunk and new_chunk to reflect start of linear region */
920         old_chunk = old_chunk + 1 - linear_chunks;
921         new_chunk = new_chunk + 1 - linear_chunks;
922
923         /*
924          * Use one (potentially large) I/O to copy all 'linear_chunks'
925          * from the exception store to the origin
926          */
927         io_size = linear_chunks * s->store->chunk_size;
928
929         dest.bdev = s->origin->bdev;
930         dest.sector = chunk_to_sector(s->store, old_chunk);
931         dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
932
933         src.bdev = s->cow->bdev;
934         src.sector = chunk_to_sector(s->store, new_chunk);
935         src.count = dest.count;
936
937         /*
938          * Reallocate any exceptions needed in other snapshots then
939          * wait for the pending exceptions to complete.
940          * Each time any pending exception (globally on the system)
941          * completes we are woken and repeat the process to find out
942          * if we can proceed.  While this may not seem a particularly
943          * efficient algorithm, it is not expected to have any
944          * significant impact on performance.
945          */
946         previous_count = read_pending_exceptions_done_count();
947         while (origin_write_extent(s, dest.sector, io_size)) {
948                 wait_event(_pending_exceptions_done,
949                            (read_pending_exceptions_done_count() !=
950                             previous_count));
951                 /* Retry after the wait, until all exceptions are done. */
952                 previous_count = read_pending_exceptions_done_count();
953         }
954
955         down_write(&s->lock);
956         s->first_merging_chunk = old_chunk;
957         s->num_merging_chunks = linear_chunks;
958         up_write(&s->lock);
959
960         /* Wait until writes to all 'linear_chunks' drain */
961         for (i = 0; i < linear_chunks; i++)
962                 __check_for_conflicting_io(s, old_chunk + i);
963
964         dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
965         return;
966
967 shut:
968         merge_shutdown(s);
969 }
970
971 static void error_bios(struct bio *bio);
972
973 static void merge_callback(int read_err, unsigned long write_err, void *context)
974 {
975         struct dm_snapshot *s = context;
976         struct bio *b = NULL;
977
978         if (read_err || write_err) {
979                 if (read_err)
980                         DMERR("Read error: shutting down merge.");
981                 else
982                         DMERR("Write error: shutting down merge.");
983                 goto shut;
984         }
985
986         if (s->store->type->commit_merge(s->store,
987                                          s->num_merging_chunks) < 0) {
988                 DMERR("Write error in exception store: shutting down merge");
989                 goto shut;
990         }
991
992         if (remove_single_exception_chunk(s) < 0)
993                 goto shut;
994
995         snapshot_merge_next_chunks(s);
996
997         return;
998
999 shut:
1000         down_write(&s->lock);
1001         s->merge_failed = 1;
1002         b = __release_queued_bios_after_merge(s);
1003         up_write(&s->lock);
1004         error_bios(b);
1005
1006         merge_shutdown(s);
1007 }
1008
1009 static void start_merge(struct dm_snapshot *s)
1010 {
1011         if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1012                 snapshot_merge_next_chunks(s);
1013 }
1014
1015 static int wait_schedule(void *ptr)
1016 {
1017         schedule();
1018
1019         return 0;
1020 }
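
/*
 * wait_schedule() is the action routine handed to wait_on_bit() in
 * stop_merge() below: it just schedules until merge_shutdown() clears
 * RUNNING_MERGE and wakes us.
 */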
1021
1022 /*
1023  * Stop the merging process and wait until it finishes.
1024  */
1025 static void stop_merge(struct dm_snapshot *s)
1026 {
1027         set_bit(SHUTDOWN_MERGE, &s->state_bits);
1028         wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
1029                     TASK_UNINTERRUPTIBLE);
1030         clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1031 }
1032
1033 /*
1034  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
1035  */
1036 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1037 {
1038         struct dm_snapshot *s;
1039         int i;
1040         int r = -EINVAL;
1041         char *origin_path, *cow_path;
1042         unsigned args_used, num_flush_requests = 1;
1043         fmode_t origin_mode = FMODE_READ;
1044
1045         if (argc != 4) {
1046                 ti->error = "requires exactly 4 arguments";
1047                 r = -EINVAL;
1048                 goto bad;
1049         }
1050
1051         if (dm_target_is_snapshot_merge(ti)) {
1052                 num_flush_requests = 2;
1053                 origin_mode = FMODE_WRITE;
1054         }
1055
1056         s = kmalloc(sizeof(*s), GFP_KERNEL);
1057         if (!s) {
1058                 ti->error = "Cannot allocate snapshot context private "
1059                     "structure";
1060                 r = -ENOMEM;
1061                 goto bad;
1062         }
1063
1064         origin_path = argv[0];
1065         argv++;
1066         argc--;
1067
1068         r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1069         if (r) {
1070                 ti->error = "Cannot get origin device";
1071                 goto bad_origin;
1072         }
1073
1074         cow_path = argv[0];
1075         argv++;
1076         argc--;
1077
1078         r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1079         if (r) {
1080                 ti->error = "Cannot get COW device";
1081                 goto bad_cow;
1082         }
1083
1084         r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1085         if (r) {
1086                 ti->error = "Couldn't create exception store";
1087                 r = -EINVAL;
1088                 goto bad_store;
1089         }
1090
1091         argv += args_used;
1092         argc -= args_used;
1093
1094         s->ti = ti;
1095         s->valid = 1;
1096         s->active = 0;
1097         atomic_set(&s->pending_exceptions_count, 0);
1098         init_rwsem(&s->lock);
1099         INIT_LIST_HEAD(&s->list);
1100         spin_lock_init(&s->pe_lock);
1101         s->state_bits = 0;
1102         s->merge_failed = 0;
1103         s->first_merging_chunk = 0;
1104         s->num_merging_chunks = 0;
1105         bio_list_init(&s->bios_queued_during_merge);
1106
1107         /* Allocate hash table for COW data */
1108         if (init_hash_tables(s)) {
1109                 ti->error = "Unable to allocate hash table space";
1110                 r = -ENOMEM;
1111                 goto bad_hash_tables;
1112         }
1113
1114         s->kcopyd_client = dm_kcopyd_client_create();
1115         if (IS_ERR(s->kcopyd_client)) {
1116                 r = PTR_ERR(s->kcopyd_client);
1117                 ti->error = "Could not create kcopyd client";
1118                 goto bad_kcopyd;
1119         }
1120
1121         s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1122         if (!s->pending_pool) {
1123                 ti->error = "Could not allocate mempool for pending exceptions";
                     r = -ENOMEM;
1124                 goto bad_pending_pool;
1125         }
1126
1127         s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
1128                                                          tracked_chunk_cache);
1129         if (!s->tracked_chunk_pool) {
1130                 ti->error = "Could not allocate tracked_chunk mempool for "
1131                             "tracking reads";
                     r = -ENOMEM;
1132                 goto bad_tracked_chunk_pool;
1133         }
1134
1135         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1136                 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1137
1138         spin_lock_init(&s->tracked_chunk_lock);
1139
1140         ti->private = s;
1141         ti->num_flush_requests = num_flush_requests;
1142
1143         /* Add snapshot to the list of snapshots for this origin */
1144         /* Exceptions aren't triggered till snapshot_resume() is called */
1145         r = register_snapshot(s);
1146         if (r == -ENOMEM) {
1147                 ti->error = "Snapshot origin struct allocation failed";
1148                 goto bad_load_and_register;
1149         } else if (r < 0) {
1150                 /* invalid handover, register_snapshot has set ti->error */
1151                 goto bad_load_and_register;
1152         }
1153
1154         /*
1155          * Metadata must only be loaded into one table at once, so skip this
1156          * Metadata must only be loaded into one table at a time, so skip this
1157          * Chunk size will be set during the handover - set it to zero to
1158          * ensure it's ignored.
1159          */
1160         if (r > 0) {
1161                 s->store->chunk_size = 0;
1162                 return 0;
1163         }
1164
1165         r = s->store->type->read_metadata(s->store, dm_add_exception,
1166                                           (void *)s);
1167         if (r < 0) {
1168                 ti->error = "Failed to read snapshot metadata";
1169                 goto bad_read_metadata;
1170         } else if (r > 0) {
1171                 s->valid = 0;
1172                 DMWARN("Snapshot is marked invalid.");
1173         }
1174
1175         if (!s->store->chunk_size) {
1176                 ti->error = "Chunk size not set";
1177                 goto bad_read_metadata;
1178         }
1179         ti->split_io = s->store->chunk_size;
1180
1181         return 0;
1182
1183 bad_read_metadata:
1184         unregister_snapshot(s);
1185
1186 bad_load_and_register:
1187         mempool_destroy(s->tracked_chunk_pool);
1188
1189 bad_tracked_chunk_pool:
1190         mempool_destroy(s->pending_pool);
1191
1192 bad_pending_pool:
1193         dm_kcopyd_client_destroy(s->kcopyd_client);
1194
1195 bad_kcopyd:
1196         dm_exception_table_exit(&s->pending, pending_cache);
1197         dm_exception_table_exit(&s->complete, exception_cache);
1198
1199 bad_hash_tables:
1200         dm_exception_store_destroy(s->store);
1201
1202 bad_store:
1203         dm_put_device(ti, s->cow);
1204
1205 bad_cow:
1206         dm_put_device(ti, s->origin);
1207
1208 bad_origin:
1209         kfree(s);
1210
1211 bad:
1212         return r;
1213 }
1214
1215 static void __free_exceptions(struct dm_snapshot *s)
1216 {
1217         dm_kcopyd_client_destroy(s->kcopyd_client);
1218         s->kcopyd_client = NULL;
1219
1220         dm_exception_table_exit(&s->pending, pending_cache);
1221         dm_exception_table_exit(&s->complete, exception_cache);
1222 }
1223
1224 static void __handover_exceptions(struct dm_snapshot *snap_src,
1225                                   struct dm_snapshot *snap_dest)
1226 {
1227         union {
1228                 struct dm_exception_table table_swap;
1229                 struct dm_exception_store *store_swap;
1230         } u;
1231
1232         /*
1233          * Swap all snapshot context information between the two instances.
1234          */
1235         u.table_swap = snap_dest->complete;
1236         snap_dest->complete = snap_src->complete;
1237         snap_src->complete = u.table_swap;
1238
1239         u.store_swap = snap_dest->store;
1240         snap_dest->store = snap_src->store;
1241         snap_src->store = u.store_swap;
1242
1243         snap_dest->store->snap = snap_dest;
1244         snap_src->store->snap = snap_src;
1245
1246         snap_dest->ti->split_io = snap_dest->store->chunk_size;
1247         snap_dest->valid = snap_src->valid;
1248
1249         /*
1250          * Set source invalid to ensure it receives no further I/O.
1251          */
1252         snap_src->valid = 0;
1253 }
1254
1255 static void snapshot_dtr(struct dm_target *ti)
1256 {
1257 #ifdef CONFIG_DM_DEBUG
1258         int i;
1259 #endif
1260         struct dm_snapshot *s = ti->private;
1261         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1262
1263         down_read(&_origins_lock);
1264         /* Check whether exception handover must be cancelled */
1265         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1266         if (snap_src && snap_dest && (s == snap_src)) {
1267                 down_write(&snap_dest->lock);
1268                 snap_dest->valid = 0;
1269                 up_write(&snap_dest->lock);
1270                 DMERR("Cancelling snapshot handover.");
1271         }
1272         up_read(&_origins_lock);
1273
1274         if (dm_target_is_snapshot_merge(ti))
1275                 stop_merge(s);
1276
1277         /* Prevent further origin writes from using this snapshot. */
1278         /* After this returns there can be no new kcopyd jobs. */
1279         unregister_snapshot(s);
1280
1281         while (atomic_read(&s->pending_exceptions_count))
1282                 msleep(1);
1283         /*
1284          * Ensure instructions in mempool_destroy aren't reordered
1285          * before atomic_read.
1286          */
1287         smp_mb();
1288
1289 #ifdef CONFIG_DM_DEBUG
1290         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1291                 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1292 #endif
1293
1294         mempool_destroy(s->tracked_chunk_pool);
1295
1296         __free_exceptions(s);
1297
1298         mempool_destroy(s->pending_pool);
1299
1300         dm_exception_store_destroy(s->store);
1301
1302         dm_put_device(ti, s->cow);
1303
1304         dm_put_device(ti, s->origin);
1305
1306         kfree(s);
1307 }
1308
1309 /*
1310  * Flush a list of buffers.
1311  */
1312 static void flush_bios(struct bio *bio)
1313 {
1314         struct bio *n;
1315
1316         while (bio) {
1317                 n = bio->bi_next;
1318                 bio->bi_next = NULL;
1319                 generic_make_request(bio);
1320                 bio = n;
1321         }
1322 }
1323
1324 static int do_origin(struct dm_dev *origin, struct bio *bio);
1325
1326 /*
1327  * Retry a list of origin bios, resubmitting each through do_origin().
1328  */
1329 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1330 {
1331         struct bio *n;
1332         int r;
1333
1334         while (bio) {
1335                 n = bio->bi_next;
1336                 bio->bi_next = NULL;
1337                 r = do_origin(s->origin, bio);
1338                 if (r == DM_MAPIO_REMAPPED)
1339                         generic_make_request(bio);
1340                 bio = n;
1341         }
1342 }
1343
1344 /*
1345  * Error a list of buffers.
1346  */
1347 static void error_bios(struct bio *bio)
1348 {
1349         struct bio *n;
1350
1351         while (bio) {
1352                 n = bio->bi_next;
1353                 bio->bi_next = NULL;
1354                 bio_io_error(bio);
1355                 bio = n;
1356         }
1357 }
1358
1359 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1360 {
1361         if (!s->valid)
1362                 return;
1363
1364         if (err == -EIO)
1365                 DMERR("Invalidating snapshot: Error reading/writing.");
1366         else if (err == -ENOMEM)
1367                 DMERR("Invalidating snapshot: Unable to allocate exception.");
1368
1369         if (s->store->type->drop_snapshot)
1370                 s->store->type->drop_snapshot(s->store);
1371
1372         s->valid = 0;
1373
1374         dm_table_event(s->ti->table);
1375 }
1376
1377 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1378 {
1379         struct dm_exception *e;
1380         struct dm_snapshot *s = pe->snap;
1381         struct bio *origin_bios = NULL;
1382         struct bio *snapshot_bios = NULL;
1383         int error = 0;
1384
1385         if (!success) {
1386                 /* Read/write error - snapshot is unusable */
1387                 down_write(&s->lock);
1388                 __invalidate_snapshot(s, -EIO);
1389                 error = 1;
1390                 goto out;
1391         }
1392
1393         e = alloc_completed_exception();
1394         if (!e) {
1395                 down_write(&s->lock);
1396                 __invalidate_snapshot(s, -ENOMEM);
1397                 error = 1;
1398                 goto out;
1399         }
1400         *e = pe->e;
1401
1402         down_write(&s->lock);
1403         if (!s->valid) {
1404                 free_completed_exception(e);
1405                 error = 1;
1406                 goto out;
1407         }
1408
1409         /* Check for conflicting reads */
1410         __check_for_conflicting_io(s, pe->e.old_chunk);
1411
1412         /*
1413          * Add a proper exception, and remove the
1414          * in-flight exception from the list.
1415          */
1416         dm_insert_exception(&s->complete, e);
1417
1418  out:
1419         dm_remove_exception(&pe->e);
1420         snapshot_bios = bio_list_get(&pe->snapshot_bios);
1421         origin_bios = bio_list_get(&pe->origin_bios);
1422         free_pending_exception(pe);
1423
1424         increment_pending_exceptions_done_count();
1425
1426         up_write(&s->lock);
1427
1428         /* Submit any pending write bios */
1429         if (error)
1430                 error_bios(snapshot_bios);
1431         else
1432                 flush_bios(snapshot_bios);
1433
1434         retry_origin_bios(s, origin_bios);
1435 }
1436
1437 static void commit_callback(void *context, int success)
1438 {
1439         struct dm_snap_pending_exception *pe = context;
1440
1441         pending_complete(pe, success);
1442 }
1443
1444 /*
1445  * Called when the copy I/O has finished.  kcopyd actually runs
1446  * this code so don't block.
1447  */
1448 static void copy_callback(int read_err, unsigned long write_err, void *context)
1449 {
1450         struct dm_snap_pending_exception *pe = context;
1451         struct dm_snapshot *s = pe->snap;
1452
1453         if (read_err || write_err)
1454                 pending_complete(pe, 0);
1455
1456         else
1457                 /* Update the metadata if we are persistent */
1458                 s->store->type->commit_exception(s->store, &pe->e,
1459                                                  commit_callback, pe);
1460 }
1461
1462 /*
1463  * Dispatches the copy operation to kcopyd.
1464  */
1465 static void start_copy(struct dm_snap_pending_exception *pe)
1466 {
1467         struct dm_snapshot *s = pe->snap;
1468         struct dm_io_region src, dest;
1469         struct block_device *bdev = s->origin->bdev;
1470         sector_t dev_size;
1471
1472         dev_size = get_dev_size(bdev);
1473
1474         src.bdev = bdev;
1475         src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1476         src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1477
1478         dest.bdev = s->cow->bdev;
1479         dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1480         dest.count = src.count;
1481
1482         /* Hand over to kcopyd */
1483         dm_kcopyd_copy(s->kcopyd_client,
1484                     &src, 1, &dest, 0, copy_callback, pe);
1485 }
1486
1487 static struct dm_snap_pending_exception *
1488 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1489 {
1490         struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1491
1492         if (!e)
1493                 return NULL;
1494
1495         return container_of(e, struct dm_snap_pending_exception, e);
1496 }
1497
1498 /*
1499  * Looks to see if this snapshot already has a pending exception
1500  * for this chunk, otherwise it allocates a new one and inserts
1501  * it into the pending table.
1502  *
1503  * NOTE: a write lock must be held on snap->lock before calling
1504  * this.
1505  */
1506 static struct dm_snap_pending_exception *
1507 __find_pending_exception(struct dm_snapshot *s,
1508                          struct dm_snap_pending_exception *pe, chunk_t chunk)
1509 {
1510         struct dm_snap_pending_exception *pe2;
1511
1512         pe2 = __lookup_pending_exception(s, chunk);
1513         if (pe2) {
1514                 free_pending_exception(pe);
1515                 return pe2;
1516         }
1517
1518         pe->e.old_chunk = chunk;
1519         bio_list_init(&pe->origin_bios);
1520         bio_list_init(&pe->snapshot_bios);
1521         pe->started = 0;
1522
1523         if (s->store->type->prepare_exception(s->store, &pe->e)) {
1524                 free_pending_exception(pe);
1525                 return NULL;
1526         }
1527
1528         dm_insert_exception(&s->pending, &pe->e);
1529
1530         return pe;
1531 }
1532
1533 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1534                             struct bio *bio, chunk_t chunk)
1535 {
1536         bio->bi_bdev = s->cow->bdev;
1537         bio->bi_sector = chunk_to_sector(s->store,
1538                                          dm_chunk_number(e->new_chunk) +
1539                                          (chunk - e->old_chunk)) +
1540                                          (bio->bi_sector &
1541                                           s->store->chunk_mask);
1542 }
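
/*
 * For example, with 16-sector chunks (chunk_mask == 15) a bio at
 * sector 35 lies in chunk 2 at offset 3; if chunk 2 is remapped to
 * new chunk 7, the bio is redirected to sector 7 * 16 + 3 == 115 on
 * the COW device.
 */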
1543
1544 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1545                         union map_info *map_context)
1546 {
1547         struct dm_exception *e;
1548         struct dm_snapshot *s = ti->private;
1549         int r = DM_MAPIO_REMAPPED;
1550         chunk_t chunk;
1551         struct dm_snap_pending_exception *pe = NULL;
1552
1553         if (bio->bi_rw & REQ_FLUSH) {
1554                 bio->bi_bdev = s->cow->bdev;
1555                 return DM_MAPIO_REMAPPED;
1556         }
1557
1558         chunk = sector_to_chunk(s->store, bio->bi_sector);
1559
1560         /* Full snapshots are not usable */
1561         /* To get here the table must be live so s->active is always set. */
1562         if (!s->valid)
1563                 return -EIO;
1564
1565         /* FIXME: should only take write lock if we need
1566          * to copy an exception */
1567         down_write(&s->lock);
1568
1569         if (!s->valid) {
1570                 r = -EIO;
1571                 goto out_unlock;
1572         }
1573
1574         /* If the block is already remapped - use that, else remap it */
1575         e = dm_lookup_exception(&s->complete, chunk);
1576         if (e) {
1577                 remap_exception(s, e, bio, chunk);
1578                 goto out_unlock;
1579         }
1580
1581         /*
1582          * Write to snapshot - higher level takes care of RW/RO
1583          * flags so we should only get this if we are
1584          * writeable.
1585          */
1586         if (bio_rw(bio) == WRITE) {
1587                 pe = __lookup_pending_exception(s, chunk);
1588                 if (!pe) {
1589                         up_write(&s->lock);
1590                         pe = alloc_pending_exception(s);
1591                         down_write(&s->lock);
1592
1593                         if (!s->valid) {
1594                                 free_pending_exception(pe);
1595                                 r = -EIO;
1596                                 goto out_unlock;
1597                         }
1598
1599                         e = dm_lookup_exception(&s->complete, chunk);
1600                         if (e) {
1601                                 free_pending_exception(pe);
1602                                 remap_exception(s, e, bio, chunk);
1603                                 goto out_unlock;
1604                         }
1605
1606                         pe = __find_pending_exception(s, pe, chunk);
1607                         if (!pe) {
1608                                 __invalidate_snapshot(s, -ENOMEM);
1609                                 r = -EIO;
1610                                 goto out_unlock;
1611                         }
1612                 }
1613
1614                 remap_exception(s, &pe->e, bio, chunk);
1615                 bio_list_add(&pe->snapshot_bios, bio);
1616
1617                 r = DM_MAPIO_SUBMITTED;
1618
1619                 if (!pe->started) {
1620                         /* this is protected by snap->lock */
1621                         pe->started = 1;
1622                         up_write(&s->lock);
1623                         start_copy(pe);
1624                         goto out;
1625                 }
1626         } else {
1627                 bio->bi_bdev = s->origin->bdev;
1628                 map_context->ptr = track_chunk(s, chunk);
1629         }
1630
1631  out_unlock:
1632         up_write(&s->lock);
1633  out:
1634         return r;
1635 }
1636
1637 /*
1638  * A snapshot-merge target behaves like a combination of a snapshot
1639  * target and a snapshot-origin target.  It only generates new
1640  * exceptions in other snapshots and not in the one that is being
1641  * merged.
1642  *
1643  * For each chunk, if there is an existing exception, it is used to
1644  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1645  * which in turn might generate exceptions in other snapshots.
1646  * If merging is currently taking place on the chunk in question, the
1647  * I/O is deferred by adding it to s->bios_queued_during_merge.
1648  */
1649 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1650                               union map_info *map_context)
1651 {
1652         struct dm_exception *e;
1653         struct dm_snapshot *s = ti->private;
1654         int r = DM_MAPIO_REMAPPED;
1655         chunk_t chunk;
1656
1657         if (bio->bi_rw & REQ_FLUSH) {
1658                 if (!map_context->target_request_nr)
1659                         bio->bi_bdev = s->origin->bdev;
1660                 else
1661                         bio->bi_bdev = s->cow->bdev;
1662                 map_context->ptr = NULL;
1663                 return DM_MAPIO_REMAPPED;
1664         }
1665
1666         chunk = sector_to_chunk(s->store, bio->bi_sector);
1667
1668         down_write(&s->lock);
1669
1670         /* Full merging snapshots are redirected to the origin */
1671         if (!s->valid)
1672                 goto redirect_to_origin;
1673
1674         /* If the block is already remapped - use that */
1675         e = dm_lookup_exception(&s->complete, chunk);
1676         if (e) {
1677                 /* Queue writes overlapping with chunks being merged */
1678                 if (bio_rw(bio) == WRITE &&
1679                     chunk >= s->first_merging_chunk &&
1680                     chunk < (s->first_merging_chunk +
1681                              s->num_merging_chunks)) {
1682                         bio->bi_bdev = s->origin->bdev;
1683                         bio_list_add(&s->bios_queued_during_merge, bio);
1684                         r = DM_MAPIO_SUBMITTED;
1685                         goto out_unlock;
1686                 }
1687
1688                 remap_exception(s, e, bio, chunk);
1689
1690                 if (bio_rw(bio) == WRITE)
1691                         map_context->ptr = track_chunk(s, chunk);
1692                 goto out_unlock;
1693         }
1694
1695 redirect_to_origin:
1696         bio->bi_bdev = s->origin->bdev;
1697
1698         if (bio_rw(bio) == WRITE) {
1699                 up_write(&s->lock);
1700                 return do_origin(s->origin, bio);
1701         }
1702
1703 out_unlock:
1704         up_write(&s->lock);
1705
1706         return r;
1707 }
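
/*
 * A summary sketch of the mapping decisions above:
 *
 *      snapshot invalid                  -> everything to the origin
 *      remapped, write inside the range
 *      currently being merged            -> queue on bios_queued_during_merge
 *      remapped otherwise                -> redirect to the COW device
 *      not remapped, read                -> straight to the origin
 *      not remapped, write               -> do_origin(), possibly creating
 *                                           exceptions in other snapshots
 */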
1708
1709 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1710                            int error, union map_info *map_context)
1711 {
1712         struct dm_snapshot *s = ti->private;
1713         struct dm_snap_tracked_chunk *c = map_context->ptr;
1714
1715         if (c)
1716                 stop_tracking_chunk(s, c);
1717
1718         return 0;
1719 }
1720
1721 static void snapshot_merge_presuspend(struct dm_target *ti)
1722 {
1723         struct dm_snapshot *s = ti->private;
1724
1725         stop_merge(s);
1726 }
1727
1728 static int snapshot_preresume(struct dm_target *ti)
1729 {
1730         int r = 0;
1731         struct dm_snapshot *s = ti->private;
1732         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1733
1734         down_read(&_origins_lock);
1735         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1736         if (snap_src && snap_dest) {
1737                 down_read(&snap_src->lock);
1738                 if (s == snap_src) {
1739                         DMERR("Unable to resume snapshot source until "
1740                               "handover completes.");
1741                         r = -EINVAL;
1742                 } else if (!dm_suspended(snap_src->ti)) {
1743                         DMERR("Unable to perform snapshot handover until "
1744                               "source is suspended.");
1745                         r = -EINVAL;
1746                 }
1747                 up_read(&snap_src->lock);
1748         }
1749         up_read(&_origins_lock);
1750
1751         return r;
1752 }
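
/*
 * Hypothetical reload sequence satisfying the checks above (the new
 * table names the same COW device, so it becomes snap_dest):
 *
 *      dmsetup load snap --table "0 $SIZE snapshot-merge $ORIGIN $COW P 16"
 *      dmsetup suspend snap    (suspends the source target)
 *      dmsetup resume snap     (preresume passes; snapshot_resume() then
 *                               hands the exception tables over)
 */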
1753
1754 static void snapshot_resume(struct dm_target *ti)
1755 {
1756         struct dm_snapshot *s = ti->private;
1757         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1758
1759         down_read(&_origins_lock);
1760         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1761         if (snap_src && snap_dest) {
1762                 down_write(&snap_src->lock);
1763                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1764                 __handover_exceptions(snap_src, snap_dest);
1765                 up_write(&snap_dest->lock);
1766                 up_write(&snap_src->lock);
1767         }
1768         up_read(&_origins_lock);
1769
1770         /* Now that we have the correct chunk size, re-register */
1771         reregister_snapshot(s);
1772
1773         down_write(&s->lock);
1774         s->active = 1;
1775         up_write(&s->lock);
1776 }
1777
1778 static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1779 {
1780         sector_t min_chunksize;
1781
1782         down_read(&_origins_lock);
1783         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1784         up_read(&_origins_lock);
1785
1786         return min_chunksize;
1787 }
1788
1789 static void snapshot_merge_resume(struct dm_target *ti)
1790 {
1791         struct dm_snapshot *s = ti->private;
1792
1793         /*
1794          * Handover exceptions from existing snapshot.
1795          */
1796         snapshot_resume(ti);
1797
1798         /*
1799          * snapshot-merge acts as an origin, so set ti->split_io
1800          */
1801         ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1802
1803         start_merge(s);
1804 }
1805
1806 static int snapshot_status(struct dm_target *ti, status_type_t type,
1807                            char *result, unsigned int maxlen)
1808 {
1809         unsigned sz = 0;
1810         struct dm_snapshot *snap = ti->private;
1811
1812         switch (type) {
1813         case STATUSTYPE_INFO:
1814
1815                 down_write(&snap->lock);
1816
1817                 if (!snap->valid)
1818                         DMEMIT("Invalid");
1819                 else if (snap->merge_failed)
1820                         DMEMIT("Merge failed");
1821                 else {
1822                         if (snap->store->type->usage) {
1823                                 sector_t total_sectors, sectors_allocated,
1824                                          metadata_sectors;
1825                                 snap->store->type->usage(snap->store,
1826                                                          &total_sectors,
1827                                                          &sectors_allocated,
1828                                                          &metadata_sectors);
1829                                 DMEMIT("%llu/%llu %llu",
1830                                        (unsigned long long)sectors_allocated,
1831                                        (unsigned long long)total_sectors,
1832                                        (unsigned long long)metadata_sectors);
1833                         } else {
1834                                 DMEMIT("Unknown");
1835                         }
1836                 }
1837
1838                 up_write(&snap->lock);
1839
1840                 break;
1841
1842         case STATUSTYPE_TABLE:
1843                 /*
1844                  * Emit the constructor arguments: the origin and
1845                  * COW device names, followed by the exception
1846                  * store's own status (type, chunk size, ...).
1847                  */
1848                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1849                 snap->store->type->status(snap->store, type, result + sz,
1850                                           maxlen - sz);
1851                 break;
1852         }
1853
1854         return 0;
1855 }
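
/*
 * A sketch of the resulting STATUSTYPE_INFO output (numbers invented):
 *
 *      # dmsetup status vg-snap
 *      0 2097152 snapshot 84480/2097152 344
 *
 * i.e. <sectors_allocated>/<total_sectors> <metadata_sectors>; "Invalid",
 * "Merge failed" or "Unknown" replace the numbers in the special cases
 * handled above.
 */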
1856
1857 static int snapshot_iterate_devices(struct dm_target *ti,
1858                                     iterate_devices_callout_fn fn, void *data)
1859 {
1860         struct dm_snapshot *snap = ti->private;
1861         int r;
1862
1863         r = fn(ti, snap->origin, 0, ti->len, data);
1864
1865         if (!r)
1866                 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1867
1868         return r;
1869 }
1870
1871
1872 /*-----------------------------------------------------------------
1873  * Origin methods
1874  *---------------------------------------------------------------*/
1875
1876 /*
1877  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1878  * supplied bio is ignored.  The caller may submit it immediately.
1879  * (No remapping actually occurs as the origin is always a direct linear
1880  * map.)
1881  *
1882  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1883  * and any supplied bio is added to a list to be submitted once all
1884  * the necessary exceptions exist.
1885  */
1886 static int __origin_write(struct list_head *snapshots, sector_t sector,
1887                           struct bio *bio)
1888 {
1889         int r = DM_MAPIO_REMAPPED;
1890         struct dm_snapshot *snap;
1891         struct dm_exception *e;
1892         struct dm_snap_pending_exception *pe;
1893         struct dm_snap_pending_exception *pe_to_start_now = NULL;
1894         struct dm_snap_pending_exception *pe_to_start_last = NULL;
1895         chunk_t chunk;
1896
1897         /* Do all the snapshots on this origin */
1898         list_for_each_entry(snap, snapshots, list) {
1899                 /*
1900                  * Don't make new exceptions in a merging snapshot
1901                  * because it has effectively been deleted
1902                  */
1903                 if (dm_target_is_snapshot_merge(snap->ti))
1904                         continue;
1905
1906                 down_write(&snap->lock);
1907
1908                 /* Only deal with valid and active snapshots */
1909                 if (!snap->valid || !snap->active)
1910                         goto next_snapshot;
1911
1912                 /* Nothing to do if writing beyond end of snapshot */
1913                 if (sector >= dm_table_get_size(snap->ti->table))
1914                         goto next_snapshot;
1915
1916                 /*
1917                  * Remember, different snapshots can have
1918                  * different chunk sizes.
1919                  */
1920                 chunk = sector_to_chunk(snap->store, sector);
1921
1922                 /*
1923                  * Check exception table to see if block
1924                  * is already remapped in this snapshot
1925                  * and trigger an exception if not.
1926                  */
1927                 e = dm_lookup_exception(&snap->complete, chunk);
1928                 if (e)
1929                         goto next_snapshot;
1930
1931                 pe = __lookup_pending_exception(snap, chunk);
1932                 if (!pe) {
1933                         up_write(&snap->lock);
1934                         pe = alloc_pending_exception(snap);
1935                         down_write(&snap->lock);
1936
1937                         if (!snap->valid) {
1938                                 free_pending_exception(pe);
1939                                 goto next_snapshot;
1940                         }
1941
1942                         e = dm_lookup_exception(&snap->complete, chunk);
1943                         if (e) {
1944                                 free_pending_exception(pe);
1945                                 goto next_snapshot;
1946                         }
1947
1948                         pe = __find_pending_exception(snap, pe, chunk);
1949                         if (!pe) {
1950                                 __invalidate_snapshot(snap, -ENOMEM);
1951                                 goto next_snapshot;
1952                         }
1953                 }
1954
1955                 r = DM_MAPIO_SUBMITTED;
1956
1957                 /*
1958                  * If an origin bio was supplied, queue it to wait for the
1959                  * completion of this exception, and start this one last,
1960                  * at the end of the function.
1961                  */
1962                 if (bio) {
1963                         bio_list_add(&pe->origin_bios, bio);
1964                         bio = NULL;
1965
1966                         if (!pe->started) {
1967                                 pe->started = 1;
1968                                 pe_to_start_last = pe;
1969                         }
1970                 }
1971
1972                 if (!pe->started) {
1973                         pe->started = 1;
1974                         pe_to_start_now = pe;
1975                 }
1976
1977  next_snapshot:
1978                 up_write(&snap->lock);
1979
1980                 if (pe_to_start_now) {
1981                         start_copy(pe_to_start_now);
1982                         pe_to_start_now = NULL;
1983                 }
1984         }
1985
1986         /*
1987          * Submit the exception against which the bio is queued last,
1988          * to give the other exceptions a head start.
1989          */
1990         if (pe_to_start_last)
1991                 start_copy(pe_to_start_last);
1992
1993         return r;
1994 }
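
/*
 * Ordering example (hypothetical scenario): if an origin write overlaps
 * three snapshots A, B and C, none of which have the chunk remapped yet,
 * the bio is queued on the first pending exception created (A's, which
 * becomes pe_to_start_last); B's and C's copies are kicked off inside
 * the loop via pe_to_start_now, and A's copy is started only at the end,
 * so the exceptions that do not hold the bio get a head start.
 */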
1995
1996 /*
1997  * Called on a write from the origin driver.
1998  */
1999 static int do_origin(struct dm_dev *origin, struct bio *bio)
2000 {
2001         struct origin *o;
2002         int r = DM_MAPIO_REMAPPED;
2003
2004         down_read(&_origins_lock);
2005         o = __lookup_origin(origin->bdev);
2006         if (o)
2007                 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2008         up_read(&_origins_lock);
2009
2010         return r;
2011 }
2012
2013 /*
2014  * Trigger exceptions in all non-merging snapshots.
2015  *
2016  * The chunk size of the merging snapshot may be larger than the chunk
2017  * size of some other snapshot so we may need to reallocate multiple
2018  * chunks in other snapshots.
2019  *
2020  * We scan all the overlapping exceptions in the other snapshots.
2021  * Returns 1 if anything was reallocated and must be waited for,
2022  * otherwise returns 0.
2023  *
2024  * size must be a multiple of merging_snap's chunk_size.
2025  */
2026 static int origin_write_extent(struct dm_snapshot *merging_snap,
2027                                sector_t sector, unsigned size)
2028 {
2029         int must_wait = 0;
2030         sector_t n;
2031         struct origin *o;
2032
2033         /*
2034          * The origin's __minimum_chunk_size() was stored in ti->split_io
2035          * by snapshot_merge_resume().
2036          */
2037         down_read(&_origins_lock);
2038         o = __lookup_origin(merging_snap->origin->bdev);
2039         for (n = 0; n < size; n += merging_snap->ti->split_io)
2040                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2041                     DM_MAPIO_SUBMITTED)
2042                         must_wait = 1;
2043         up_read(&_origins_lock);
2044
2045         return must_wait;
2046 }
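
/*
 * Worked example with invented sizes: if the merging snapshot uses
 * 16-sector chunks and another snapshot of the same origin uses
 * 4-sector chunks, ti->split_io is 4 (the origin minimum), so merging
 * back one 16-sector chunk calls __origin_write() for n = 0, 4, 8 and
 * 12 and may reallocate up to four chunks in the other snapshot before
 * the merge of that area may proceed.
 */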
2047
2048 /*
2049  * Origin: maps a linear range of a device, with hooks for snapshotting.
2050  */
2051
2052 /*
2053  * Construct an origin mapping: <dev_path>
2054  * The context for an origin is merely a 'struct dm_dev *'
2055  * pointing to the real device.
2056  */
2057 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2058 {
2059         int r;
2060         struct dm_dev *dev;
2061
2062         if (argc != 1) {
2063                 ti->error = "origin: incorrect number of arguments";
2064                 return -EINVAL;
2065         }
2066
2067         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
2068         if (r) {
2069                 ti->error = "Cannot get target device";
2070                 return r;
2071         }
2072
2073         ti->private = dev;
2074         ti->num_flush_requests = 1;
2075
2076         return 0;
2077 }
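
/*
 * A minimal usage sketch (device and dm names are hypothetical):
 *
 *      echo "0 `blockdev --getsz /dev/vg/base` snapshot-origin /dev/vg/base" | \
 *              dmsetup create base-origin
 */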
2078
2079 static void origin_dtr(struct dm_target *ti)
2080 {
2081         struct dm_dev *dev = ti->private;
2082         dm_put_device(ti, dev);
2083 }
2084
2085 static int origin_map(struct dm_target *ti, struct bio *bio,
2086                       union map_info *map_context)
2087 {
2088         struct dm_dev *dev = ti->private;
2089         bio->bi_bdev = dev->bdev;
2090
2091         if (bio->bi_rw & REQ_FLUSH)
2092                 return DM_MAPIO_REMAPPED;
2093
2094         /* Only tell snapshots if this is a write */
2095         return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2096 }
2097
2098 /*
2099  * Set the target "split_io" field to the minimum of all the snapshots'
2100  * chunk sizes.
2101  */
2102 static void origin_resume(struct dm_target *ti)
2103 {
2104         struct dm_dev *dev = ti->private;
2105
2106         ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2107 }
2108
2109 static int origin_status(struct dm_target *ti, status_type_t type,
2110                          char *result, unsigned int maxlen)
2111 {
2112         struct dm_dev *dev = ti->private;
2113
2114         switch (type) {
2115         case STATUSTYPE_INFO:
2116                 result[0] = '\0';
2117                 break;
2118
2119         case STATUSTYPE_TABLE:
2120                 snprintf(result, maxlen, "%s", dev->name);
2121                 break;
2122         }
2123
2124         return 0;
2125 }
2126
2127 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2128                         struct bio_vec *biovec, int max_size)
2129 {
2130         struct dm_dev *dev = ti->private;
2131         struct request_queue *q = bdev_get_queue(dev->bdev);
2132
2133         if (!q->merge_bvec_fn)
2134                 return max_size;
2135
2136         bvm->bi_bdev = dev->bdev;
2137         /* The origin is an identity mapping, so bvm->bi_sector is unchanged. */
2138
2139         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2140 }
2141
2142 static int origin_iterate_devices(struct dm_target *ti,
2143                                   iterate_devices_callout_fn fn, void *data)
2144 {
2145         struct dm_dev *dev = ti->private;
2146
2147         return fn(ti, dev, 0, ti->len, data);
2148 }
2149
2150 static struct target_type origin_target = {
2151         .name    = "snapshot-origin",
2152         .version = {1, 7, 1},
2153         .module  = THIS_MODULE,
2154         .ctr     = origin_ctr,
2155         .dtr     = origin_dtr,
2156         .map     = origin_map,
2157         .resume  = origin_resume,
2158         .status  = origin_status,
2159         .merge   = origin_merge,
2160         .iterate_devices = origin_iterate_devices,
2161 };
2162
2163 static struct target_type snapshot_target = {
2164         .name    = "snapshot",
2165         .version = {1, 10, 0},
2166         .module  = THIS_MODULE,
2167         .ctr     = snapshot_ctr,
2168         .dtr     = snapshot_dtr,
2169         .map     = snapshot_map,
2170         .end_io  = snapshot_end_io,
2171         .preresume  = snapshot_preresume,
2172         .resume  = snapshot_resume,
2173         .status  = snapshot_status,
2174         .iterate_devices = snapshot_iterate_devices,
2175 };
2176
2177 static struct target_type merge_target = {
2178         .name    = dm_snapshot_merge_target_name,
2179         .version = {1, 1, 0},
2180         .module  = THIS_MODULE,
2181         .ctr     = snapshot_ctr,
2182         .dtr     = snapshot_dtr,
2183         .map     = snapshot_merge_map,
2184         .end_io  = snapshot_end_io,
2185         .presuspend = snapshot_merge_presuspend,
2186         .preresume  = snapshot_preresume,
2187         .resume  = snapshot_merge_resume,
2188         .status  = snapshot_status,
2189         .iterate_devices = snapshot_iterate_devices,
2190 };
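
/*
 * For reference, the table lines accepted by these three targets (see
 * Documentation/device-mapper/snapshot.txt):
 *
 *      snapshot-origin <origin>
 *      snapshot        <origin> <COW device> <persistent?> <chunksize>
 *      snapshot-merge  <origin> <COW device> <persistent?> <chunksize>
 */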
2191
2192 static int __init dm_snapshot_init(void)
2193 {
2194         int r;
2195
2196         r = dm_exception_store_init();
2197         if (r) {
2198                 DMERR("Failed to initialize exception stores");
2199                 return r;
2200         }
2201
2202         r = dm_register_target(&snapshot_target);
2203         if (r < 0) {
2204                 DMERR("Snapshot target register failed %d", r);
2205                 goto bad_register_snapshot_target;
2206         }
2207
2208         r = dm_register_target(&origin_target);
2209         if (r < 0) {
2210                 DMERR("Origin target register failed %d", r);
2211                 goto bad_register_origin_target;
2212         }
2213
2214         r = dm_register_target(&merge_target);
2215         if (r < 0) {
2216                 DMERR("Merge target register failed %d", r);
2217                 goto bad_register_merge_target;
2218         }
2219
2220         r = init_origin_hash();
2221         if (r) {
2222                 DMERR("init_origin_hash failed.");
2223                 goto bad_origin_hash;
2224         }
2225
2226         exception_cache = KMEM_CACHE(dm_exception, 0);
2227         if (!exception_cache) {
2228                 DMERR("Couldn't create exception cache.");
2229                 r = -ENOMEM;
2230                 goto bad_exception_cache;
2231         }
2232
2233         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2234         if (!pending_cache) {
2235                 DMERR("Couldn't create pending cache.");
2236                 r = -ENOMEM;
2237                 goto bad_pending_cache;
2238         }
2239
2240         tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2241         if (!tracked_chunk_cache) {
2242                 DMERR("Couldn't create cache to track chunks in use.");
2243                 r = -ENOMEM;
2244                 goto bad_tracked_chunk_cache;
2245         }
2246
2247         return 0;
2248
2249 bad_tracked_chunk_cache:
2250         kmem_cache_destroy(pending_cache);
2251 bad_pending_cache:
2252         kmem_cache_destroy(exception_cache);
2253 bad_exception_cache:
2254         exit_origin_hash();
2255 bad_origin_hash:
2256         dm_unregister_target(&merge_target);
2257 bad_register_merge_target:
2258         dm_unregister_target(&origin_target);
2259 bad_register_origin_target:
2260         dm_unregister_target(&snapshot_target);
2261 bad_register_snapshot_target:
2262         dm_exception_store_exit();
2263
2264         return r;
2265 }
2266
2267 static void __exit dm_snapshot_exit(void)
2268 {
2269         dm_unregister_target(&snapshot_target);
2270         dm_unregister_target(&origin_target);
2271         dm_unregister_target(&merge_target);
2272
2273         exit_origin_hash();
2274         kmem_cache_destroy(pending_cache);
2275         kmem_cache_destroy(exception_cache);
2276         kmem_cache_destroy(tracked_chunk_cache);
2277
2278         dm_exception_store_exit();
2279 }
2280
2281 /* Module hooks */
2282 module_init(dm_snapshot_init);
2283 module_exit(dm_snapshot_exit);
2284
2285 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2286 MODULE_AUTHOR("Joe Thornber");
2287 MODULE_LICENSE("GPL");