path: root/drivers/md/dm-snap.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:12:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:12:01 -0500
commit	53365383c4667aba55385cd1858582c19a7a8a36 (patch)
tree	b290d003534b3947834762c2fb492d9d0beb985f /drivers/md/dm-snap.c
parent	51b736b85155a56543fda8aeca5f8592795d7983 (diff)
parent	d2fdb776e08d4231d7e86a879cc663a93913c202 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (80 commits)
  dm snapshot: use merge origin if snapshot invalid
  dm snapshot: report merge failure in status
  dm snapshot: merge consecutive chunks together
  dm snapshot: trigger exceptions in remaining snapshots during merge
  dm snapshot: delay merging a chunk until writes to it complete
  dm snapshot: queue writes to chunks being merged
  dm snapshot: add merging
  dm snapshot: permit only one merge at once
  dm snapshot: support barriers in snapshot merge target
  dm snapshot: avoid allocating exceptions in merge
  dm snapshot: rework writing to origin
  dm snapshot: add merge target
  dm exception store: add merge specific methods
  dm snapshot: create function for chunk_is_tracked wait
  dm snapshot: make bio optional in __origin_write
  dm mpath: reject messages when device is suspended
  dm: export suspended state to targets
  dm: rename dm_suspended to dm_suspended_md
  dm: swap target postsuspend call and setting suspended flag
  dm crypt: add plain64 iv
  ...
Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--	drivers/md/dm-snap.c	1279
1 file changed, 1017 insertions(+), 262 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3a3ba46e6d4b..ee8eb283650d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -25,6 +25,11 @@
 
 #define DM_MSG_PREFIX "snapshots"
 
+static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
+
+#define dm_target_is_snapshot_merge(ti) \
+	((ti)->type->name == dm_snapshot_merge_target_name)
+
 /*
  * The percentage increment we will wake up users at
  */
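The new dm_target_is_snapshot_merge() macro compares name pointers, not
string contents: every target of a given type points at the single static
name its target_type was registered with, so pointer identity is enough.
A stand-alone user-space sketch of the idiom (simplified types, not from
the patch):

#include <stdio.h>

static const char merge_name[] = "snapshot-merge";

struct target_type { const char *name; };
struct target { const struct target_type *type; };

#define target_is_merge(ti) ((ti)->type->name == merge_name)

int main(void)
{
	struct target_type merge_type = { .name = merge_name };
	struct target ti = { .type = &merge_type };

	printf("%d\n", target_is_merge(&ti));	/* 1: same pointer */
	return 0;
}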
@@ -49,7 +54,7 @@
 #define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
 					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
 
-struct exception_table {
+struct dm_exception_table {
 	uint32_t hash_mask;
 	unsigned hash_shift;
 	struct list_head *table;
@@ -59,22 +64,31 @@ struct dm_snapshot {
 	struct rw_semaphore lock;
 
 	struct dm_dev *origin;
+	struct dm_dev *cow;
+
+	struct dm_target *ti;
 
 	/* List of snapshots per Origin */
 	struct list_head list;
 
-	/* You can't use a snapshot if this is 0 (e.g. if full) */
+	/*
+	 * You can't use a snapshot if this is 0 (e.g. if full).
+	 * A snapshot-merge target never clears this.
+	 */
 	int valid;
 
 	/* Origin writes don't trigger exceptions until this is set */
 	int active;
 
+	/* Whether or not owning mapped_device is suspended */
+	int suspended;
+
 	mempool_t *pending_pool;
 
 	atomic_t pending_exceptions_count;
 
-	struct exception_table pending;
-	struct exception_table complete;
+	struct dm_exception_table pending;
+	struct dm_exception_table complete;
 
 	/*
 	 * pe_lock protects all pending_exception operations and access
@@ -95,8 +109,51 @@ struct dm_snapshot {
 	mempool_t *tracked_chunk_pool;
 	spinlock_t tracked_chunk_lock;
 	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
+	/*
+	 * The merge operation failed if this flag is set.
+	 * Failure modes are handled as follows:
+	 * - I/O error reading the header
+	 *	=> don't load the target; abort.
+	 * - Header does not have "valid" flag set
+	 *	=> use the origin; forget about the snapshot.
+	 * - I/O error when reading exceptions
+	 *	=> don't load the target; abort.
+	 *	(We can't use the intermediate origin state.)
+	 * - I/O error while merging
+	 *	=> stop merging; set merge_failed; process I/O normally.
+	 */
+	int merge_failed;
+
+	/* Wait for events based on state_bits */
+	unsigned long state_bits;
+
+	/* Range of chunks currently being merged. */
+	chunk_t first_merging_chunk;
+	int num_merging_chunks;
+
+	/*
+	 * Incoming bios that overlap with chunks being merged must wait
+	 * for them to be committed.
+	 */
+	struct bio_list bios_queued_during_merge;
 };
 
+/*
+ * state_bits:
+ *   RUNNING_MERGE  - Merge operation is in progress.
+ *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ *		      cleared afterwards.
+ */
+#define RUNNING_MERGE		0
+#define SHUTDOWN_MERGE		1
+
+struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+{
+	return s->cow;
+}
+EXPORT_SYMBOL(dm_snap_cow);
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
@@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
 }
 
 struct dm_snap_pending_exception {
-	struct dm_snap_exception e;
+	struct dm_exception e;
 
 	/*
 	 * Origin buffers waiting for this to complete are held
@@ -125,28 +182,6 @@ struct dm_snap_pending_exception {
 	struct bio_list origin_bios;
 	struct bio_list snapshot_bios;
 
-	/*
-	 * Short-term queue of pending exceptions prior to submission.
-	 */
-	struct list_head list;
-
-	/*
-	 * The primary pending_exception is the one that holds
-	 * the ref_count and the list of origin_bios for a
-	 * group of pending_exceptions.  It is always last to get freed.
-	 * These fields get set up when writing to the origin.
-	 */
-	struct dm_snap_pending_exception *primary_pe;
-
-	/*
-	 * Number of pending_exceptions processing this chunk.
-	 * When this drops to zero we must complete the origin bios.
-	 * If incrementing or decrementing this, hold pe->snap->lock for
-	 * the sibling concerned and not pe->primary_pe->snap->lock unless
-	 * they are the same.
-	 */
-	atomic_t ref_count;
-
 	/* Pointer back to snapshot context */
 	struct dm_snapshot *snap;
 
@@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 }
 
 /*
+ * This conflicting I/O is extremely improbable in the caller,
+ * so msleep(1) is sufficient and there is no need for a wait queue.
+ */
+static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
+{
+	while (__chunk_is_tracked(s, chunk))
+		msleep(1);
+}
+
+/*
  * One of these per registered origin, held in the snapshot_origins hash
  */
 struct origin {
@@ -243,6 +288,10 @@ struct origin {
 static struct list_head *_origins;
 static struct rw_semaphore _origins_lock;
 
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
 static int init_origin_hash(void)
 {
 	int i;
@@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o)
 }
 
 /*
+ * _origins_lock must be held when calling this function.
+ * Returns number of snapshots registered using the supplied cow device, plus:
+ * snap_src - a snapshot suitable for use as a source of exception handover
+ * snap_dest - a snapshot capable of receiving exception handover.
+ * snap_merge - an existing snapshot-merge target linked to the same origin.
+ *   There can be at most one snapshot-merge target. The parameter is optional.
+ *
+ * Possible return values and states of snap_src and snap_dest.
+ *   0: NULL, NULL  - first new snapshot
+ *   1: snap_src, NULL - normal snapshot
+ *   2: snap_src, snap_dest  - waiting for handover
+ *   2: snap_src, NULL - handed over, waiting for old to be deleted
+ *   1: NULL, snap_dest - source got destroyed without handover
+ */
+static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
+					struct dm_snapshot **snap_src,
+					struct dm_snapshot **snap_dest,
+					struct dm_snapshot **snap_merge)
+{
+	struct dm_snapshot *s;
+	struct origin *o;
+	int count = 0;
+	int active;
+
+	o = __lookup_origin(snap->origin->bdev);
+	if (!o)
+		goto out;
+
+	list_for_each_entry(s, &o->snapshots, list) {
+		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
+			*snap_merge = s;
+		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
+			continue;
+
+		down_read(&s->lock);
+		active = s->active;
+		up_read(&s->lock);
+
+		if (active) {
+			if (snap_src)
+				*snap_src = s;
+		} else if (snap_dest)
+			*snap_dest = s;
+
+		count++;
+	}
+
+out:
+	return count;
+}
+
+/*
+ * On success, returns 1 if this snapshot is a handover destination,
+ * otherwise returns 0.
+ */
+static int __validate_exception_handover(struct dm_snapshot *snap)
+{
+	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+	struct dm_snapshot *snap_merge = NULL;
+
+	/* Does snapshot need exceptions handed over to it? */
+	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
+					  &snap_merge) == 2) ||
+	    snap_dest) {
+		snap->ti->error = "Snapshot cow pairing for exception "
+				  "table handover failed";
+		return -EINVAL;
+	}
+
+	/*
+	 * If no snap_src was found, snap cannot become a handover
+	 * destination.
+	 */
+	if (!snap_src)
+		return 0;
+
+	/*
+	 * Non-snapshot-merge handover?
+	 */
+	if (!dm_target_is_snapshot_merge(snap->ti))
+		return 1;
+
+	/*
+	 * Do not allow more than one merging snapshot.
+	 */
+	if (snap_merge) {
+		snap->ti->error = "A snapshot is already merging.";
+		return -EINVAL;
+	}
+
+	if (!snap_src->store->type->prepare_merge ||
+	    !snap_src->store->type->commit_merge) {
+		snap->ti->error = "Snapshot exception store does not "
+				  "support snapshot-merge.";
+		return -EINVAL;
+	}
+
+	return 1;
+}
+
+static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
+{
+	struct dm_snapshot *l;
+
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < s->store->chunk_size)
+			break;
+	list_add_tail(&s->list, &l->list);
+}
+
+/*
  * Make a note of the snapshot and its origin so we can look it
  * up when the origin has a write on it.
+ *
+ * Also validate snapshot exception store handovers.
+ * On success, returns 1 if this registration is a handover destination,
+ * otherwise returns 0.
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
-	struct dm_snapshot *l;
-	struct origin *o, *new_o;
+	struct origin *o, *new_o = NULL;
 	struct block_device *bdev = snap->origin->bdev;
+	int r = 0;
 
 	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
 	if (!new_o)
 		return -ENOMEM;
 
 	down_write(&_origins_lock);
-	o = __lookup_origin(bdev);
 
+	r = __validate_exception_handover(snap);
+	if (r < 0) {
+		kfree(new_o);
+		goto out;
+	}
+
+	o = __lookup_origin(bdev);
 	if (o)
 		kfree(new_o);
 	else {
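The comment above enumerates the legal (return value, snap_src, snap_dest)
combinations; __validate_exception_handover() rejects exactly the ones
where a handover is already pending. A compilable sketch of that
classification (user-space C, names local to this demo):

#include <stdio.h>

static const char *classify(int count, int have_src, int have_dest)
{
	/* Mirrors the check in __validate_exception_handover(): a count
	 * of 2 or an existing destination means a handover is already
	 * set up, so a new pairing must fail. */
	if (count == 2 || have_dest)
		return "reject: handover already pending";
	if (!have_src)
		return "first new snapshot";
	return "normal snapshot";
}

int main(void)
{
	printf("%s\n", classify(0, 0, 0));
	printf("%s\n", classify(1, 1, 0));
	printf("%s\n", classify(2, 1, 1));
	return 0;
}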
@@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap)
 		__insert_origin(o);
 	}
 
-	/* Sort the list according to chunk size, largest-first smallest-last */
-	list_for_each_entry(l, &o->snapshots, list)
-		if (l->store->chunk_size < snap->store->chunk_size)
-			break;
-	list_add_tail(&snap->list, &l->list);
+	__insert_snapshot(o, snap);
+
+out:
+	up_write(&_origins_lock);
+
+	return r;
+}
+
+/*
+ * Move snapshot to correct place in list according to chunk size.
+ */
+static void reregister_snapshot(struct dm_snapshot *s)
+{
+	struct block_device *bdev = s->origin->bdev;
+
+	down_write(&_origins_lock);
+
+	list_del(&s->list);
+	__insert_snapshot(__lookup_origin(bdev), s);
 
 	up_write(&_origins_lock);
-	return 0;
 }
 
 static void unregister_snapshot(struct dm_snapshot *s)
@@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s)
 	o = __lookup_origin(s->origin->bdev);
 
 	list_del(&s->list);
-	if (list_empty(&o->snapshots)) {
+	if (o && list_empty(&o->snapshots)) {
 		list_del(&o->hash_list);
 		kfree(o);
 	}
@@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s)
  * The lowest hash_shift bits of the chunk number are ignored, allowing
  * some consecutive chunks to be grouped together.
  */
-static int init_exception_table(struct exception_table *et, uint32_t size,
-				unsigned hash_shift)
+static int dm_exception_table_init(struct dm_exception_table *et,
+				   uint32_t size, unsigned hash_shift)
 {
 	unsigned int i;
 
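Because the completed table is initialised with hash_shift set to
DM_CHUNK_CONSECUTIVE_BITS, a run of consecutive chunks hashes into one
bucket and can be found next to each other. A small user-space
demonstration of that grouping (demo shift and mask values, not the
driver's):

#include <stdint.h>
#include <stdio.h>

static uint32_t exception_hash(unsigned hash_shift, uint32_t hash_mask,
			       uint64_t chunk)
{
	/* Same expression as the driver: drop the low bits, then mask. */
	return (uint32_t)(chunk >> hash_shift) & hash_mask;
}

int main(void)
{
	uint64_t chunk;

	for (chunk = 0; chunk < 8; chunk++)	/* all land in bucket 0 */
		printf("chunk %llu -> bucket %u\n",
		       (unsigned long long)chunk,
		       exception_hash(3, 0xff, chunk));
	return 0;
}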
@@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size,
 	return 0;
 }
 
-static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
+static void dm_exception_table_exit(struct dm_exception_table *et,
+				    struct kmem_cache *mem)
 {
 	struct list_head *slot;
-	struct dm_snap_exception *ex, *next;
+	struct dm_exception *ex, *next;
 	int i, size;
 
 	size = et->hash_mask + 1;
@@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
 	vfree(et->table);
 }
 
-static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
 {
 	return (chunk >> et->hash_shift) & et->hash_mask;
 }
 
-static void insert_exception(struct exception_table *eh,
-			     struct dm_snap_exception *e)
-{
-	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
-	list_add(&e->hash_list, l);
-}
-
-static void remove_exception(struct dm_snap_exception *e)
+static void dm_remove_exception(struct dm_exception *e)
 {
 	list_del(&e->hash_list);
 }
@@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e)
  * Return the exception data for a sector, or NULL if not
  * remapped.
  */
-static struct dm_snap_exception *lookup_exception(struct exception_table *et,
-						  chunk_t chunk)
+static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
+						chunk_t chunk)
 {
 	struct list_head *slot;
-	struct dm_snap_exception *e;
+	struct dm_exception *e;
 
 	slot = &et->table[exception_hash(et, chunk)];
 	list_for_each_entry (e, slot, hash_list)
@@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
 	return NULL;
 }
 
-static struct dm_snap_exception *alloc_exception(void)
+static struct dm_exception *alloc_completed_exception(void)
 {
-	struct dm_snap_exception *e;
+	struct dm_exception *e;
 
 	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
 	if (!e)
@@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void)
 	return e;
 }
 
-static void free_exception(struct dm_snap_exception *e)
+static void free_completed_exception(struct dm_exception *e)
 {
 	kmem_cache_free(exception_cache, e);
 }
@@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
 	atomic_dec(&s->pending_exceptions_count);
 }
 
-static void insert_completed_exception(struct dm_snapshot *s,
-				       struct dm_snap_exception *new_e)
+static void dm_insert_exception(struct dm_exception_table *eh,
+				struct dm_exception *new_e)
 {
-	struct exception_table *eh = &s->complete;
 	struct list_head *l;
-	struct dm_snap_exception *e = NULL;
+	struct dm_exception *e = NULL;
 
 	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
 
@@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
 	    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
 				 dm_consecutive_chunk_count(e) + 1)) {
 		dm_consecutive_chunk_count_inc(e);
-		free_exception(new_e);
+		free_completed_exception(new_e);
 		return;
 	}
 
@@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
 		dm_consecutive_chunk_count_inc(e);
 		e->old_chunk--;
 		e->new_chunk--;
-		free_exception(new_e);
+		free_completed_exception(new_e);
 		return;
 	}
 
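The two branches above extend an existing exception forwards or backwards
instead of storing a new one, by bumping a run length packed into the
spare top bits of new_chunk. A user-space sketch of that encoding (the
56/8 bit split is an assumption matching dm-exception-store.h of this
era, not something this diff shows):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

#define NUMBER_BITS 56	/* assumed: low bits chunk number, high bits run length */

static chunk_t chunk_number(chunk_t c)  { return c & ((1ULL << NUMBER_BITS) - 1); }
static unsigned consecutive(chunk_t c)  { return (unsigned)(c >> NUMBER_BITS); }
static void consecutive_inc(chunk_t *c) { *c += 1ULL << NUMBER_BITS; }

int main(void)
{
	chunk_t new_chunk = 100;	/* exception starts at chunk 100 */

	consecutive_inc(&new_chunk);	/* now also covers chunk 101 */
	consecutive_inc(&new_chunk);	/* now covers 100..102 */
	printf("start %llu, %u consecutive chunks follow\n",
	       (unsigned long long)chunk_number(new_chunk),
	       consecutive(new_chunk));
	return 0;
}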
@@ -507,9 +684,9 @@ out:
 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
 {
 	struct dm_snapshot *s = context;
-	struct dm_snap_exception *e;
+	struct dm_exception *e;
 
-	e = alloc_exception();
+	e = alloc_completed_exception();
 	if (!e)
 		return -ENOMEM;
 
@@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
 	/* Consecutive_count is implicitly initialised to zero */
 	e->new_chunk = new;
 
-	insert_completed_exception(s, e);
+	dm_insert_exception(&s->complete, e);
 
 	return 0;
 }
 
+#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
+
+/*
+ * Return a minimum chunk size of all snapshots that have the specified origin.
+ * Return zero if the origin has no snapshots.
+ */
+static sector_t __minimum_chunk_size(struct origin *o)
+{
+	struct dm_snapshot *snap;
+	unsigned chunk_size = 0;
+
+	if (o)
+		list_for_each_entry(snap, &o->snapshots, list)
+			chunk_size = min_not_zero(chunk_size,
+						  snap->store->chunk_size);
+
+	return chunk_size;
+}
+
 /*
  * Hard coded magic.
  */
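min_not_zero() treats zero as "no value yet", which is what lets
__minimum_chunk_size() fold chunk sizes starting from 0 and still return
0 for an origin with no snapshots. A quick stand-alone check of the
macro's behaviour:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
	unsigned acc = 0;
	unsigned sizes[] = { 16, 8, 32 };	/* per-snapshot chunk sizes */
	unsigned i;

	for (i = 0; i < 3; i++)
		acc = min_not_zero(acc, sizes[i]);
	printf("%u\n", acc);	/* 8: the smallest non-zero size */
	return 0;
}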
@@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s)
 	 * Calculate based on the size of the original volume or
 	 * the COW volume...
 	 */
-	cow_dev_size = get_dev_size(s->store->cow->bdev);
+	cow_dev_size = get_dev_size(s->cow->bdev);
 	origin_dev_size = get_dev_size(s->origin->bdev);
 	max_buckets = calc_max_buckets();
 
 	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
 	hash_size = min(hash_size, max_buckets);
 
+	if (hash_size < 64)
+		hash_size = 64;
 	hash_size = rounddown_pow_of_two(hash_size);
-	if (init_exception_table(&s->complete, hash_size,
-				 DM_CHUNK_CONSECUTIVE_BITS))
+	if (dm_exception_table_init(&s->complete, hash_size,
+				    DM_CHUNK_CONSECUTIVE_BITS))
 		return -ENOMEM;
 
 	/*
@@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s)
 	if (hash_size < 64)
 		hash_size = 64;
 
-	if (init_exception_table(&s->pending, hash_size, 0)) {
-		exit_exception_table(&s->complete, exception_cache);
+	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
+		dm_exception_table_exit(&s->complete, exception_cache);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
+static void merge_shutdown(struct dm_snapshot *s)
+{
+	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
+static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
+{
+	s->first_merging_chunk = 0;
+	s->num_merging_chunks = 0;
+
+	return bio_list_get(&s->bios_queued_during_merge);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+					   chunk_t old_chunk)
+{
+	struct dm_exception *e;
+
+	e = dm_lookup_exception(&s->complete, old_chunk);
+	if (!e) {
+		DMERR("Corruption detected: exception for block %llu is "
+		      "on disk but not in memory",
+		      (unsigned long long)old_chunk);
+		return -EINVAL;
+	}
+
+	/*
+	 * If this is the only chunk using this exception, remove exception.
+	 */
+	if (!dm_consecutive_chunk_count(e)) {
+		dm_remove_exception(e);
+		free_completed_exception(e);
+		return 0;
+	}
+
+	/*
+	 * The chunk may be either at the beginning or the end of a
+	 * group of consecutive chunks - never in the middle.  We are
+	 * removing chunks in the opposite order to that in which they
+	 * were added, so this should always be true.
+	 * Decrement the consecutive chunk counter and adjust the
+	 * starting point if necessary.
+	 */
+	if (old_chunk == e->old_chunk) {
+		e->old_chunk++;
+		e->new_chunk++;
+	} else if (old_chunk != e->old_chunk +
+		   dm_consecutive_chunk_count(e)) {
+		DMERR("Attempt to merge block %llu from the "
+		      "middle of a chunk range [%llu - %llu]",
+		      (unsigned long long)old_chunk,
+		      (unsigned long long)e->old_chunk,
+		      (unsigned long long)
+		      e->old_chunk + dm_consecutive_chunk_count(e));
+		return -EINVAL;
+	}
+
+	dm_consecutive_chunk_count_dec(e);
+
+	return 0;
+}
+
+static void flush_bios(struct bio *bio);
+
+static int remove_single_exception_chunk(struct dm_snapshot *s)
+{
+	struct bio *b = NULL;
+	int r;
+	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+	down_write(&s->lock);
+
+	/*
+	 * Process chunks (and associated exceptions) in reverse order
+	 * so that dm_consecutive_chunk_count_dec() accounting works.
+	 */
+	do {
+		r = __remove_single_exception_chunk(s, old_chunk);
+		if (r)
+			goto out;
+	} while (old_chunk-- > s->first_merging_chunk);
+
+	b = __release_queued_bios_after_merge(s);
+
+out:
+	up_write(&s->lock);
+	if (b)
+		flush_bios(b);
+
+	return r;
+}
+
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+			       sector_t sector, unsigned chunk_size);
+
+static void merge_callback(int read_err, unsigned long write_err,
+			   void *context);
+
+static uint64_t read_pending_exceptions_done_count(void)
+{
+	uint64_t pending_exceptions_done;
+
+	spin_lock(&_pending_exceptions_done_spinlock);
+	pending_exceptions_done = _pending_exceptions_done_count;
+	spin_unlock(&_pending_exceptions_done_spinlock);
+
+	return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+	spin_lock(&_pending_exceptions_done_spinlock);
+	_pending_exceptions_done_count++;
+	spin_unlock(&_pending_exceptions_done_spinlock);
+
+	wake_up_all(&_pending_exceptions_done);
+}
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+	int i, linear_chunks;
+	chunk_t old_chunk, new_chunk;
+	struct dm_io_region src, dest;
+	sector_t io_size;
+	uint64_t previous_count;
+
+	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+		goto shut;
+
+	/*
+	 * valid flag never changes during merge, so no lock required.
+	 */
+	if (!s->valid) {
+		DMERR("Snapshot is invalid: can't merge");
+		goto shut;
+	}
+
+	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+						      &new_chunk);
+	if (linear_chunks <= 0) {
+		if (linear_chunks < 0) {
+			DMERR("Read error in exception store: "
+			      "shutting down merge");
+			down_write(&s->lock);
+			s->merge_failed = 1;
+			up_write(&s->lock);
+		}
+		goto shut;
+	}
+
+	/* Adjust old_chunk and new_chunk to reflect start of linear region */
+	old_chunk = old_chunk + 1 - linear_chunks;
+	new_chunk = new_chunk + 1 - linear_chunks;
+
+	/*
+	 * Use one (potentially large) I/O to copy all 'linear_chunks'
+	 * from the exception store to the origin
+	 */
+	io_size = linear_chunks * s->store->chunk_size;
+
+	dest.bdev = s->origin->bdev;
+	dest.sector = chunk_to_sector(s->store, old_chunk);
+	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
+
+	src.bdev = s->cow->bdev;
+	src.sector = chunk_to_sector(s->store, new_chunk);
+	src.count = dest.count;
+
+	/*
+	 * Reallocate any exceptions needed in other snapshots then
+	 * wait for the pending exceptions to complete.
+	 * Each time any pending exception (globally on the system)
+	 * completes we are woken and repeat the process to find out
+	 * if we can proceed.  While this may not seem a particularly
+	 * efficient algorithm, it is not expected to have any
+	 * significant impact on performance.
+	 */
+	previous_count = read_pending_exceptions_done_count();
+	while (origin_write_extent(s, dest.sector, io_size)) {
+		wait_event(_pending_exceptions_done,
+			   (read_pending_exceptions_done_count() !=
+			    previous_count));
+		/* Retry after the wait, until all exceptions are done. */
+		previous_count = read_pending_exceptions_done_count();
+	}
+
+	down_write(&s->lock);
+	s->first_merging_chunk = old_chunk;
+	s->num_merging_chunks = linear_chunks;
+	up_write(&s->lock);
+
+	/* Wait until writes to all 'linear_chunks' drain */
+	for (i = 0; i < linear_chunks; i++)
+		__check_for_conflicting_io(s, old_chunk + i);
+
+	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+	return;
+
+shut:
+	merge_shutdown(s);
+}
+
+static void error_bios(struct bio *bio);
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+	struct dm_snapshot *s = context;
+	struct bio *b = NULL;
+
+	if (read_err || write_err) {
+		if (read_err)
+			DMERR("Read error: shutting down merge.");
+		else
+			DMERR("Write error: shutting down merge.");
+		goto shut;
+	}
+
+	if (s->store->type->commit_merge(s->store,
+					 s->num_merging_chunks) < 0) {
+		DMERR("Write error in exception store: shutting down merge");
+		goto shut;
+	}
+
+	if (remove_single_exception_chunk(s) < 0)
+		goto shut;
+
+	snapshot_merge_next_chunks(s);
+
+	return;
+
+shut:
+	down_write(&s->lock);
+	s->merge_failed = 1;
+	b = __release_queued_bios_after_merge(s);
+	up_write(&s->lock);
+	error_bios(b);
+
+	merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+		snapshot_merge_next_chunks(s);
+}
+
+static int wait_schedule(void *ptr)
+{
+	schedule();
+
+	return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+	set_bit(SHUTDOWN_MERGE, &s->state_bits);
+	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+		    TASK_UNINTERRUPTIBLE);
+	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
+
 /*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
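snapshot_merge_next_chunks() relies on a global completion counter rather
than per-exception wait queues: it samples the counter, tries to write
the extent, and if exceptions are still pending it sleeps until the
counter moves, then retries. A user-space sketch of that retry pattern
using pthreads (illustration only; the kernel uses wait_event and a
spinlock instead):

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static uint64_t done_count;

static uint64_t read_done_count(void)
{
	uint64_t v;

	pthread_mutex_lock(&lock);
	v = done_count;
	pthread_mutex_unlock(&lock);
	return v;
}

static void one_done(void)	/* cf. increment_pending_exceptions_done_count() */
{
	pthread_mutex_lock(&lock);
	done_count++;
	pthread_cond_broadcast(&done);
	pthread_mutex_unlock(&lock);
}

static void merge_when_quiet(int (*try_merge)(void))
{
	uint64_t seen = read_done_count();

	while (try_merge()) {		/* nonzero: exceptions still pending */
		pthread_mutex_lock(&lock);
		while (done_count == seen)	/* sleep until counter moves */
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);
		seen = read_done_count();
	}
}

static int attempts;
static int try_merge(void) { return attempts++ < 1; }	/* busy once, then done */

static volatile int merging = 1;

static void *completer(void *arg)
{
	(void)arg;
	while (merging) {	/* keep signalling until the merger quits */
		usleep(1000);
		one_done();
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	merge_when_quiet(try_merge);
	merging = 0;
	pthread_join(t, NULL);
	return 0;
}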
@@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	struct dm_snapshot *s;
 	int i;
 	int r = -EINVAL;
-	char *origin_path;
-	struct dm_exception_store *store;
-	unsigned args_used;
+	char *origin_path, *cow_path;
+	unsigned args_used, num_flush_requests = 1;
+	fmode_t origin_mode = FMODE_READ;
 
 	if (argc != 4) {
 		ti->error = "requires exactly 4 arguments";
 		r = -EINVAL;
-		goto bad_args;
+		goto bad;
+	}
+
+	if (dm_target_is_snapshot_merge(ti)) {
+		num_flush_requests = 2;
+		origin_mode = FMODE_WRITE;
 	}
 
 	origin_path = argv[0];
 	argv++;
 	argc--;
 
-	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		ti->error = "Cannot allocate snapshot context private "
+			    "structure";
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	cow_path = argv[0];
+	argv++;
+	argc--;
+
+	r = dm_get_device(ti, cow_path, 0, 0,
+			  FMODE_READ | FMODE_WRITE, &s->cow);
+	if (r) {
+		ti->error = "Cannot get COW device";
+		goto bad_cow;
+	}
+
+	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
 	if (r) {
 		ti->error = "Couldn't create exception store";
 		r = -EINVAL;
-		goto bad_args;
+		goto bad_store;
 	}
 
 	argv += args_used;
 	argc -= args_used;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s) {
-		ti->error = "Cannot allocate snapshot context private "
-			    "structure";
-		r = -ENOMEM;
-		goto bad_snap;
-	}
-
-	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
+	r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
 	if (r) {
 		ti->error = "Cannot get origin device";
 		goto bad_origin;
 	}
 
-	s->store = store;
+	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
+	s->suspended = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
+	INIT_LIST_HEAD(&s->list);
 	spin_lock_init(&s->pe_lock);
+	s->state_bits = 0;
+	s->merge_failed = 0;
+	s->first_merging_chunk = 0;
+	s->num_merging_chunks = 0;
+	bio_list_init(&s->bios_queued_during_merge);
 
 	/* Allocate hash table for COW data */
 	if (init_hash_tables(s)) {
@@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	spin_lock_init(&s->tracked_chunk_lock);
 
-	/* Metadata must only be loaded into one table at once */
+	bio_list_init(&s->queued_bios);
+	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
+
+	ti->private = s;
+	ti->num_flush_requests = num_flush_requests;
+
+	/* Add snapshot to the list of snapshots for this origin */
+	/* Exceptions aren't triggered till snapshot_resume() is called */
+	r = register_snapshot(s);
+	if (r == -ENOMEM) {
+		ti->error = "Snapshot origin struct allocation failed";
+		goto bad_load_and_register;
+	} else if (r < 0) {
+		/* invalid handover, register_snapshot has set ti->error */
+		goto bad_load_and_register;
+	}
+
+	/*
+	 * Metadata must only be loaded into one table at once, so skip this
+	 * if metadata will be handed over during resume.
+	 * Chunk size will be set during the handover - set it to zero to
+	 * ensure it's ignored.
+	 */
+	if (r > 0) {
+		s->store->chunk_size = 0;
+		return 0;
+	}
+
 	r = s->store->type->read_metadata(s->store, dm_add_exception,
 					  (void *)s);
 	if (r < 0) {
 		ti->error = "Failed to read snapshot metadata";
-		goto bad_load_and_register;
+		goto bad_read_metadata;
 	} else if (r > 0) {
 		s->valid = 0;
 		DMWARN("Snapshot is marked invalid.");
 	}
 
-	bio_list_init(&s->queued_bios);
-	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
 	if (!s->store->chunk_size) {
 		ti->error = "Chunk size not set";
-		goto bad_load_and_register;
+		goto bad_read_metadata;
 	}
-
-	/* Add snapshot to the list of snapshots for this origin */
-	/* Exceptions aren't triggered till snapshot_resume() is called */
-	if (register_snapshot(s)) {
-		r = -EINVAL;
-		ti->error = "Cannot register snapshot origin";
-		goto bad_load_and_register;
-	}
-
-	ti->private = s;
 	ti->split_io = s->store->chunk_size;
-	ti->num_flush_requests = 1;
 
 	return 0;
 
+bad_read_metadata:
+	unregister_snapshot(s);
+
 bad_load_and_register:
 	mempool_destroy(s->tracked_chunk_pool);
 
@@ -702,19 +1209,22 @@ bad_pending_pool:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 
 bad_kcopyd:
-	exit_exception_table(&s->pending, pending_cache);
-	exit_exception_table(&s->complete, exception_cache);
+	dm_exception_table_exit(&s->pending, pending_cache);
+	dm_exception_table_exit(&s->complete, exception_cache);
 
 bad_hash_tables:
 	dm_put_device(ti, s->origin);
 
 bad_origin:
-	kfree(s);
+	dm_exception_store_destroy(s->store);
 
-bad_snap:
-	dm_exception_store_destroy(store);
+bad_store:
+	dm_put_device(ti, s->cow);
+
+bad_cow:
+	kfree(s);
 
-bad_args:
+bad:
 	return r;
 }
 
@@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s)
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 	s->kcopyd_client = NULL;
 
-	exit_exception_table(&s->pending, pending_cache);
-	exit_exception_table(&s->complete, exception_cache);
+	dm_exception_table_exit(&s->pending, pending_cache);
+	dm_exception_table_exit(&s->complete, exception_cache);
+}
+
+static void __handover_exceptions(struct dm_snapshot *snap_src,
+				  struct dm_snapshot *snap_dest)
+{
+	union {
+		struct dm_exception_table table_swap;
+		struct dm_exception_store *store_swap;
+	} u;
+
+	/*
+	 * Swap all snapshot context information between the two instances.
+	 */
+	u.table_swap = snap_dest->complete;
+	snap_dest->complete = snap_src->complete;
+	snap_src->complete = u.table_swap;
+
+	u.store_swap = snap_dest->store;
+	snap_dest->store = snap_src->store;
+	snap_src->store = u.store_swap;
+
+	snap_dest->store->snap = snap_dest;
+	snap_src->store->snap = snap_src;
+
+	snap_dest->ti->split_io = snap_dest->store->chunk_size;
+	snap_dest->valid = snap_src->valid;
+
+	/*
+	 * Set source invalid to ensure it receives no further I/O.
+	 */
+	snap_src->valid = 0;
 }
 
 static void snapshot_dtr(struct dm_target *ti)
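The swap in __handover_exceptions() moves whole indexes, not individual
exceptions: dm_exception_table embeds only a bucket-array pointer plus
hash parameters, so assigning the structs transfers ownership of every
completed exception in O(1). A minimal stand-alone illustration of why
(simplified struct, not the driver's):

#include <stdio.h>

struct table { unsigned hash_mask; void *buckets; };

int main(void)
{
	int src_buckets[4], dest_buckets[1];
	struct table src  = { 3, src_buckets };
	struct table dest = { 0, dest_buckets };
	struct table tmp;

	tmp = dest;		/* the three-way swap the handover performs */
	dest = src;
	src = tmp;

	printf("%d\n", dest.buckets == src_buckets);	/* 1: index moved */
	return 0;
}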
@@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti)
 	int i;
 #endif
 	struct dm_snapshot *s = ti->private;
+	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
 	flush_workqueue(ksnapd);
 
+	down_read(&_origins_lock);
+	/* Check whether exception handover must be cancelled */
+	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+	if (snap_src && snap_dest && (s == snap_src)) {
+		down_write(&snap_dest->lock);
+		snap_dest->valid = 0;
+		up_write(&snap_dest->lock);
+		DMERR("Cancelling snapshot handover.");
+	}
+	up_read(&_origins_lock);
+
+	if (dm_target_is_snapshot_merge(ti))
+		stop_merge(s);
+
 	/* Prevent further origin writes from using this snapshot. */
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
@@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
 	dm_exception_store_destroy(s->store);
 
+	dm_put_device(ti, s->cow);
+
 	kfree(s);
 }
 
@@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work)
 	flush_bios(queued_bios);
 }
 
+static int do_origin(struct dm_dev *origin, struct bio *bio);
+
+/*
+ * Flush a list of buffers.
+ */
+static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
+{
+	struct bio *n;
+	int r;
+
+	while (bio) {
+		n = bio->bi_next;
+		bio->bi_next = NULL;
+		r = do_origin(s->origin, bio);
+		if (r == DM_MAPIO_REMAPPED)
+			generic_make_request(bio);
+		bio = n;
+	}
+}
+
 /*
  * Error a list of buffers.
  */
@@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 
 	s->valid = 0;
 
-	dm_table_event(s->store->ti->table);
-}
-
-static void get_pending_exception(struct dm_snap_pending_exception *pe)
-{
-	atomic_inc(&pe->ref_count);
-}
-
-static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
-{
-	struct dm_snap_pending_exception *primary_pe;
-	struct bio *origin_bios = NULL;
-
-	primary_pe = pe->primary_pe;
-
-	/*
-	 * If this pe is involved in a write to the origin and
-	 * it is the last sibling to complete then release
-	 * the bios for the original write to the origin.
-	 */
-	if (primary_pe &&
-	    atomic_dec_and_test(&primary_pe->ref_count)) {
-		origin_bios = bio_list_get(&primary_pe->origin_bios);
-		free_pending_exception(primary_pe);
-	}
-
-	/*
-	 * Free the pe if it's not linked to an origin write or if
-	 * it's not itself a primary pe.
-	 */
-	if (!primary_pe || primary_pe != pe)
-		free_pending_exception(pe);
-
-	return origin_bios;
+	dm_table_event(s->ti->table);
 }
 
 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 {
-	struct dm_snap_exception *e;
+	struct dm_exception *e;
 	struct dm_snapshot *s = pe->snap;
 	struct bio *origin_bios = NULL;
 	struct bio *snapshot_bios = NULL;
@@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 		goto out;
 	}
 
-	e = alloc_exception();
+	e = alloc_completed_exception();
 	if (!e) {
 		down_write(&s->lock);
 		__invalidate_snapshot(s, -ENOMEM);
@@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 
 	down_write(&s->lock);
 	if (!s->valid) {
-		free_exception(e);
+		free_completed_exception(e);
 		error = 1;
 		goto out;
 	}
 
-	/*
-	 * Check for conflicting reads. This is extremely improbable,
-	 * so msleep(1) is sufficient and there is no need for a wait queue.
-	 */
-	while (__chunk_is_tracked(s, pe->e.old_chunk))
-		msleep(1);
+	/* Check for conflicting reads */
+	__check_for_conflicting_io(s, pe->e.old_chunk);
 
 	/*
 	 * Add a proper exception, and remove the
 	 * in-flight exception from the list.
 	 */
-	insert_completed_exception(s, e);
+	dm_insert_exception(&s->complete, e);
 
  out:
-	remove_exception(&pe->e);
+	dm_remove_exception(&pe->e);
 	snapshot_bios = bio_list_get(&pe->snapshot_bios);
-	origin_bios = put_pending_exception(pe);
+	origin_bios = bio_list_get(&pe->origin_bios);
+	free_pending_exception(pe);
+
+	increment_pending_exceptions_done_count();
 
 	up_write(&s->lock);
 
@@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	else
 		flush_bios(snapshot_bios);
 
-	flush_bios(origin_bios);
+	retry_origin_bios(s, origin_bios);
 }
 
 static void commit_callback(void *context, int success)
@@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
 	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
-	dest.bdev = s->store->cow->bdev;
+	dest.bdev = s->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
 	dest.count = src.count;
 
@@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 static struct dm_snap_pending_exception *
 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
 {
-	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
+	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
 
 	if (!e)
 		return NULL;
@@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s,
 	pe->e.old_chunk = chunk;
 	bio_list_init(&pe->origin_bios);
 	bio_list_init(&pe->snapshot_bios);
-	pe->primary_pe = NULL;
-	atomic_set(&pe->ref_count, 0);
 	pe->started = 0;
 
 	if (s->store->type->prepare_exception(s->store, &pe->e)) {
@@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s,
 		return NULL;
 	}
 
-	get_pending_exception(pe);
-	insert_exception(&s->pending, &pe->e);
+	dm_insert_exception(&s->pending, &pe->e);
 
 	return pe;
 }
 
-static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
 			    struct bio *bio, chunk_t chunk)
 {
-	bio->bi_bdev = s->store->cow->bdev;
+	bio->bi_bdev = s->cow->bdev;
 	bio->bi_sector = chunk_to_sector(s->store,
 					 dm_chunk_number(e->new_chunk) +
 					 (chunk - e->old_chunk)) +
@@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
 static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			union map_info *map_context)
 {
-	struct dm_snap_exception *e;
+	struct dm_exception *e;
 	struct dm_snapshot *s = ti->private;
 	int r = DM_MAPIO_REMAPPED;
 	chunk_t chunk;
 	struct dm_snap_pending_exception *pe = NULL;
 
 	if (unlikely(bio_empty_barrier(bio))) {
-		bio->bi_bdev = s->store->cow->bdev;
+		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
 
@@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	}
 
 	/* If the block is already remapped - use that, else remap it */
-	e = lookup_exception(&s->complete, chunk);
+	e = dm_lookup_exception(&s->complete, chunk);
 	if (e) {
 		remap_exception(s, e, bio, chunk);
 		goto out_unlock;
@@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			goto out_unlock;
 		}
 
-		e = lookup_exception(&s->complete, chunk);
+		e = dm_lookup_exception(&s->complete, chunk);
 		if (e) {
 			free_pending_exception(pe);
 			remap_exception(s, e, bio, chunk);
@@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	return r;
 }
 
+/*
+ * A snapshot-merge target behaves like a combination of a snapshot
+ * target and a snapshot-origin target.  It only generates new
+ * exceptions in other snapshots and not in the one that is being
+ * merged.
+ *
+ * For each chunk, if there is an existing exception, it is used to
+ * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
+ * which in turn might generate exceptions in other snapshots.
+ * If merging is currently taking place on the chunk in question, the
+ * I/O is deferred by adding it to s->bios_queued_during_merge.
+ */
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
+			      union map_info *map_context)
+{
+	struct dm_exception *e;
+	struct dm_snapshot *s = ti->private;
+	int r = DM_MAPIO_REMAPPED;
+	chunk_t chunk;
+
+	if (unlikely(bio_empty_barrier(bio))) {
+		if (!map_context->flush_request)
+			bio->bi_bdev = s->origin->bdev;
+		else
+			bio->bi_bdev = s->cow->bdev;
+		map_context->ptr = NULL;
+		return DM_MAPIO_REMAPPED;
+	}
+
+	chunk = sector_to_chunk(s->store, bio->bi_sector);
+
+	down_write(&s->lock);
+
+	/* Full merging snapshots are redirected to the origin */
+	if (!s->valid)
+		goto redirect_to_origin;
+
+	/* If the block is already remapped - use that */
+	e = dm_lookup_exception(&s->complete, chunk);
+	if (e) {
+		/* Queue writes overlapping with chunks being merged */
+		if (bio_rw(bio) == WRITE &&
+		    chunk >= s->first_merging_chunk &&
+		    chunk < (s->first_merging_chunk +
+			     s->num_merging_chunks)) {
+			bio->bi_bdev = s->origin->bdev;
+			bio_list_add(&s->bios_queued_during_merge, bio);
+			r = DM_MAPIO_SUBMITTED;
+			goto out_unlock;
+		}
+
+		remap_exception(s, e, bio, chunk);
+
+		if (bio_rw(bio) == WRITE)
+			map_context->ptr = track_chunk(s, chunk);
+		goto out_unlock;
+	}
+
+redirect_to_origin:
+	bio->bi_bdev = s->origin->bdev;
+
+	if (bio_rw(bio) == WRITE) {
+		up_write(&s->lock);
+		return do_origin(s->origin, bio);
+	}
+
+out_unlock:
+	up_write(&s->lock);
+
+	return r;
+}
+
 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
 			   int error, union map_info *map_context)
 {
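For writes, snapshot_merge_map() therefore picks one of three outcomes:
remap to the cow device, queue behind an in-flight merge, or fall through
to the origin. The decision reduced to a pure function (user-space
sketch, names local to this demo):

#include <stdio.h>

enum action { REMAP_TO_COW, QUEUE_UNTIL_MERGED, SEND_TO_ORIGIN };

static enum action merge_map_write(int has_exception, unsigned long chunk,
				   unsigned long first_merging,
				   int num_merging)
{
	if (!has_exception)
		return SEND_TO_ORIGIN;
	/* Same overlap test as the patch: [first, first + num) */
	if (chunk >= first_merging && chunk < first_merging + num_merging)
		return QUEUE_UNTIL_MERGED;
	return REMAP_TO_COW;
}

int main(void)
{
	printf("%d\n", merge_map_write(1, 10, 8, 4));	/* 1: QUEUE_UNTIL_MERGED */
	printf("%d\n", merge_map_write(1, 20, 8, 4));	/* 0: REMAP_TO_COW */
	printf("%d\n", merge_map_write(0, 20, 8, 4));	/* 2: SEND_TO_ORIGIN */
	return 0;
}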
@@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
 	return 0;
 }
 
+static void snapshot_merge_presuspend(struct dm_target *ti)
+{
+	struct dm_snapshot *s = ti->private;
+
+	stop_merge(s);
+}
+
+static void snapshot_postsuspend(struct dm_target *ti)
+{
+	struct dm_snapshot *s = ti->private;
+
+	down_write(&s->lock);
+	s->suspended = 1;
+	up_write(&s->lock);
+}
+
+static int snapshot_preresume(struct dm_target *ti)
+{
+	int r = 0;
+	struct dm_snapshot *s = ti->private;
+	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+	down_read(&_origins_lock);
+	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+	if (snap_src && snap_dest) {
+		down_read(&snap_src->lock);
+		if (s == snap_src) {
+			DMERR("Unable to resume snapshot source until "
+			      "handover completes.");
+			r = -EINVAL;
+		} else if (!snap_src->suspended) {
+			DMERR("Unable to perform snapshot handover until "
+			      "source is suspended.");
+			r = -EINVAL;
+		}
+		up_read(&snap_src->lock);
+	}
+	up_read(&_origins_lock);
+
+	return r;
+}
+
 static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = ti->private;
+	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+	down_read(&_origins_lock);
+	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+	if (snap_src && snap_dest) {
+		down_write(&snap_src->lock);
+		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+		__handover_exceptions(snap_src, snap_dest);
+		up_write(&snap_dest->lock);
+		up_write(&snap_src->lock);
+	}
+	up_read(&_origins_lock);
+
+	/* Now we have correct chunk size, reregister */
+	reregister_snapshot(s);
 
 	down_write(&s->lock);
 	s->active = 1;
+	s->suspended = 0;
 	up_write(&s->lock);
 }
 
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+	sector_t min_chunksize;
+
+	down_read(&_origins_lock);
+	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+	up_read(&_origins_lock);
+
+	return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+	struct dm_snapshot *s = ti->private;
+
+	/*
+	 * Handover exceptions from existing snapshot.
+	 */
+	snapshot_resume(ti);
+
+	/*
+	 * snapshot-merge acts as an origin, so set ti->split_io
+	 */
+	ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+	start_merge(s);
+}
+
 static int snapshot_status(struct dm_target *ti, status_type_t type,
 			   char *result, unsigned int maxlen)
 {
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
-	down_write(&snap->lock);
-
 	switch (type) {
 	case STATUSTYPE_INFO:
+
+		down_write(&snap->lock);
+
 		if (!snap->valid)
 			DMEMIT("Invalid");
+		else if (snap->merge_failed)
+			DMEMIT("Merge failed");
 		else {
-			if (snap->store->type->fraction_full) {
-				sector_t numerator, denominator;
-				snap->store->type->fraction_full(snap->store,
-								 &numerator,
-								 &denominator);
-				DMEMIT("%llu/%llu",
-				       (unsigned long long)numerator,
-				       (unsigned long long)denominator);
+			if (snap->store->type->usage) {
+				sector_t total_sectors, sectors_allocated,
+					 metadata_sectors;
+				snap->store->type->usage(snap->store,
+							 &total_sectors,
+							 &sectors_allocated,
+							 &metadata_sectors);
+				DMEMIT("%llu/%llu %llu",
+				       (unsigned long long)sectors_allocated,
+				       (unsigned long long)total_sectors,
+				       (unsigned long long)metadata_sectors);
 			}
 			else
 				DMEMIT("Unknown");
 		}
+
+		up_write(&snap->lock);
+
 		break;
 
 	case STATUSTYPE_TABLE:
@@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		 * to make private copies if the output is to
 		 * make sense.
 		 */
-		DMEMIT("%s", snap->origin->name);
+		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
 		snap->store->type->status(snap->store, type, result + sz,
 					  maxlen - sz);
 		break;
 	}
 
-	up_write(&snap->lock);
-
 	return 0;
 }
 
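The STATUSTYPE_INFO line changes format here: instead of the old
"<numerator>/<denominator>" fraction it now reports
"<sectors_allocated>/<total_sectors> <metadata_sectors>". A user-space
sketch of parsing the new line (the sample string is made up for the
demo):

#include <stdio.h>

int main(void)
{
	const char *status = "1024/204800 8";	/* hypothetical status field */
	unsigned long long allocated, total, metadata;

	if (sscanf(status, "%llu/%llu %llu",
		   &allocated, &total, &metadata) == 3)
		printf("%llu of %llu sectors allocated, %llu metadata\n",
		       allocated, total, metadata);
	return 0;
}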
@@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti,
1202/*----------------------------------------------------------------- 1908/*-----------------------------------------------------------------
1203 * Origin methods 1909 * Origin methods
1204 *---------------------------------------------------------------*/ 1910 *---------------------------------------------------------------*/
1205static int __origin_write(struct list_head *snapshots, struct bio *bio) 1911
1912/*
1913 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1914 * supplied bio is ignored. The caller may submit it immediately.
1915 * (No remapping actually occurs as the origin is always a direct linear
1916 * map.)
1917 *
1918 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1919 * and any supplied bio is added to a list to be submitted once all
1920 * the necessary exceptions exist.
1921 */
1922static int __origin_write(struct list_head *snapshots, sector_t sector,
1923 struct bio *bio)
1206{ 1924{
1207 int r = DM_MAPIO_REMAPPED, first = 0; 1925 int r = DM_MAPIO_REMAPPED;
1208 struct dm_snapshot *snap; 1926 struct dm_snapshot *snap;
1209 struct dm_snap_exception *e; 1927 struct dm_exception *e;
1210 struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; 1928 struct dm_snap_pending_exception *pe;
1929 struct dm_snap_pending_exception *pe_to_start_now = NULL;
1930 struct dm_snap_pending_exception *pe_to_start_last = NULL;
1211 chunk_t chunk; 1931 chunk_t chunk;
1212 LIST_HEAD(pe_queue);
1213 1932
1214 /* Do all the snapshots on this origin */ 1933 /* Do all the snapshots on this origin */
1215 list_for_each_entry (snap, snapshots, list) { 1934 list_for_each_entry (snap, snapshots, list) {
1935 /*
1936 * Don't make new exceptions in a merging snapshot
1937 * because it has effectively been deleted
1938 */
1939 if (dm_target_is_snapshot_merge(snap->ti))
1940 continue;
1216 1941
1217 down_write(&snap->lock); 1942 down_write(&snap->lock);
1218 1943
@@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1221 goto next_snapshot; 1946 goto next_snapshot;
1222 1947
1223 /* Nothing to do if writing beyond end of snapshot */ 1948 /* Nothing to do if writing beyond end of snapshot */
1224 if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table)) 1949 if (sector >= dm_table_get_size(snap->ti->table))
1225 goto next_snapshot; 1950 goto next_snapshot;
1226 1951
1227 /* 1952 /*
1228 * Remember, different snapshots can have 1953 * Remember, different snapshots can have
1229 * different chunk sizes. 1954 * different chunk sizes.
1230 */ 1955 */
1231 chunk = sector_to_chunk(snap->store, bio->bi_sector); 1956 chunk = sector_to_chunk(snap->store, sector);
1232 1957
1233 /* 1958 /*
1234 * Check exception table to see if block 1959 * Check exception table to see if block
1235 * is already remapped in this snapshot 1960 * is already remapped in this snapshot
1236 * and trigger an exception if not. 1961 * and trigger an exception if not.
1237 *
1238 * ref_count is initialised to 1 so pending_complete()
1239 * won't destroy the primary_pe while we're inside this loop.
1240 */ 1962 */
1241 e = lookup_exception(&snap->complete, chunk); 1963 e = dm_lookup_exception(&snap->complete, chunk);
1242 if (e) 1964 if (e)
1243 goto next_snapshot; 1965 goto next_snapshot;
1244 1966
@@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1253 goto next_snapshot; 1975 goto next_snapshot;
1254 } 1976 }
1255 1977
1256 e = lookup_exception(&snap->complete, chunk); 1978 e = dm_lookup_exception(&snap->complete, chunk);
1257 if (e) { 1979 if (e) {
1258 free_pending_exception(pe); 1980 free_pending_exception(pe);
1259 goto next_snapshot; 1981 goto next_snapshot;
@@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1266 } 1988 }
1267 } 1989 }
1268 1990
1269 if (!primary_pe) { 1991 r = DM_MAPIO_SUBMITTED;
1270 /*
1271 * Either every pe here has same
1272 * primary_pe or none has one yet.
1273 */
1274 if (pe->primary_pe)
1275 primary_pe = pe->primary_pe;
1276 else {
1277 primary_pe = pe;
1278 first = 1;
1279 }
1280
1281 bio_list_add(&primary_pe->origin_bios, bio);
1282 1992
1283 r = DM_MAPIO_SUBMITTED; 1993 /*
1284 } 1994 * If an origin bio was supplied, queue it to wait for the
1995 * completion of this exception, and start this one last,
1996 * at the end of the function.
1997 */
1998 if (bio) {
1999 bio_list_add(&pe->origin_bios, bio);
2000 bio = NULL;
1285 2001
1286 if (!pe->primary_pe) { 2002 if (!pe->started) {
1287 pe->primary_pe = primary_pe; 2003 pe->started = 1;
1288 get_pending_exception(primary_pe); 2004 pe_to_start_last = pe;
2005 }
1289 } 2006 }
1290 2007
1291 if (!pe->started) { 2008 if (!pe->started) {
1292 pe->started = 1; 2009 pe->started = 1;
1293 list_add_tail(&pe->list, &pe_queue); 2010 pe_to_start_now = pe;
1294 } 2011 }
1295 2012
1296 next_snapshot: 2013 next_snapshot:
1297 up_write(&snap->lock); 2014 up_write(&snap->lock);
1298 }
1299 2015
1300 if (!primary_pe) 2016 if (pe_to_start_now) {
1301 return r; 2017 start_copy(pe_to_start_now);
1302 2018 pe_to_start_now = NULL;
1303 /* 2019 }
1304 * If this is the first time we're processing this chunk and
1305 * ref_count is now 1 it means all the pending exceptions
1306 * got completed while we were in the loop above, so it falls to
1307 * us here to remove the primary_pe and submit any origin_bios.
1308 */
1309
1310 if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
1311 flush_bios(bio_list_get(&primary_pe->origin_bios));
1312 free_pending_exception(primary_pe);
1313 /* If we got here, pe_queue is necessarily empty. */
1314 return r;
1315 } 2020 }
1316 2021
1317 /* 2022 /*
1318 * Now that we have a complete pe list we can start the copying. 2023 * Last of all, submit the exception against which the bio is
2024 * queued, to give the other exceptions a head start.
1319 */ 2025 */
1320 list_for_each_entry_safe(pe, next_pe, &pe_queue, list) 2026 if (pe_to_start_last)
1321 start_copy(pe); 2027 start_copy(pe_to_start_last);
1322 2028
1323 return r; 2029 return r;
1324} 2030}
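The rewrite above retires the old primary_pe reference counting. Instead, each pending exception is started as soon as its snapshot's lock is dropped, except the one that queued the caller's bio, which is started last so its peers get a head start on copying. A standalone model of that ordering; the struct and names are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative stand-in for struct dm_snap_pending_exception. */
struct pending {
        const char *snap;
        int holds_bio;
};

static void start_copy(struct pending *pe)
{
        printf("start copy for %s%s\n", pe->snap,
               pe->holds_bio ? " (origin bio queued here)" : "");
}

int main(void)
{
        struct pending pes[] = {
                { "snap0", 1 }, /* this one queued the origin bio */
                { "snap1", 0 },
                { "snap2", 0 },
        };
        struct pending *pe_to_start_last = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                if (pes[i].holds_bio) {
                        pe_to_start_last = &pes[i];     /* defer */
                        continue;
                }
                start_copy(&pes[i]);    /* head start for the others */
        }

        if (pe_to_start_last)
                start_copy(pe_to_start_last);
        return 0;
}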
@@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
1334 down_read(&_origins_lock); 2040 down_read(&_origins_lock);
1335 o = __lookup_origin(origin->bdev); 2041 o = __lookup_origin(origin->bdev);
1336 if (o) 2042 if (o)
1337 r = __origin_write(&o->snapshots, bio); 2043 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
1338 up_read(&_origins_lock); 2044 up_read(&_origins_lock);
1339 2045
1340 return r; 2046 return r;
1341} 2047}
1342 2048
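Both callers of __origin_write() pass a raw sector, and each snapshot translates it into its own chunk number since, as the comment in the loop notes, chunk sizes differ per snapshot. A minimal sketch of that translation, assuming power-of-two chunk sizes so it reduces to a shift (the chunk_shift parameter is an assumption standing in for the exception store's geometry):

#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long long chunk_t;

/* chunk_shift is an assumed stand-in for the store's geometry. */
static chunk_t to_chunk(unsigned chunk_shift, sector_t sector)
{
        return sector >> chunk_shift;
}

int main(void)
{
        /* The same origin sector lands in different chunk numbers. */
        printf("sector 100, 16-sector chunks -> chunk %llu\n",
               to_chunk(4, 100));       /* chunk 6 */
        printf("sector 100, 32-sector chunks -> chunk %llu\n",
               to_chunk(5, 100));       /* chunk 3 */
        return 0;
}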
1343/* 2049/*
2050 * Trigger exceptions in all non-merging snapshots.
2051 *
2052 * The chunk size of the merging snapshot may be larger than the chunk
2053 * size of some other snapshot so we may need to reallocate multiple
2054 * chunks in other snapshots.
2055 *
2056 * We scan all the overlapping exceptions in the other snapshots.
2057 * Returns 1 if anything was reallocated and must be waited for,
2058 * otherwise returns 0.
2059 *
2060 * size must be a multiple of merging_snap's chunk_size.
2061 */
2062static int origin_write_extent(struct dm_snapshot *merging_snap,
2063 sector_t sector, unsigned size)
2064{
2065 int must_wait = 0;
2066 sector_t n;
2067 struct origin *o;
2068
2069 /*
2070 * The origin's __minimum_chunk_size() got stored in split_io
2071 * by snapshot_merge_resume().
2072 */
2073 down_read(&_origins_lock);
2074 o = __lookup_origin(merging_snap->origin->bdev);
2075 for (n = 0; n < size; n += merging_snap->ti->split_io)
2076 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2077 DM_MAPIO_SUBMITTED)
2078 must_wait = 1;
2079 up_read(&_origins_lock);
2080
2081 return must_wait;
2082}
2083
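origin_write_extent() has to walk the whole extent because a single chunk of the merging snapshot may overlap several smaller chunks in other snapshots; stepping by split_io (the minimum chunk size, per the comment above) guarantees every overlapping chunk is visited. An illustrative model of the walk, with all sizes invented:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
        sector_t sector = 128;  /* start of the merging chunk */
        unsigned size = 64;     /* merging snapshot's chunk size */
        sector_t split_io = 16; /* minimum chunk size on this origin */
        sector_t n;

        /* Mirrors the loop in origin_write_extent() above. */
        for (n = 0; n < size; n += split_io)
                printf("trigger exceptions at sector %llu\n", sector + n);
        return 0;
}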
2084/*
1344 * Origin: maps a linear range of a device, with hooks for snapshotting. 2085 * Origin: maps a linear range of a device, with hooks for snapshotting.
1345 */ 2086 */
1346 2087
@@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
1391 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; 2132 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
1392} 2133}
1393 2134
1394#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
1395
1396/* 2135/*
1397 * Set the target "split_io" field to the minimum of all the snapshots' 2136 * Set the target "split_io" field to the minimum of all the snapshots'
1398 * chunk sizes. 2137 * chunk sizes.
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
1400static void origin_resume(struct dm_target *ti) 2139static void origin_resume(struct dm_target *ti)
1401{ 2140{
1402 struct dm_dev *dev = ti->private; 2141 struct dm_dev *dev = ti->private;
1403 struct dm_snapshot *snap;
1404 struct origin *o;
1405 unsigned chunk_size = 0;
1406
1407 down_read(&_origins_lock);
1408 o = __lookup_origin(dev->bdev);
1409 if (o)
1410 list_for_each_entry (snap, &o->snapshots, list)
1411 chunk_size = min_not_zero(chunk_size,
1412 snap->store->chunk_size);
1413 up_read(&_origins_lock);
1414 2142
1415 ti->split_io = chunk_size; 2143 ti->split_io = get_origin_minimum_chunksize(dev->bdev);
1416} 2144}
1417 2145
1418static int origin_status(struct dm_target *ti, status_type_t type, char *result, 2146static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1455,17 +2183,35 @@ static struct target_type origin_target = {
1455 2183
1456static struct target_type snapshot_target = { 2184static struct target_type snapshot_target = {
1457 .name = "snapshot", 2185 .name = "snapshot",
1458 .version = {1, 7, 0}, 2186 .version = {1, 9, 0},
1459 .module = THIS_MODULE, 2187 .module = THIS_MODULE,
1460 .ctr = snapshot_ctr, 2188 .ctr = snapshot_ctr,
1461 .dtr = snapshot_dtr, 2189 .dtr = snapshot_dtr,
1462 .map = snapshot_map, 2190 .map = snapshot_map,
1463 .end_io = snapshot_end_io, 2191 .end_io = snapshot_end_io,
2192 .postsuspend = snapshot_postsuspend,
2193 .preresume = snapshot_preresume,
1464 .resume = snapshot_resume, 2194 .resume = snapshot_resume,
1465 .status = snapshot_status, 2195 .status = snapshot_status,
1466 .iterate_devices = snapshot_iterate_devices, 2196 .iterate_devices = snapshot_iterate_devices,
1467}; 2197};
1468 2198
2199static struct target_type merge_target = {
2200 .name = dm_snapshot_merge_target_name,
2201 .version = {1, 0, 0},
2202 .module = THIS_MODULE,
2203 .ctr = snapshot_ctr,
2204 .dtr = snapshot_dtr,
2205 .map = snapshot_merge_map,
2206 .end_io = snapshot_end_io,
2207 .presuspend = snapshot_merge_presuspend,
2208 .postsuspend = snapshot_postsuspend,
2209 .preresume = snapshot_preresume,
2210 .resume = snapshot_merge_resume,
2211 .status = snapshot_status,
2212 .iterate_devices = snapshot_iterate_devices,
2213};
2214
1469static int __init dm_snapshot_init(void) 2215static int __init dm_snapshot_init(void)
1470{ 2216{
1471 int r; 2217 int r;
@@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void)
1477 } 2223 }
1478 2224
1479 r = dm_register_target(&snapshot_target); 2225 r = dm_register_target(&snapshot_target);
1480 if (r) { 2226 if (r < 0) {
1481 DMERR("snapshot target register failed %d", r); 2227 DMERR("snapshot target register failed %d", r);
1482 goto bad_register_snapshot_target; 2228 goto bad_register_snapshot_target;
1483 } 2229 }
@@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void)
1485 r = dm_register_target(&origin_target); 2231 r = dm_register_target(&origin_target);
1486 if (r < 0) { 2232 if (r < 0) {
1487 DMERR("Origin target register failed %d", r); 2233 DMERR("Origin target register failed %d", r);
1488 goto bad1; 2234 goto bad_register_origin_target;
2235 }
2236
2237 r = dm_register_target(&merge_target);
2238 if (r < 0) {
2239 DMERR("Merge target register failed %d", r);
2240 goto bad_register_merge_target;
1489 } 2241 }
1490 2242
1491 r = init_origin_hash(); 2243 r = init_origin_hash();
1492 if (r) { 2244 if (r) {
1493 DMERR("init_origin_hash failed."); 2245 DMERR("init_origin_hash failed.");
1494 goto bad2; 2246 goto bad_origin_hash;
1495 } 2247 }
1496 2248
1497 exception_cache = KMEM_CACHE(dm_snap_exception, 0); 2249 exception_cache = KMEM_CACHE(dm_exception, 0);
1498 if (!exception_cache) { 2250 if (!exception_cache) {
1499 DMERR("Couldn't create exception cache."); 2251 DMERR("Couldn't create exception cache.");
1500 r = -ENOMEM; 2252 r = -ENOMEM;
1501 goto bad3; 2253 goto bad_exception_cache;
1502 } 2254 }
1503 2255
1504 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); 2256 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
1505 if (!pending_cache) { 2257 if (!pending_cache) {
1506 DMERR("Couldn't create pending cache."); 2258 DMERR("Couldn't create pending cache.");
1507 r = -ENOMEM; 2259 r = -ENOMEM;
1508 goto bad4; 2260 goto bad_pending_cache;
1509 } 2261 }
1510 2262
1511 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); 2263 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
1512 if (!tracked_chunk_cache) { 2264 if (!tracked_chunk_cache) {
1513 DMERR("Couldn't create cache to track chunks in use."); 2265 DMERR("Couldn't create cache to track chunks in use.");
1514 r = -ENOMEM; 2266 r = -ENOMEM;
1515 goto bad5; 2267 goto bad_tracked_chunk_cache;
1516 } 2268 }
1517 2269
1518 ksnapd = create_singlethread_workqueue("ksnapd"); 2270 ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void)
1526 2278
1527bad_pending_pool: 2279bad_pending_pool:
1528 kmem_cache_destroy(tracked_chunk_cache); 2280 kmem_cache_destroy(tracked_chunk_cache);
1529bad5: 2281bad_tracked_chunk_cache:
1530 kmem_cache_destroy(pending_cache); 2282 kmem_cache_destroy(pending_cache);
1531bad4: 2283bad_pending_cache:
1532 kmem_cache_destroy(exception_cache); 2284 kmem_cache_destroy(exception_cache);
1533bad3: 2285bad_exception_cache:
1534 exit_origin_hash(); 2286 exit_origin_hash();
1535bad2: 2287bad_origin_hash:
2288 dm_unregister_target(&merge_target);
2289bad_register_merge_target:
1536 dm_unregister_target(&origin_target); 2290 dm_unregister_target(&origin_target);
1537bad1: 2291bad_register_origin_target:
1538 dm_unregister_target(&snapshot_target); 2292 dm_unregister_target(&snapshot_target);
1539
1540bad_register_snapshot_target: 2293bad_register_snapshot_target:
1541 dm_exception_store_exit(); 2294 dm_exception_store_exit();
2295
1542 return r; 2296 return r;
1543} 2297}
1544 2298
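Beyond registering the new merge target, this hunk renames the numbered error labels (bad1..bad5) to describe the step that failed; each label falls through to undo everything registered before it, in reverse order. A self-contained sketch of the pattern, simulating a failure at the third step:

#include <stdio.h>

static int step(const char *name, int ok)
{
        printf("%s %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
}

static int init(void)
{
        int r;

        r = step("register snapshot target", 1);
        if (r < 0)
                goto bad_register_snapshot_target;

        r = step("register origin target", 1);
        if (r < 0)
                goto bad_register_origin_target;

        r = step("register merge target", 0);   /* simulated failure */
        if (r < 0)
                goto bad_register_merge_target;

        return 0;

bad_register_merge_target:
        step("unregister origin target", 1);
bad_register_origin_target:
        step("unregister snapshot target", 1);
bad_register_snapshot_target:
        return r;
}

int main(void)
{
        return init() ? 1 : 0;
}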
@@ -1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void)
1548 2302
1549 dm_unregister_target(&snapshot_target); 2303 dm_unregister_target(&snapshot_target);
1550 dm_unregister_target(&origin_target); 2304 dm_unregister_target(&origin_target);
2305 dm_unregister_target(&merge_target);
1551 2306
1552 exit_origin_hash(); 2307 exit_origin_hash();
1553 kmem_cache_destroy(pending_cache); 2308 kmem_cache_destroy(pending_cache);