From eccf081799be8d83852f183838bf26e1ca099db4 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Mon, 27 Mar 2006 01:17:42 -0800 Subject: [PATCH] device-mapper snapshot: fix origin_write pending_exception submission Say you have several snapshots of the same origin and then you issue a write to some place in the origin for the first time. Before the device-mapper snapshot target lets the write go through to the underlying device, it needs to make a copy of the data that is about to be overwritten. Each snapshot is independent, so it makes one copy for each snapshot. __origin_write() loops through each snapshot and checks to see whether a copy is needed for that snapshot. (A copy is only needed the first time that data changes.) If a copy is needed, the code allocates a 'pending_exception' structure holding the details. It links these together for all the snapshots, then works its way through this list and submits the copying requests to the kcopyd thread by calling start_copy(). When each request is completed, the original pending_exception structure gets freed in pending_complete(). If you're very unlucky, this structure can get freed *before* the submission process has finished walking the list. This patch: 1) Creates a new temporary list pe_queue to hold the pending exception structures; 2) Does all the bookkeeping up-front, then walks through the new list safely and calls start_copy() for each pending_exception that needed it; 3) Avoids attempting to add pe->siblings to the list if it's already connected. [NB This does not fix all the races in this code. More patches will follow.] Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) (limited to 'drivers/md/dm-snap.c') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 7401540086df..874f145431d8 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -48,6 +48,11 @@ struct pending_exception { struct bio_list origin_bios; struct bio_list snapshot_bios; + /* + * Short-term queue of pending exceptions prior to submission. + */ + struct list_head list; + /* * Other pending_exceptions that are processing this * chunk. When this list is empty, we know we can @@ -930,8 +935,9 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) int r = 1, first = 1; struct dm_snapshot *snap; struct exception *e; - struct pending_exception *pe, *last = NULL; + struct pending_exception *pe, *next_pe, *last = NULL; chunk_t chunk; + LIST_HEAD(pe_queue); /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { @@ -965,12 +971,19 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) snap->valid = 0; } else { - if (last) + if (first) { + bio_list_add(&pe->origin_bios, bio); + r = 0; + first = 0; + } + if (last && list_empty(&pe->siblings)) list_merge(&pe->siblings, &last->siblings); - + if (!pe->started) { + pe->started = 1; + list_add_tail(&pe->list, &pe_queue); + } last = pe; - r = 0; } } @@ -980,24 +993,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) /* * Now that we have a complete pe list we can start the copying. 
*/ - if (last) { - pe = last; - do { - down_write(&pe->snap->lock); - if (first) - bio_list_add(&pe->origin_bios, bio); - if (!pe->started) { - pe->started = 1; - up_write(&pe->snap->lock); - start_copy(pe); - } else - up_write(&pe->snap->lock); - first = 0; - pe = list_entry(pe->siblings.next, - struct pending_exception, siblings); - - } while (pe != last); - } + list_for_each_entry_safe(pe, next_pe, &pe_queue, list) + start_copy(pe); return r; } -- cgit v1.2.2 From b4b610f684d13bf8691feeae5d4d7a8bd1f1033e Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Mon, 27 Mar 2006 01:17:44 -0800 Subject: [PATCH] device-mapper snapshot: replace sibling list The siblings "list" is used unsafely at the moment. Firstly, only the element on the list being changed gets locked (via the snapshot lock), not the next and previous elements which have pointers that are also being changed. Secondly, if you have two or more snapshots and write to the same chunk a second time before every snapshot has finished making its private copy of the data, if you're unlucky, _origin_write() could attempt its list_merge() and dereference a 'last' pointer to a pending_exception structure that has just been freed. Analysis reveals that the list is actually only there for reference counting. If 5 pending_exceptions are needed in origin_write, then the 5 are joined together into a 5-element list - without a separate list head because there's nowhere suitable to store it. As the pending_exceptions complete, they are removed from the list one-by-one and any contents of origin_bios get moved across to one of the remaining pending_exceptions on the list. Whichever one is last is detected because list_empty() is then true and the origin_bios get submitted. The fix proposed here uses an alternative reference counting mechanism by choosing one of the pending_exceptions as primary and maintaining an atomic counter there. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 122 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 80 insertions(+), 42 deletions(-) (limited to 'drivers/md/dm-snap.c') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 874f145431d8..475514bda9d0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -54,11 +54,21 @@ struct pending_exception { struct list_head list; /* - * Other pending_exceptions that are processing this - * chunk. When this list is empty, we know we can - * complete the origins. + * The primary pending_exception is the one that holds + * the sibling_count and the list of origin_bios for a + * group of pending_exceptions. It is always last to get freed. + * These fields get set up when writing to the origin. */ - struct list_head siblings; + struct pending_exception *primary_pe; + + /* + * Number of pending_exceptions processing this chunk. + * When this drops to zero we must complete the origin bios. + * If incrementing or decrementing this, hold pe->snap->lock for + * the sibling concerned and not pe->primary_pe->snap->lock unless + * they are the same. 
+ */ + atomic_t sibling_count; /* Pointer back to snapshot context */ struct dm_snapshot *snap; @@ -593,20 +603,15 @@ static void error_bios(struct bio *bio) static struct bio *__flush_bios(struct pending_exception *pe) { - struct pending_exception *sibling; - - if (list_empty(&pe->siblings)) - return bio_list_get(&pe->origin_bios); - - sibling = list_entry(pe->siblings.next, - struct pending_exception, siblings); - - list_del(&pe->siblings); - - /* This is fine as long as kcopyd is single-threaded. If kcopyd - * becomes multi-threaded, we'll need some locking here. + /* + * If this pe is involved in a write to the origin and + * it is the last sibling to complete then release + * the bios for the original write to the origin. */ - bio_list_merge(&sibling->origin_bios, &pe->origin_bios); + + if (pe->primary_pe && + atomic_dec_and_test(&pe->primary_pe->sibling_count)) + return bio_list_get(&pe->primary_pe->origin_bios); return NULL; } @@ -614,6 +619,7 @@ static struct bio *__flush_bios(struct pending_exception *pe) static void pending_complete(struct pending_exception *pe, int success) { struct exception *e; + struct pending_exception *primary_pe; struct dm_snapshot *s = pe->snap; struct bio *flush = NULL; @@ -662,7 +668,20 @@ static void pending_complete(struct pending_exception *pe, int success) } out: - free_pending_exception(pe); + primary_pe = pe->primary_pe; + + /* + * Free the pe if it's not linked to an origin write or if + * it's not itself a primary pe. + */ + if (!primary_pe || primary_pe != pe) + free_pending_exception(pe); + + /* + * Free the primary pe if nothing references it. + */ + if (primary_pe && !atomic_read(&primary_pe->sibling_count)) + free_pending_exception(primary_pe); if (flush) flush_bios(flush); @@ -757,7 +776,8 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) pe->e.old_chunk = chunk; bio_list_init(&pe->origin_bios); bio_list_init(&pe->snapshot_bios); - INIT_LIST_HEAD(&pe->siblings); + pe->primary_pe = NULL; + atomic_set(&pe->sibling_count, 1); pe->snap = s; pe->started = 0; @@ -916,26 +936,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, /*----------------------------------------------------------------- * Origin methods *---------------------------------------------------------------*/ -static void list_merge(struct list_head *l1, struct list_head *l2) -{ - struct list_head *l1_n, *l2_p; - - l1_n = l1->next; - l2_p = l2->prev; - - l1->next = l2; - l2->prev = l1; - - l2_p->next = l1_n; - l1_n->prev = l2_p; -} - static int __origin_write(struct list_head *snapshots, struct bio *bio) { - int r = 1, first = 1; + int r = 1, first = 0; struct dm_snapshot *snap; struct exception *e; - struct pending_exception *pe, *next_pe, *last = NULL; + struct pending_exception *pe, *next_pe, *primary_pe = NULL; chunk_t chunk; LIST_HEAD(pe_queue); @@ -962,6 +968,9 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) * Check exception table to see if block * is already remapped in this snapshot * and trigger an exception if not. + * + * sibling_count is initialised to 1 so pending_complete() + * won't destroy the primary_pe while we're inside this loop. */ e = lookup_exception(&snap->complete, chunk); if (!e) { @@ -971,31 +980,60 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) snap->valid = 0; } else { - if (first) { - bio_list_add(&pe->origin_bios, bio); + if (!primary_pe) { + /* + * Either every pe here has same + * primary_pe or none has one yet. 
+ */ + if (pe->primary_pe) + primary_pe = pe->primary_pe; + else { + primary_pe = pe; + first = 1; + } + + bio_list_add(&primary_pe->origin_bios, + bio); r = 0; - first = 0; } - if (last && list_empty(&pe->siblings)) - list_merge(&pe->siblings, - &last->siblings); + if (!pe->primary_pe) { + atomic_inc(&primary_pe->sibling_count); + pe->primary_pe = primary_pe; + } if (!pe->started) { pe->started = 1; list_add_tail(&pe->list, &pe_queue); } - last = pe; } } up_write(&snap->lock); } + if (!primary_pe) + goto out; + + /* + * If this is the first time we're processing this chunk and + * sibling_count is now 1 it means all the pending exceptions + * got completed while we were in the loop above, so it falls to + * us here to remove the primary_pe and submit any origin_bios. + */ + + if (first && atomic_dec_and_test(&primary_pe->sibling_count)) { + flush_bios(bio_list_get(&primary_pe->origin_bios)); + free_pending_exception(primary_pe); + /* If we got here, pe_queue is necessarily empty. */ + goto out; + } + /* * Now that we have a complete pe list we can start the copying. */ list_for_each_entry_safe(pe, next_pe, &pe_queue, list) start_copy(pe); + out: return r; } -- cgit v1.2.2 From 76df1c651b66bdf07d60b3d60789feb5f58d73e3 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Mon, 27 Mar 2006 01:17:45 -0800 Subject: [PATCH] device-mapper snapshot: fix invalidation When a snapshot becomes invalid, s->valid is set to 0. In this state, a snapshot can no longer be accessed. When s->lock is acquired, before doing anything else, s->valid must be checked to ensure the snapshot remains valid. This patch eliminates some races (that may cause panics) by adding some missing checks. At the same time, some unnecessary levels of indentation are removed and snapshot invalidation is moved into a single function that always generates a device-mapper event. 
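As a rough sketch of that rule (this is not code from the patch itself; the function name below is invented, while s->lock, s->valid and the new __invalidate_snapshot() helper are the real field, flag and function the patch uses), every code path that acquires s->lock now re-checks validity before doing anything else:

static int snapshot_op_sketch(struct dm_snapshot *s)
{
	int r = 0;

	down_write(&s->lock);

	/*
	 * The snapshot may have been invalidated while we were waiting
	 * for the lock, so re-check s->valid before touching anything.
	 */
	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* ... work that requires a valid snapshot goes here ... */

 out_unlock:
	up_write(&s->lock);
	return r;
}

snapshot_map() and __origin_write() in the diff below follow this shape (the read path does the same under down_read()), and every failure is routed through __invalidate_snapshot(), which drops the exception store, clears s->valid and generates the device-mapper event.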
Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 295 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 174 insertions(+), 121 deletions(-) (limited to 'drivers/md/dm-snap.c') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 475514bda9d0..14bd1a1815b1 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -392,6 +392,8 @@ static void read_snapshot_metadata(struct dm_snapshot *s) down_write(&s->lock); s->valid = 0; up_write(&s->lock); + + dm_table_event(s->table); } } @@ -601,6 +603,11 @@ static void error_bios(struct bio *bio) } } +static inline void error_snapshot_bios(struct pending_exception *pe) +{ + error_bios(bio_list_get(&pe->snapshot_bios)); +} + static struct bio *__flush_bios(struct pending_exception *pe) { /* @@ -616,6 +623,28 @@ static struct bio *__flush_bios(struct pending_exception *pe) return NULL; } +static void __invalidate_snapshot(struct dm_snapshot *s, + struct pending_exception *pe, int err) +{ + if (!s->valid) + return; + + if (err == -EIO) + DMERR("Invalidating snapshot: Error reading/writing."); + else if (err == -ENOMEM) + DMERR("Invalidating snapshot: Unable to allocate exception."); + + if (pe) + remove_exception(&pe->e); + + if (s->store.drop_snapshot) + s->store.drop_snapshot(&s->store); + + s->valid = 0; + + dm_table_event(s->table); +} + static void pending_complete(struct pending_exception *pe, int success) { struct exception *e; @@ -623,50 +652,53 @@ static void pending_complete(struct pending_exception *pe, int success) struct dm_snapshot *s = pe->snap; struct bio *flush = NULL; - if (success) { - e = alloc_exception(); - if (!e) { - DMWARN("Unable to allocate exception."); - down_write(&s->lock); - s->store.drop_snapshot(&s->store); - s->valid = 0; - flush = __flush_bios(pe); - up_write(&s->lock); - - error_bios(bio_list_get(&pe->snapshot_bios)); - goto out; - } - *e = pe->e; - - /* - * Add a proper exception, and remove the - * in-flight exception from the list. - */ + if (!success) { + /* Read/write error - snapshot is unusable */ down_write(&s->lock); - insert_exception(&s->complete, e); - remove_exception(&pe->e); + __invalidate_snapshot(s, pe, -EIO); flush = __flush_bios(pe); - - /* Submit any pending write bios */ up_write(&s->lock); - flush_bios(bio_list_get(&pe->snapshot_bios)); - } else { - /* Read/write error - snapshot is unusable */ + error_snapshot_bios(pe); + goto out; + } + + e = alloc_exception(); + if (!e) { down_write(&s->lock); - if (s->valid) - DMERR("Error reading/writing snapshot"); - s->store.drop_snapshot(&s->store); - s->valid = 0; - remove_exception(&pe->e); + __invalidate_snapshot(s, pe, -ENOMEM); flush = __flush_bios(pe); up_write(&s->lock); - error_bios(bio_list_get(&pe->snapshot_bios)); + error_snapshot_bios(pe); + goto out; + } + *e = pe->e; - dm_table_event(s->table); + /* + * Add a proper exception, and remove the + * in-flight exception from the list. 
+ */ + down_write(&s->lock); + if (!s->valid) { + flush = __flush_bios(pe); + up_write(&s->lock); + + free_exception(e); + + error_snapshot_bios(pe); + goto out; } + insert_exception(&s->complete, e); + remove_exception(&pe->e); + flush = __flush_bios(pe); + + up_write(&s->lock); + + /* Submit any pending write bios */ + flush_bios(bio_list_get(&pe->snapshot_bios)); + out: primary_pe = pe->primary_pe; @@ -758,39 +790,45 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) if (e) { /* cast the exception to a pending exception */ pe = container_of(e, struct pending_exception, e); + goto out; + } - } else { - /* - * Create a new pending exception, we don't want - * to hold the lock while we do this. - */ - up_write(&s->lock); - pe = alloc_pending_exception(); - down_write(&s->lock); + /* + * Create a new pending exception, we don't want + * to hold the lock while we do this. + */ + up_write(&s->lock); + pe = alloc_pending_exception(); + down_write(&s->lock); - e = lookup_exception(&s->pending, chunk); - if (e) { - free_pending_exception(pe); - pe = container_of(e, struct pending_exception, e); - } else { - pe->e.old_chunk = chunk; - bio_list_init(&pe->origin_bios); - bio_list_init(&pe->snapshot_bios); - pe->primary_pe = NULL; - atomic_set(&pe->sibling_count, 1); - pe->snap = s; - pe->started = 0; - - if (s->store.prepare_exception(&s->store, &pe->e)) { - free_pending_exception(pe); - s->valid = 0; - return NULL; - } + if (!s->valid) { + free_pending_exception(pe); + return NULL; + } - insert_exception(&s->pending, &pe->e); - } + e = lookup_exception(&s->pending, chunk); + if (e) { + free_pending_exception(pe); + pe = container_of(e, struct pending_exception, e); + goto out; } + pe->e.old_chunk = chunk; + bio_list_init(&pe->origin_bios); + bio_list_init(&pe->snapshot_bios); + pe->primary_pe = NULL; + atomic_set(&pe->sibling_count, 1); + pe->snap = s; + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + return NULL; + } + + insert_exception(&s->pending, &pe->e); + + out: return pe; } @@ -807,13 +845,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, { struct exception *e; struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + int copy_needed = 0; int r = 1; chunk_t chunk; - struct pending_exception *pe; + struct pending_exception *pe = NULL; chunk = sector_to_chunk(s, bio->bi_sector); /* Full snapshots are not usable */ + /* To get here the table must be live so s->active is always set. 
*/ if (!s->valid) return -EIO; @@ -831,36 +871,41 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, * to copy an exception */ down_write(&s->lock); + if (!s->valid) { + r = -EIO; + goto out_unlock; + } + /* If the block is already remapped - use that, else remap it */ e = lookup_exception(&s->complete, chunk); if (e) { remap_exception(s, e, bio); - up_write(&s->lock); - - } else { - pe = __find_pending_exception(s, bio); - - if (!pe) { - if (s->store.drop_snapshot) - s->store.drop_snapshot(&s->store); - s->valid = 0; - r = -EIO; - up_write(&s->lock); - } else { - remap_exception(s, &pe->e, bio); - bio_list_add(&pe->snapshot_bios, bio); - - if (!pe->started) { - /* this is protected by snap->lock */ - pe->started = 1; - up_write(&s->lock); - start_copy(pe); - } else - up_write(&s->lock); - r = 0; - } + goto out_unlock; + } + + pe = __find_pending_exception(s, bio); + if (!pe) { + __invalidate_snapshot(s, pe, -ENOMEM); + r = -EIO; + goto out_unlock; } + remap_exception(s, &pe->e, bio); + bio_list_add(&pe->snapshot_bios, bio); + + if (!pe->started) { + /* this is protected by snap->lock */ + pe->started = 1; + copy_needed = 1; + } + + r = 0; + + out_unlock: + up_write(&s->lock); + + if (copy_needed) + start_copy(pe); } else { /* * FIXME: this read path scares me because we @@ -872,6 +917,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, /* Do reads */ down_read(&s->lock); + if (!s->valid) { + up_read(&s->lock); + return -EIO; + } + /* See if it it has been remapped */ e = lookup_exception(&s->complete, chunk); if (e) @@ -948,15 +998,15 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { + down_write(&snap->lock); + /* Only deal with valid and active snapshots */ if (!snap->valid || !snap->active) - continue; + goto next_snapshot; /* Nothing to do if writing beyond end of snapshot */ if (bio->bi_sector >= dm_table_get_size(snap->table)) - continue; - - down_write(&snap->lock); + goto next_snapshot; /* * Remember, different snapshots can have @@ -973,40 +1023,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) * won't destroy the primary_pe while we're inside this loop. */ e = lookup_exception(&snap->complete, chunk); - if (!e) { - pe = __find_pending_exception(snap, bio); - if (!pe) { - snap->store.drop_snapshot(&snap->store); - snap->valid = 0; - - } else { - if (!primary_pe) { - /* - * Either every pe here has same - * primary_pe or none has one yet. - */ - if (pe->primary_pe) - primary_pe = pe->primary_pe; - else { - primary_pe = pe; - first = 1; - } - - bio_list_add(&primary_pe->origin_bios, - bio); - r = 0; - } - if (!pe->primary_pe) { - atomic_inc(&primary_pe->sibling_count); - pe->primary_pe = primary_pe; - } - if (!pe->started) { - pe->started = 1; - list_add_tail(&pe->list, &pe_queue); - } + if (e) + goto next_snapshot; + + pe = __find_pending_exception(snap, bio); + if (!pe) { + __invalidate_snapshot(snap, pe, ENOMEM); + goto next_snapshot; + } + + if (!primary_pe) { + /* + * Either every pe here has same + * primary_pe or none has one yet. 
+ */ + if (pe->primary_pe) + primary_pe = pe->primary_pe; + else { + primary_pe = pe; + first = 1; } + + bio_list_add(&primary_pe->origin_bios, bio); + + r = 0; + } + + if (!pe->primary_pe) { + atomic_inc(&primary_pe->sibling_count); + pe->primary_pe = primary_pe; + } + + if (!pe->started) { + pe->started = 1; + list_add_tail(&pe->list, &pe_queue); } + next_snapshot: up_write(&snap->lock); } -- cgit v1.2.2 From 4ee218cd67b385759993a6c840ea45f0ee0a8b30 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 27 Mar 2006 01:17:48 -0800 Subject: [PATCH] dm: remove SECTOR_FORMAT We don't know what type sector_t has. Sometimes it's unsigned long, sometimes it's unsigned long long. For example on ppc64 it's unsigned long with CONFIG_LBD=n and on x86_64 it's unsigned long long with CONFIG_LBD=n. The way to handle all of this is to always use unsigned long long and to always typecast the sector_t when printing it. Acked-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/md/dm-snap.c') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 14bd1a1815b1..a5765f9fbe02 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -959,9 +959,9 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, snap->store.fraction_full(&snap->store, &numerator, &denominator); - snprintf(result, maxlen, - SECTOR_FORMAT "/" SECTOR_FORMAT, - numerator, denominator); + snprintf(result, maxlen, "%llu/%llu", + (unsigned long long)numerator, + (unsigned long long)denominator); } else snprintf(result, maxlen, "Unknown"); @@ -974,9 +974,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, * to make private copies if the output is to * make sense. */ - snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT, + snprintf(result, maxlen, "%s %s %c %llu", snap->origin->name, snap->cow->name, - snap->type, snap->chunk_size); + snap->type, + (unsigned long long)snap->chunk_size); break; } -- cgit v1.2.2 From 138728dc96529f20dfe970c470e51885a60e329f Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Mon, 27 Mar 2006 01:17:50 -0800 Subject: [PATCH] dm snapshot: fix kcopyd destructor Before removing a snapshot, wait for the completion of any kcopyd jobs using it. Do this by maintaining a count (nr_jobs) of how many outstanding jobs each kcopyd_client has. The snapshot destructor first unregisters the snapshot so that no new kcopyd jobs (created by writes to the origin) will reference that particular snapshot. kcopyd_client_destroy() is now run next to wait for the completion of any outstanding jobs before the snapshot exception structures (that those jobs reference) are freed. Signed-off-by: Alasdair G Kergon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/dm-snap.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/md/dm-snap.c') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index a5765f9fbe02..08312b46463a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -559,8 +559,12 @@ static void snapshot_dtr(struct dm_target *ti) { struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + /* Prevent further origin writes from using this snapshot. */ + /* After this returns there can be no new kcopyd jobs. 
*/
 	unregister_snapshot(s);
+	kcopyd_client_destroy(s->kcopyd_client);
+
 	exit_exception_table(&s->pending, pending_cache);
 	exit_exception_table(&s->complete, exception_cache);
@@ -569,7 +573,7 @@ static void snapshot_dtr(struct dm_target *ti)
 	dm_put_device(ti, s->origin);
 	dm_put_device(ti, s->cow);
-	kcopyd_client_destroy(s->kcopyd_client);
+
 	kfree(s);
 }
-- 
cgit v1.2.2
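The dm-snap.c side of this change is only the reordering above; the job counting itself lives in drivers/md/kcopyd.c, which is outside this extract. A rough sketch of the scheme the commit message describes, with assumed names (nr_jobs is taken from the text above; the wait queue and helper names are guesses, not the real implementation):

/* Sketch only: illustrates the idea, not the actual kcopyd code. */
#include <asm/atomic.h>
#include <linux/wait.h>

struct kcopyd_client {
	/* ... existing per-client state ... */
	atomic_t nr_jobs;		/* outstanding jobs using this client */
	wait_queue_head_t destroyq;	/* initialised with init_waitqueue_head();
					 * a destroyer sleeps here until nr_jobs
					 * reaches zero */
};

static void job_submitted(struct kcopyd_client *kc)
{
	atomic_inc(&kc->nr_jobs);
}

static void job_completed(struct kcopyd_client *kc)
{
	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);
}

void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	/* Wait for the completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	/* ... free pages, mempools and the client itself ... */
}

With the count in place, snapshot_dtr() only reaches exit_exception_table() after kcopyd_client_destroy() returns, so no outstanding copy job can still reference the exception structures being freed.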