author     Mike Snitzer <snitzer@redhat.com>      2009-12-10 18:52:34 -0500
committer  Alasdair G Kergon <agk@redhat.com>     2009-12-10 18:52:34 -0500
commit     8a2d528620e228ddfd0df9cec0a16e034ff8db1d
tree       ff93eac3e996df7bf046d08c7386d690c3fd5ee0  /drivers/md
parent     73dfd078cf8bfee4018fb22f1e2a24f2e05b69dc
dm snapshot: merge consecutive chunks together
s->store->type->prepare_merge returns the number of chunks that can be
copied linearly, working backwards from the returned chunk number.
For example, if it returns 3 chunks with old_chunk == 10 and new_chunk
== 20, then chunk 20 can be copied to 10, chunk 19 to 9 and 18 to 8.
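To make the backwards-counting convention concrete, here is a minimal
standalone sketch of the same arithmetic (userspace C for illustration
only; linear_region_start() is a hypothetical helper, not a function in
the patch):

    #include <stdio.h>

    typedef unsigned long long chunk_t;

    /*
     * prepare_merge() reports the *last* chunk of a linearly copyable
     * run plus the run length, so step back to the first chunk.
     */
    static void linear_region_start(chunk_t *old_chunk, chunk_t *new_chunk,
                                    int linear_chunks)
    {
            *old_chunk = *old_chunk + 1 - linear_chunks;
            *new_chunk = *new_chunk + 1 - linear_chunks;
    }

    int main(void)
    {
            chunk_t old_chunk = 10, new_chunk = 20;
            int linear_chunks = 3;

            linear_region_start(&old_chunk, &new_chunk, linear_chunks);
            /* Prints: copy chunks 18..20 onto 8..10 */
            printf("copy chunks %llu..%llu onto %llu..%llu\n",
                   new_chunk, new_chunk + linear_chunks - 1,
                   old_chunk, old_chunk + linear_chunks - 1);
            return 0;
    }

This is the same adjustment the patch applies with
"old_chunk = old_chunk + 1 - linear_chunks".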
Until now, kcopyd only copied one chunk at a time. This patch copies
the full set at once.
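Condensed from the hunks below (error handling and the intervening
waits omitted; src.count mirrors dest.count in the surrounding
function), the copy setup becomes a single kcopyd request spanning the
whole region:

    /* One (potentially large) I/O covering all linear_chunks */
    sector_t io_size = linear_chunks * s->store->chunk_size;
    struct dm_io_region src, dest;

    dest.bdev = s->origin->bdev;
    dest.sector = chunk_to_sector(s->store, old_chunk);
    dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

    src.bdev = s->cow->bdev;
    src.sector = chunk_to_sector(s->store, new_chunk);
    src.count = dest.count;

    dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);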
Consequently, snapshot_merge_process() must delay the merge if any
chunk in the region has writes in progress, not just the first chunk
in the region.
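Condensed from the hunks below, that delay logic in code form:
origin_write_extent() is now asked about io_size sectors rather than a
single chunk, and conflicting snapshot I/O is drained for every chunk
in the region before the copy is issued:

    previous_count = read_pending_exceptions_done_count();
    while (origin_write_extent(s, dest.sector, io_size)) {
            wait_event(_pending_exceptions_done,
                       (read_pending_exceptions_done_count() !=
                        previous_count));
            previous_count = read_pending_exceptions_done_count();
    }

    /* Wait until writes to all 'linear_chunks' drain */
    for (i = 0; i < linear_chunks; i++)
            __check_for_conflicting_io(s, old_chunk + i);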
snapshot-merge's performance is now comparable to the original
snapshot-origin target.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/dm-snap.c  31
 1 file changed, 21 insertions, 10 deletions
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1498704467a7..bb4b733697b3 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -879,9 +879,10 @@ static void increment_pending_exceptions_done_count(void)
 
 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 {
-	int r;
+	int i, linear_chunks;
 	chunk_t old_chunk, new_chunk;
 	struct dm_io_region src, dest;
+	sector_t io_size;
 	uint64_t previous_count;
 
 	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
@@ -896,20 +897,28 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 		goto shut;
 	}
 
-	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
-	if (r <= 0) {
-		if (r < 0)
+	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+						      &new_chunk);
+	if (linear_chunks <= 0) {
+		if (linear_chunks < 0)
 			DMERR("Read error in exception store: "
 			      "shutting down merge");
 		goto shut;
 	}
 
-	/* TODO: use larger I/O size once we verify that kcopyd handles it */
+	/* Adjust old_chunk and new_chunk to reflect start of linear region */
+	old_chunk = old_chunk + 1 - linear_chunks;
+	new_chunk = new_chunk + 1 - linear_chunks;
+
+	/*
+	 * Use one (potentially large) I/O to copy all 'linear_chunks'
+	 * from the exception store to the origin
+	 */
+	io_size = linear_chunks * s->store->chunk_size;
 
 	dest.bdev = s->origin->bdev;
 	dest.sector = chunk_to_sector(s->store, old_chunk);
-	dest.count = min((sector_t)s->store->chunk_size,
-			 get_dev_size(dest.bdev) - dest.sector);
+	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
 
 	src.bdev = s->cow->bdev;
 	src.sector = chunk_to_sector(s->store, new_chunk);
@@ -925,7 +934,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 	 * significant impact on performance.
 	 */
 	previous_count = read_pending_exceptions_done_count();
-	while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
+	while (origin_write_extent(s, dest.sector, io_size)) {
 		wait_event(_pending_exceptions_done,
 			   (read_pending_exceptions_done_count() !=
 			    previous_count));
@@ -935,10 +944,12 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 
 	down_write(&s->lock);
 	s->first_merging_chunk = old_chunk;
-	s->num_merging_chunks = 1;
+	s->num_merging_chunks = linear_chunks;
 	up_write(&s->lock);
 
-	__check_for_conflicting_io(s, old_chunk);
+	/* Wait until writes to all 'linear_chunks' drain */
+	for (i = 0; i < linear_chunks; i++)
+		__check_for_conflicting_io(s, old_chunk + i);
 
 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
 	return;