author     Mikulas Patocka <mpatocka@redhat.com>     2009-12-10 18:52:32 -0500
committer  Alasdair G Kergon <agk@redhat.com>        2009-12-10 18:52:32 -0500
commit     1e03f97e4301f75a2f3b649787f7876516764929
tree       37f4baa83145c011245a83f1e18acf9348aac3c3  /drivers/md
parent     9d3b15c4c776b041f9ee81810cbd375275411829
dm snapshot: add merging
Merging is started when the origin is resumed and is stopped when the origin
is suspended or when the merging snapshot is destroyed or errors are detected.

Merging is not yet interlocked with writes: this will be handled in
subsequent patches.

The code relies on callbacks from a private kcopyd thread.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
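
The core of the patch is the start/stop handshake built on the RUNNING_MERGE
and SHUTDOWN_MERGE bits: merging is kicked off from resume, each completed
kcopyd copy re-arms the copy of the next chunk, and presuspend asks the worker
to quiesce and waits for it.  Below is a minimal userspace sketch of that
handshake (plain C with pthreads; the names, the fake per-chunk work and the
timing are assumptions for illustration, not the kernel code added by this
patch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool running;            /* analogue of RUNNING_MERGE  */
static bool shutdown_req;       /* analogue of SHUTDOWN_MERGE */
static int  chunks_left = 100;  /* stand-in for exceptions still to merge */

static void *merge_worker(void *unused)
{
        (void)unused;
        for (;;) {
                pthread_mutex_lock(&lock);
                if (shutdown_req || chunks_left == 0) {
                        /* merge_shutdown(): clear RUNNING and wake any waiter */
                        running = false;
                        pthread_cond_broadcast(&wake);
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                chunks_left--;
                pthread_mutex_unlock(&lock);
                /* the kernel copies asynchronously and re-enters from the
                   kcopyd completion callback; this sketch just loops */
                usleep(1000);
        }
}

static pthread_t start_merge(void)
{
        pthread_t t;

        pthread_mutex_lock(&lock);
        running = true;                 /* like test_and_set_bit(RUNNING_MERGE) */
        pthread_mutex_unlock(&lock);
        pthread_create(&t, NULL, merge_worker, NULL);
        return t;
}

static void stop_merge(void)
{
        pthread_mutex_lock(&lock);
        shutdown_req = true;            /* like set_bit(SHUTDOWN_MERGE)        */
        while (running)                 /* like wait_on_bit(..., RUNNING_MERGE) */
                pthread_cond_wait(&wake, &lock);
        shutdown_req = false;           /* like clear_bit(SHUTDOWN_MERGE)      */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t = start_merge();    /* as snapshot_merge_resume() does     */

        usleep(10000);
        stop_merge();                   /* as snapshot_merge_presuspend() does */
        pthread_join(t, NULL);
        printf("chunks left unmerged: %d\n", chunks_left);
        return 0;
}

In the target itself, start_merge() is called from snapshot_merge_resume() and
stop_merge() from snapshot_merge_presuspend(), so the two sides are ordered by
the device-mapper suspend/resume sequence rather than by a mutex as in this
sketch.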
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-exception-store.h |  11
-rw-r--r--  drivers/md/dm-snap.c            | 239
2 files changed, 244 insertions, 6 deletions
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index c53e08935b42..e8dfa06af3ba 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -154,6 +154,13 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
         BUG_ON(!dm_consecutive_chunk_count(e));
 }
 
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
+{
+        BUG_ON(!dm_consecutive_chunk_count(e));
+
+        e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
+}
+
 # else
 # define DM_CHUNK_CONSECUTIVE_BITS 0
 
@@ -171,6 +178,10 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
 {
 }
 
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
+{
+}
+
 # endif
 
 /*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7ddee7c0c518..dc2412e6c5cf 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -106,8 +106,20 @@ struct dm_snapshot {
         mempool_t *tracked_chunk_pool;
         spinlock_t tracked_chunk_lock;
         struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
+        /* Wait for events based on state_bits */
+        unsigned long state_bits;
 };
 
+/*
+ * state_bits:
+ *   RUNNING_MERGE  - Merge operation is in progress.
+ *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ *                    cleared afterwards.
+ */
+#define RUNNING_MERGE          0
+#define SHUTDOWN_MERGE         1
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
         return s->cow;
@@ -386,6 +398,13 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
                 return -EINVAL;
         }
 
+        if (!snap_src->store->type->prepare_merge ||
+            !snap_src->store->type->commit_merge) {
+                snap->ti->error = "Snapshot exception store does not "
+                                  "support snapshot-merge.";
+                return -EINVAL;
+        }
+
         return 1;
 }
 
@@ -721,6 +740,178 @@ static int init_hash_tables(struct dm_snapshot *s)
         return 0;
 }
 
+static void merge_shutdown(struct dm_snapshot *s)
+{
+        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
+        smp_mb__after_clear_bit();
+        wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+                                           chunk_t old_chunk)
+{
+        struct dm_exception *e;
+
+        /* FIXME: interlock writes to this chunk */
+
+        e = dm_lookup_exception(&s->complete, old_chunk);
+        if (!e) {
+                DMERR("Corruption detected: exception for block %llu is "
+                      "on disk but not in memory",
+                      (unsigned long long)old_chunk);
+                return -EINVAL;
+        }
+
+        /*
+         * If this is the only chunk using this exception, remove exception.
+         */
+        if (!dm_consecutive_chunk_count(e)) {
+                dm_remove_exception(e);
+                free_completed_exception(e);
+                return 0;
+        }
+
+        /*
+         * The chunk may be either at the beginning or the end of a
+         * group of consecutive chunks - never in the middle.  We are
+         * removing chunks in the opposite order to that in which they
+         * were added, so this should always be true.
+         * Decrement the consecutive chunk counter and adjust the
+         * starting point if necessary.
+         */
+        if (old_chunk == e->old_chunk) {
+                e->old_chunk++;
+                e->new_chunk++;
+        } else if (old_chunk != e->old_chunk +
+                   dm_consecutive_chunk_count(e)) {
+                DMERR("Attempt to merge block %llu from the "
+                      "middle of a chunk range [%llu - %llu]",
+                      (unsigned long long)old_chunk,
+                      (unsigned long long)e->old_chunk,
+                      (unsigned long long)
+                      e->old_chunk + dm_consecutive_chunk_count(e));
+                return -EINVAL;
+        }
+
+        dm_consecutive_chunk_count_dec(e);
+
+        return 0;
+}
+
+static int remove_single_exception_chunk(struct dm_snapshot *s,
+                                         chunk_t old_chunk)
+{
+        int r = 0;
+
+        down_write(&s->lock);
+        r = __remove_single_exception_chunk(s, old_chunk);
+        up_write(&s->lock);
+
+        return r;
+}
+
+static void merge_callback(int read_err, unsigned long write_err,
+                           void *context);
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+        int r;
+        chunk_t old_chunk, new_chunk;
+        struct dm_io_region src, dest;
+
+        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+                goto shut;
+
+        /*
+         * valid flag never changes during merge, so no lock required.
+         */
+        if (!s->valid) {
+                DMERR("Snapshot is invalid: can't merge");
+                goto shut;
+        }
+
+        r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
+        if (r <= 0) {
+                if (r < 0)
+                        DMERR("Read error in exception store: "
+                              "shutting down merge");
+                goto shut;
+        }
+
+        /* TODO: use larger I/O size once we verify that kcopyd handles it */
+
+        if (remove_single_exception_chunk(s, old_chunk) < 0)
+                goto shut;
+
+        dest.bdev = s->origin->bdev;
+        dest.sector = chunk_to_sector(s->store, old_chunk);
+        dest.count = min((sector_t)s->store->chunk_size,
+                         get_dev_size(dest.bdev) - dest.sector);
+
+        src.bdev = s->cow->bdev;
+        src.sector = chunk_to_sector(s->store, new_chunk);
+        src.count = dest.count;
+
+        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+        return;
+
+shut:
+        merge_shutdown(s);
+}
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+        struct dm_snapshot *s = context;
+
+        if (read_err || write_err) {
+                if (read_err)
+                        DMERR("Read error: shutting down merge.");
+                else
+                        DMERR("Write error: shutting down merge.");
+                goto shut;
+        }
+
+        if (s->store->type->commit_merge(s->store, 1) < 0) {
+                DMERR("Write error in exception store: shutting down merge");
+                goto shut;
+        }
+
+        snapshot_merge_next_chunks(s);
+
+        return;
+
+shut:
+        merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+                snapshot_merge_next_chunks(s);
+}
+
+static int wait_schedule(void *ptr)
+{
+        schedule();
+
+        return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+        set_bit(SHUTDOWN_MERGE, &s->state_bits);
+        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+                    TASK_UNINTERRUPTIBLE);
+        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
+
 /*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
@@ -791,6 +982,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         init_rwsem(&s->lock);
         INIT_LIST_HEAD(&s->list);
         spin_lock_init(&s->pe_lock);
+        s->state_bits = 0;
 
         /* Allocate hash table for COW data */
         if (init_hash_tables(s)) {
@@ -963,6 +1155,9 @@ static void snapshot_dtr(struct dm_target *ti)
         }
         up_read(&_origins_lock);
 
+        if (dm_target_is_snapshot_merge(ti))
+                stop_merge(s);
+
         /* Prevent further origin writes from using this snapshot. */
         /* After this returns there can be no new kcopyd jobs. */
         unregister_snapshot(s);
@@ -1404,6 +1599,13 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
         return 0;
 }
 
+static void snapshot_merge_presuspend(struct dm_target *ti)
+{
+        struct dm_snapshot *s = ti->private;
+
+        stop_merge(s);
+}
+
 static void snapshot_postsuspend(struct dm_target *ti)
 {
         struct dm_snapshot *s = ti->private;
@@ -1464,6 +1666,34 @@ static void snapshot_resume(struct dm_target *ti)
         up_write(&s->lock);
 }
 
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+        sector_t min_chunksize;
+
+        down_read(&_origins_lock);
+        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+        up_read(&_origins_lock);
+
+        return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+        struct dm_snapshot *s = ti->private;
+
+        /*
+         * Handover exceptions from existing snapshot.
+         */
+        snapshot_resume(ti);
+
+        /*
+         * snapshot-merge acts as an origin, so set ti->split_io
+         */
+        ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+        start_merge(s);
+}
+
 static int snapshot_status(struct dm_target *ti, status_type_t type,
                            char *result, unsigned int maxlen)
 {
@@ -1722,11 +1952,7 @@ static void origin_resume(struct dm_target *ti)
 {
         struct dm_dev *dev = ti->private;
 
-        down_read(&_origins_lock);
-
-        ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
-
-        up_read(&_origins_lock);
+        ti->split_io = get_origin_minimum_chunksize(dev->bdev);
 }
 
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1790,9 +2016,10 @@ static struct target_type merge_target = {
         .dtr     = snapshot_dtr,
         .map     = snapshot_merge_map,
         .end_io  = snapshot_end_io,
+        .presuspend = snapshot_merge_presuspend,
         .postsuspend = snapshot_postsuspend,
         .preresume = snapshot_preresume,
-        .resume  = snapshot_resume,
+        .resume  = snapshot_merge_resume,
         .status  = snapshot_status,
         .iterate_devices = snapshot_iterate_devices,
 };
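
The per-chunk removal above relies on the rule spelled out in the comment in
__remove_single_exception_chunk(): a merged chunk is only ever trimmed from
one end of a run of consecutive exceptions, because chunks are merged in the
reverse of the order in which they were added.  A small userspace sketch of
that trimming rule follows; the struct and helper are simplified assumptions,
not the kernel's types, and a run of a single chunk (count == 0), which the
kernel removes outright, is not modelled here.

#include <stdint.h>
#include <stdio.h>

struct exception_run {
        uint64_t old_chunk;     /* first origin chunk in the run            */
        uint64_t new_chunk;     /* first COW chunk in the run               */
        uint64_t count;         /* extra consecutive chunks after the first */
};

/* Returns 0 on success, -1 if the chunk is in the middle of the run. */
static int remove_from_run(struct exception_run *e, uint64_t old_chunk)
{
        if (old_chunk == e->old_chunk) {
                /* Removing from the front: shift the start of the run. */
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk + e->count) {
                /* Neither end: the caller merged out of order. */
                return -1;
        }
        /* Removing from either end shrinks the run by one. */
        e->count--;
        return 0;
}

int main(void)
{
        /* Origin chunks 10..13 mapped to COW chunks 50..53. */
        struct exception_run e = { .old_chunk = 10, .new_chunk = 50, .count = 3 };

        remove_from_run(&e, 13);        /* trim from the back: run is now 10..12  */
        remove_from_run(&e, 10);        /* trim from the front: run is now 11..12 */
        printf("run: origin %llu..%llu -> cow %llu\n",
               (unsigned long long)e.old_chunk,
               (unsigned long long)(e.old_chunk + e.count),
               (unsigned long long)e.new_chunk);
        return 0;
}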