 drivers/md/dm-snap.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index bc52776c69cc..1498704467a7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -270,6 +270,10 @@ struct origin {
 static struct list_head *_origins;
 static struct rw_semaphore _origins_lock;
 
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
 static int init_origin_hash(void)
 {
 	int i;
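
The three globals added above use the kernel's compile-time initializers. For reference, a minimal sketch of the same primitives initialized at runtime instead, as would be needed for members of a dynamically allocated structure (the struct and function names here are illustrative, not from the patch):

	#include <linux/wait.h>
	#include <linux/spinlock.h>

	/* Illustrative only -- not part of the patch. */
	struct example_state {
		wait_queue_head_t done_wq;	/* waiters sleep here */
		spinlock_t lock;		/* guards the counter */
		u64 done_count;			/* monotonically increasing */
	};

	static void example_state_init(struct example_state *st)
	{
		/* Runtime equivalents of DECLARE_WAIT_QUEUE_HEAD() and
		 * DEFINE_SPINLOCK() used by the patch: */
		init_waitqueue_head(&st->done_wq);
		spin_lock_init(&st->lock);
		st->done_count = 0;
	}
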
@@ -847,14 +851,38 @@ out:
 	return r;
 }
 
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+			       sector_t sector, unsigned chunk_size);
+
 static void merge_callback(int read_err, unsigned long write_err,
 			   void *context);
 
+static uint64_t read_pending_exceptions_done_count(void)
+{
+	uint64_t pending_exceptions_done;
+
+	spin_lock(&_pending_exceptions_done_spinlock);
+	pending_exceptions_done = _pending_exceptions_done_count;
+	spin_unlock(&_pending_exceptions_done_spinlock);
+
+	return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+	spin_lock(&_pending_exceptions_done_spinlock);
+	_pending_exceptions_done_count++;
+	spin_unlock(&_pending_exceptions_done_spinlock);
+
+	wake_up_all(&_pending_exceptions_done);
+}
+
 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 {
 	int r;
 	chunk_t old_chunk, new_chunk;
 	struct dm_io_region src, dest;
+	uint64_t previous_count;
 
 	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
 	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
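
The accessors above take the spinlock because a plain uint64_t load or store is not guaranteed to be atomic on 32-bit architectures, so an unlocked reader could observe a torn value. As a point of comparison, a sketch of the same counter built on the kernel's atomic64_t, which is a different technique than the patch uses (identifiers are illustrative):

	#include <linux/atomic.h>
	#include <linux/wait.h>

	/* Illustrative alternative -- the patch uses a spinlock instead. */
	static DECLARE_WAIT_QUEUE_HEAD(example_done_wq);
	static atomic64_t example_done_count = ATOMIC64_INIT(0);

	static u64 example_read_done_count(void)
	{
		return atomic64_read(&example_done_count);
	}

	static void example_increment_done_count(void)
	{
		atomic64_inc(&example_done_count);
		/* Wake every waiter; each re-checks its own condition. */
		wake_up_all(&example_done_wq);
	}
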
@@ -887,6 +915,24 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 	src.sector = chunk_to_sector(s->store, new_chunk);
 	src.count = dest.count;
 
+	/*
+	 * Reallocate any exceptions needed in other snapshots then
+	 * wait for the pending exceptions to complete.
+	 * Each time any pending exception (globally on the system)
+	 * completes we are woken and repeat the process to find out
+	 * if we can proceed.  While this may not seem a particularly
+	 * efficient algorithm, it is not expected to have any
+	 * significant impact on performance.
+	 */
+	previous_count = read_pending_exceptions_done_count();
+	while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
+		wait_event(_pending_exceptions_done,
+			   (read_pending_exceptions_done_count() !=
+			    previous_count));
+		/* Retry after the wait, until all exceptions are done. */
+		previous_count = read_pending_exceptions_done_count();
+	}
+
 	down_write(&s->lock);
 	s->first_merging_chunk = old_chunk;
 	s->num_merging_chunks = 1;
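
The hunk above is an instance of the classic "event counter" wait pattern: snapshot the counter, attempt the work, and sleep until the counter moves past the snapshot. Because the counter only ever increases, a completion that races in between the snapshot and wait_event() makes the condition true immediately, so no wakeup can be missed. Condensed into a standalone sketch (try_work(), read_counter() and wq are hypothetical stand-ins, not from the patch):

	/* Illustrative only -- condensed from the hunk above. */
	static void wait_until_work_possible(void)
	{
		u64 prev = read_counter();	/* snapshot before trying */

		while (!try_work()) {		/* still blocked on others? */
			/*
			 * Sleep until at least one more event completes.
			 * If one completed between read_counter() and here,
			 * the condition is already true and we never sleep.
			 */
			wait_event(wq, read_counter() != prev);
			prev = read_counter();	/* re-snapshot, then retry */
		}
	}
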
@@ -1372,6 +1418,8 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	origin_bios = bio_list_get(&pe->origin_bios);
 	free_pending_exception(pe);
 
+	increment_pending_exceptions_done_count();
+
 	up_write(&s->lock);
 
 	/* Submit any pending write bios */
@@ -1963,6 +2011,41 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
 }
 
 /*
+ * Trigger exceptions in all non-merging snapshots.
+ *
+ * The chunk size of the merging snapshot may be larger than the chunk
+ * size of some other snapshot so we may need to reallocate multiple
+ * chunks in other snapshots.
+ *
+ * We scan all the overlapping exceptions in the other snapshots.
+ * Returns 1 if anything was reallocated and must be waited for,
+ * otherwise returns 0.
+ *
+ * size must be a multiple of merging_snap's chunk_size.
+ */
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+			       sector_t sector, unsigned size)
+{
+	int must_wait = 0;
+	sector_t n;
+	struct origin *o;
+
+	/*
+	 * The origin's __minimum_chunk_size() got stored in split_io
+	 * by snapshot_merge_resume().
+	 */
+	down_read(&_origins_lock);
+	o = __lookup_origin(merging_snap->origin->bdev);
+	for (n = 0; n < size; n += merging_snap->ti->split_io)
+		if (__origin_write(&o->snapshots, sector + n, NULL) ==
+		    DM_MAPIO_SUBMITTED)
+			must_wait = 1;
+	up_read(&_origins_lock);
+
+	return must_wait;
+}
+
+/*
  * Origin: maps a linear range of a device, with hooks for snapshotting.
  */
 
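
To see why origin_write_extent() steps by ti->split_io rather than by the merging snapshot's chunk size: per the comment in the hunk, split_io holds the origin's __minimum_chunk_size(), so one merged chunk can overlap several smaller chunks of other snapshots, each needing its own exception check. A worked example with illustrative numbers (not from the patch):

	/* Illustrative arithmetic only. */
	sector_t size = 64;	/* merging snapshot chunk, in sectors */
	sector_t split_io = 16;	/* smallest chunk size of any snapshot */
	sector_t n;
	unsigned checks = 0;

	for (n = 0; n < size; n += split_io)
		checks++;	/* one __origin_write() probe per step */

	/* checks == 4: one merged chunk spans four 16-sector chunks. */
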