author		Milan Broz <mbroz@redhat.com>		2008-02-07 21:11:27 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2008-02-07 21:11:27 -0500
commit		d74f81f8adc504a23be3babf347b9f69e9389924
tree		4a687e400479ad330bb36ded54012cd8b8b84ecf /drivers/md/dm-snap.c
parent		4f7f5c675fd6bacaae3c67be44de872dcff0e3b7
dm snapshot: combine consecutive exceptions in memory
Provided sector_t is 64 bits, reduce the in-memory footprint of the
snapshot exception table by the simple method of using unused bits of
the chunk number to combine consecutive entries.
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
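
The diff below (limited to dm-snap.c) leans on helpers such as dm_chunk_number(), dm_consecutive_chunk_count() and dm_consecutive_chunk_count_inc(), which pack a small run-length counter into the otherwise unused top bits of the 64-bit new_chunk value; those helpers live in the snapshot header rather than in this file. As a rough sketch of that encoding, assuming an 8-bit count over a 56-bit chunk number (the exact bit split and the struct layout shown here are assumptions for illustration, not taken from this diff):

/*
 * Illustrative only: approximate shape of the header helpers this patch
 * depends on.  The 8/56 bit split is an assumption made for the sketch.
 */
#include <stdint.h>

typedef uint64_t chunk_t;

#define DM_CHUNK_CONSECUTIVE_BITS	8
#define DM_CHUNK_NUMBER_BITS		56

struct dm_snap_exception {
	chunk_t old_chunk;
	chunk_t new_chunk;	/* top bits hold the consecutive-chunk count */
};

/* Strip the consecutive count, leaving the real chunk number. */
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1);
}

/* How many further chunks follow old_chunk/new_chunk contiguously. */
static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

/* Grow the run by one chunk. */
static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
}

Under this scheme an exception covering a single chunk simply has a count of zero, which is why dm_add_exception() below can note that the consecutive count is "implicitly initialised to zero".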
Diffstat (limited to 'drivers/md/dm-snap.c')
 -rw-r--r--  drivers/md/dm-snap.c | 82
 1 file changed, 68 insertions, 14 deletions
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index fad84654b045..ae24eab8cd81 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -213,11 +213,15 @@ static void unregister_snapshot(struct dm_snapshot *s)
 
 /*
  * Implementation of the exception hash tables.
+ * The lowest hash_shift bits of the chunk number are ignored, allowing
+ * some consecutive chunks to be grouped together.
  */
-static int init_exception_table(struct exception_table *et, uint32_t size)
+static int init_exception_table(struct exception_table *et, uint32_t size,
+                                unsigned hash_shift)
 {
         unsigned int i;
 
+        et->hash_shift = hash_shift;
         et->hash_mask = size - 1;
         et->table = dm_vcalloc(size, sizeof(struct list_head));
         if (!et->table)
@@ -248,7 +252,7 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
 
 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
 {
-        return chunk & et->hash_mask;
+        return (chunk >> et->hash_shift) & et->hash_mask;
 }
 
 static void insert_exception(struct exception_table *eh,
@@ -275,7 +279,8 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
 
         slot = &et->table[exception_hash(et, chunk)];
         list_for_each_entry (e, slot, hash_list)
-                if (e->old_chunk == chunk)
+                if (chunk >= e->old_chunk &&
+                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                         return e;
 
         return NULL;
@@ -307,6 +312,49 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
         mempool_free(pe, pending_pool);
 }
 
+static void insert_completed_exception(struct dm_snapshot *s,
+                                       struct dm_snap_exception *new_e)
+{
+        struct exception_table *eh = &s->complete;
+        struct list_head *l;
+        struct dm_snap_exception *e = NULL;
+
+        l = &eh->table[exception_hash(eh, new_e->old_chunk)];
+
+        /* Add immediately if this table doesn't support consecutive chunks */
+        if (!eh->hash_shift)
+                goto out;
+
+        /* List is ordered by old_chunk */
+        list_for_each_entry_reverse(e, l, hash_list) {
+                /* Insert after an existing chunk? */
+                if (new_e->old_chunk == (e->old_chunk +
+                                         dm_consecutive_chunk_count(e) + 1) &&
+                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
+                                         dm_consecutive_chunk_count(e) + 1)) {
+                        dm_consecutive_chunk_count_inc(e);
+                        free_exception(new_e);
+                        return;
+                }
+
+                /* Insert before an existing chunk? */
+                if (new_e->old_chunk == (e->old_chunk - 1) &&
+                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
+                        dm_consecutive_chunk_count_inc(e);
+                        e->old_chunk--;
+                        e->new_chunk--;
+                        free_exception(new_e);
+                        return;
+                }
+
+                if (new_e->old_chunk > e->old_chunk)
+                        break;
+        }
+
+out:
+        list_add(&new_e->hash_list, e ? &e->hash_list : l);
+}
+
 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 {
         struct dm_snap_exception *e;
@@ -316,8 +364,12 @@ int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
                 return -ENOMEM;
 
         e->old_chunk = old;
+
+        /* Consecutive_count is implicitly initialised to zero */
         e->new_chunk = new;
-        insert_exception(&s->complete, e);
+
+        insert_completed_exception(s, e);
+
         return 0;
 }
 
@@ -352,7 +404,8 @@ static int init_hash_tables(struct dm_snapshot *s)
         hash_size = min(hash_size, max_buckets);
 
         hash_size = rounddown_pow_of_two(hash_size);
-        if (init_exception_table(&s->complete, hash_size))
+        if (init_exception_table(&s->complete, hash_size,
+                                 DM_CHUNK_CONSECUTIVE_BITS))
                 return -ENOMEM;
 
         /*
@@ -363,7 +416,7 @@ static int init_hash_tables(struct dm_snapshot *s)
         if (hash_size < 64)
                 hash_size = 64;
 
-        if (init_exception_table(&s->pending, hash_size)) {
+        if (init_exception_table(&s->pending, hash_size, 0)) {
                 exit_exception_table(&s->complete, exception_cache);
                 return -ENOMEM;
         }
@@ -722,7 +775,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
          * Add a proper exception, and remove the
          * in-flight exception from the list.
          */
-        insert_exception(&s->complete, e);
+        insert_completed_exception(s, e);
 
  out:
         remove_exception(&pe->e);
@@ -856,11 +909,12 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 }
 
 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
-                            struct bio *bio)
+                            struct bio *bio, chunk_t chunk)
 {
         bio->bi_bdev = s->cow->bdev;
-        bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
-                (bio->bi_sector & s->chunk_mask);
+        bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
+                                 (chunk - e->old_chunk)) +
+                         (bio->bi_sector & s->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio,
@@ -891,7 +945,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
         /* If the block is already remapped - use that, else remap it */
         e = lookup_exception(&s->complete, chunk);
         if (e) {
-                remap_exception(s, e, bio);
+                remap_exception(s, e, bio, chunk);
                 goto out_unlock;
         }
 
@@ -908,7 +962,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                         goto out_unlock;
                 }
 
-                remap_exception(s, &pe->e, bio);
+                remap_exception(s, &pe->e, bio, chunk);
                 bio_list_add(&pe->snapshot_bios, bio);
 
                 r = DM_MAPIO_SUBMITTED;
@@ -1196,7 +1250,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 
 static struct target_type origin_target = {
         .name = "snapshot-origin",
-        .version = {1, 5, 0},
+        .version = {1, 6, 0},
         .module = THIS_MODULE,
         .ctr = origin_ctr,
         .dtr = origin_dtr,
@@ -1207,7 +1261,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
         .name = "snapshot",
-        .version = {1, 5, 0},
+        .version = {1, 6, 0},
         .module = THIS_MODULE,
         .ctr = snapshot_ctr,
         .dtr = snapshot_dtr,
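
To make the effect of the combined entries concrete, here is a hedged, standalone model of the range test done in lookup_exception() and the offset arithmetic done in remap_exception(). It reuses the sketch given above the diffstat; the helper names and chunk numbers are made up for the example and are not the kernel code.

/* Does 'chunk' fall inside the run of consecutive chunks described by e? */
static int exception_covers(struct dm_snap_exception *e, chunk_t chunk)
{
	return chunk >= e->old_chunk &&
	       chunk <= e->old_chunk + dm_consecutive_chunk_count(e);
}

/* Map an origin chunk to its COW chunk, preserving its offset in the run. */
static chunk_t exception_remap(struct dm_snap_exception *e, chunk_t chunk)
{
	return dm_chunk_number(e->new_chunk) + (chunk - e->old_chunk);
}

/*
 * Example: an entry with old_chunk = 100, new chunk number = 200 and a
 * consecutive count of 3 stands in for the four exceptions 100->200 ...
 * 103->203, so exception_covers(e, 102) is true and exception_remap(e, 102)
 * returns 202.
 */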