author    Milan Broz <mbroz@redhat.com>        2008-02-07 21:11:27 -0500
committer Alasdair G Kergon <agk@redhat.com>   2008-02-07 21:11:27 -0500
commit    d74f81f8adc504a23be3babf347b9f69e9389924 (patch)
tree      4a687e400479ad330bb36ded54012cd8b8b84ecf /drivers/md
parent    4f7f5c675fd6bacaae3c67be44de872dcff0e3b7 (diff)
dm snapshot: combine consecutive exceptions in memory
Provided sector_t is 64 bits, reduce the in-memory footprint of the
snapshot exception table by the simple method of using unused bits of
the chunk number to combine consecutive entries.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/dm-snap.c | 82
 -rw-r--r--  drivers/md/dm-snap.h | 50
 2 files changed, 116 insertions(+), 16 deletions(-)
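The patch's central idea is visible in the dm-snap.h hunk below: when chunk_t
is 64 bits wide, the top 8 bits of new_chunk record how many further chunks
follow contiguously, while the low 56 bits hold the chunk number itself. A
minimal standalone sketch of that encoding, using the same bit layout as the
patch (the main() driver is purely illustrative):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DM_CHUNK_CONSECUTIVE_BITS 8
	#define DM_CHUNK_NUMBER_BITS 56

	typedef uint64_t chunk_t;

	/* Low 56 bits: the chunk number within the device. */
	static inline chunk_t dm_chunk_number(chunk_t chunk)
	{
		return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
	}

	/* Top 8 bits: how many further chunks follow this one contiguously. */
	static inline unsigned dm_consecutive_chunk_count(chunk_t chunk)
	{
		return (unsigned)(chunk >> DM_CHUNK_NUMBER_BITS);
	}

	int main(void)
	{
		chunk_t new_chunk = 1000;	/* consecutive count starts at 0 */

		/* Record that one more chunk follows contiguously. */
		new_chunk += 1ULL << DM_CHUNK_NUMBER_BITS;

		assert(dm_chunk_number(new_chunk) == 1000);
		assert(dm_consecutive_chunk_count(new_chunk) == 1);
		printf("chunk %llu, %u consecutive chunk(s) follow\n",
		       (unsigned long long)dm_chunk_number(new_chunk),
		       dm_consecutive_chunk_count(new_chunk));
		return 0;
	}

One entry can thus describe up to 256 contiguous chunks, shrinking the
exception table without changing the on-disk format.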
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index fad84654b045..ae24eab8cd81 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -213,11 +213,15 @@ static void unregister_snapshot(struct dm_snapshot *s)
 
 /*
  * Implementation of the exception hash tables.
+ * The lowest hash_shift bits of the chunk number are ignored, allowing
+ * some consecutive chunks to be grouped together.
  */
-static int init_exception_table(struct exception_table *et, uint32_t size)
+static int init_exception_table(struct exception_table *et, uint32_t size,
+				unsigned hash_shift)
 {
 	unsigned int i;
 
+	et->hash_shift = hash_shift;
 	et->hash_mask = size - 1;
 	et->table = dm_vcalloc(size, sizeof(struct list_head));
 	if (!et->table)
@@ -248,7 +252,7 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
 
 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
 {
-	return chunk & et->hash_mask;
+	return (chunk >> et->hash_shift) & et->hash_mask;
 }
 
 static void insert_exception(struct exception_table *eh,
@@ -275,7 +279,8 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
 
 	slot = &et->table[exception_hash(et, chunk)];
 	list_for_each_entry (e, slot, hash_list)
-		if (e->old_chunk == chunk)
+		if (chunk >= e->old_chunk &&
+		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
 			return e;
 
 	return NULL;
@@ -307,6 +312,49 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
 	mempool_free(pe, pending_pool);
 }
 
+static void insert_completed_exception(struct dm_snapshot *s,
+				       struct dm_snap_exception *new_e)
+{
+	struct exception_table *eh = &s->complete;
+	struct list_head *l;
+	struct dm_snap_exception *e = NULL;
+
+	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
+
+	/* Add immediately if this table doesn't support consecutive chunks */
+	if (!eh->hash_shift)
+		goto out;
+
+	/* List is ordered by old_chunk */
+	list_for_each_entry_reverse(e, l, hash_list) {
+		/* Insert after an existing chunk? */
+		if (new_e->old_chunk == (e->old_chunk +
+					 dm_consecutive_chunk_count(e) + 1) &&
+		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
+					 dm_consecutive_chunk_count(e) + 1)) {
+			dm_consecutive_chunk_count_inc(e);
+			free_exception(new_e);
+			return;
+		}
+
+		/* Insert before an existing chunk? */
+		if (new_e->old_chunk == (e->old_chunk - 1) &&
+		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
+			dm_consecutive_chunk_count_inc(e);
+			e->old_chunk--;
+			e->new_chunk--;
+			free_exception(new_e);
+			return;
+		}
+
+		if (new_e->old_chunk > e->old_chunk)
+			break;
+	}
+
+out:
+	list_add(&new_e->hash_list, e ? &e->hash_list : l);
+}
+
 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 {
 	struct dm_snap_exception *e;
@@ -316,8 +364,12 @@ int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 		return -ENOMEM;
 
 	e->old_chunk = old;
+
+	/* Consecutive_count is implicitly initialised to zero */
 	e->new_chunk = new;
-	insert_exception(&s->complete, e);
+
+	insert_completed_exception(s, e);
+
 	return 0;
 }
 
@@ -352,7 +404,8 @@ static int init_hash_tables(struct dm_snapshot *s)
 	hash_size = min(hash_size, max_buckets);
 
 	hash_size = rounddown_pow_of_two(hash_size);
-	if (init_exception_table(&s->complete, hash_size))
+	if (init_exception_table(&s->complete, hash_size,
+				 DM_CHUNK_CONSECUTIVE_BITS))
 		return -ENOMEM;
 
 	/*
@@ -363,7 +416,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 	if (hash_size < 64)
 		hash_size = 64;
 
-	if (init_exception_table(&s->pending, hash_size)) {
+	if (init_exception_table(&s->pending, hash_size, 0)) {
 		exit_exception_table(&s->complete, exception_cache);
 		return -ENOMEM;
 	}
@@ -722,7 +775,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	 * Add a proper exception, and remove the
 	 * in-flight exception from the list.
 	 */
-	insert_exception(&s->complete, e);
+	insert_completed_exception(s, e);
 
  out:
 	remove_exception(&pe->e);
@@ -856,11 +909,12 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 }
 
 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
-			    struct bio *bio)
+			    struct bio *bio, chunk_t chunk)
 {
 	bio->bi_bdev = s->cow->bdev;
-	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
-		(bio->bi_sector & s->chunk_mask);
+	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
+				 (chunk - e->old_chunk)) +
+			 (bio->bi_sector & s->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio,
@@ -891,7 +945,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	/* If the block is already remapped - use that, else remap it */
 	e = lookup_exception(&s->complete, chunk);
 	if (e) {
-		remap_exception(s, e, bio);
+		remap_exception(s, e, bio, chunk);
 		goto out_unlock;
 	}
 
@@ -908,7 +962,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 		goto out_unlock;
 	}
 
-	remap_exception(s, &pe->e, bio);
+	remap_exception(s, &pe->e, bio, chunk);
 	bio_list_add(&pe->snapshot_bios, bio);
 
 	r = DM_MAPIO_SUBMITTED;
@@ -1196,7 +1250,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
@@ -1207,7 +1261,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 650e0f1f51d8..93bce5d49742 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -16,19 +16,22 @@
 
 struct exception_table {
 	uint32_t hash_mask;
+	unsigned hash_shift;
 	struct list_head *table;
 };
 
 /*
  * The snapshot code deals with largish chunks of the disk at a
- * time. Typically 64k - 256k.
+ * time. Typically 32k - 512k.
  */
-/* FIXME: can we get away with limiting these to a uint32_t ? */
 typedef sector_t chunk_t;
 
 /*
  * An exception is used where an old chunk of data has been
  * replaced by a new one.
+ * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
+ * of chunks that follow contiguously. Remaining bits hold the number of the
+ * chunk within the device.
  */
 struct dm_snap_exception {
 	struct list_head hash_list;
@@ -38,6 +41,49 @@ struct dm_snap_exception {
 };
 
 /*
+ * Functions to manipulate consecutive chunks
+ */
+# if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
+# define DM_CHUNK_CONSECUTIVE_BITS 8
+# define DM_CHUNK_NUMBER_BITS 56
+
+static inline chunk_t dm_chunk_number(chunk_t chunk)
+{
+	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
+}
+
+static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+{
+	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
+}
+
+static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+{
+	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
+
+	BUG_ON(!dm_consecutive_chunk_count(e));
+}
+
+# else
+# define DM_CHUNK_CONSECUTIVE_BITS 0
+
+static inline chunk_t dm_chunk_number(chunk_t chunk)
+{
+	return chunk;
+}
+
+static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+{
+	return 0;
+}
+
+static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+{
+}
+
+# endif
+
+/*
  * Abstraction to handle the meta/layout of exception stores (the
  * COW device).
  */
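
At map time a combined entry covers a whole run: the patched remap_exception()
above adds the offset (chunk - e->old_chunk) to the stored new chunk number
before converting to a sector. A worked sketch of that arithmetic, assuming
chunk_to_sector() simply shifts by chunk_shift (struct snap and the values
below are hypothetical stand-ins, not the driver's structures):

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t chunk_t;
	typedef uint64_t sector_t;

	#define DM_CHUNK_NUMBER_BITS 56

	static inline chunk_t dm_chunk_number(chunk_t chunk)
	{
		return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
	}

	/* Hypothetical stand-in for the relevant fields of struct dm_snapshot. */
	struct snap {
		unsigned chunk_shift;	/* log2(sectors per chunk)   */
		sector_t chunk_mask;	/* (1 << chunk_shift) - 1    */
	};

	/* Assumed definition: chunk number -> first sector of that chunk. */
	static sector_t chunk_to_sector(const struct snap *s, chunk_t chunk)
	{
		return chunk << s->chunk_shift;
	}

	int main(void)
	{
		struct snap s = { .chunk_shift = 4, .chunk_mask = 15 };

		/* One combined exception: old chunks 100..102 -> new 500..502. */
		chunk_t old_chunk = 100;
		chunk_t new_chunk = 500 | (2ULL << DM_CHUNK_NUMBER_BITS);

		chunk_t chunk = 101;			/* bio hits the middle chunk */
		sector_t bi_sector = chunk * 16 + 3;	/* 3 sectors into that chunk */

		/* The remap computed by the patched remap_exception(). */
		bi_sector = chunk_to_sector(&s, dm_chunk_number(new_chunk) +
					    (chunk - old_chunk)) +
			    (bi_sector & s.chunk_mask);

		assert(bi_sector == 501 * 16 + 3);	/* chunk 501, same offset */
		return 0;
	}

Because lookup_exception() matches any chunk in [old_chunk, old_chunk +
consecutive_count], three merged chunks need one table entry instead of three.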