author      Jon Brassow <jbrassow@redhat.com>          2009-12-10 18:52:11 -0500
committer   Alasdair G Kergon <agk@redhat.com>         2009-12-10 18:52:11 -0500
commit      3510cb94ff7b04b016bd22bfee913e2c1c05c066 (patch)
tree        6d8f1d3a8873d52b561c26b5b631763908d0f015 /drivers/md/dm-snap.c
parent      191437a53c8269df3a2c6199206781e742c57bb5 (diff)
dm snapshot: rename exception functions
Rename the exception functions in preparation for pulling them out of
dm-snap.c for broader use.
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
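
For orientation, the sketch below shows how the renamed table helpers pair up after this patch, modelled on the init_hash_tables() and __free_exceptions() hunks further down. The wrapper function and the literal hash size are illustrative only and not part of the patch; at this stage all of these helpers remain static to dm-snap.c.

/* Sketch only: init/exit pairing with the renamed helpers (hypothetical wrapper). */
static int example_init_complete_table(struct dm_snapshot *s)
{
        /* was: init_exception_table(&s->complete, hash_size, DM_CHUNK_CONSECUTIVE_BITS) */
        if (dm_exception_table_init(&s->complete, 64 /* illustrative size */,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Entries are then added with dm_insert_exception() and found with
         * dm_lookup_exception(); on teardown (was exit_exception_table()):
         */
        dm_exception_table_exit(&s->complete, exception_cache);

        return 0;
}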
Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--   drivers/md/dm-snap.c   66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f40331cb1f6e..cb4c2c3a43f0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -351,8 +351,8 @@ static void unregister_snapshot(struct dm_snapshot *s)
  * The lowest hash_shift bits of the chunk number are ignored, allowing
  * some consecutive chunks to be grouped together.
  */
-static int init_exception_table(struct dm_exception_table *et, uint32_t size,
-                                unsigned hash_shift)
+static int dm_exception_table_init(struct dm_exception_table *et,
+                                   uint32_t size, unsigned hash_shift)
 {
         unsigned int i;
 
@@ -368,8 +368,8 @@ static int init_exception_table(struct dm_exception_table *et, uint32_t size,
         return 0;
 }
 
-static void exit_exception_table(struct dm_exception_table *et,
-                                 struct kmem_cache *mem)
+static void dm_exception_table_exit(struct dm_exception_table *et,
+                                    struct kmem_cache *mem)
 {
         struct list_head *slot;
         struct dm_exception *ex, *next;
@@ -391,7 +391,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
         return (chunk >> et->hash_shift) & et->hash_mask;
 }
 
-static void remove_exception(struct dm_exception *e)
+static void dm_remove_exception(struct dm_exception *e)
 {
         list_del(&e->hash_list);
 }
@@ -400,8 +400,8 @@ static void remove_exception(struct dm_exception *e)
  * Return the exception data for a sector, or NULL if not
  * remapped.
  */
-static struct dm_exception *lookup_exception(struct dm_exception_table *et,
-                                             chunk_t chunk)
+static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
+                                                chunk_t chunk)
 {
         struct list_head *slot;
         struct dm_exception *e;
@@ -415,7 +415,7 @@ static struct dm_exception *lookup_exception(struct dm_exception_table *et,
         return NULL;
 }
 
-static struct dm_exception *alloc_exception(void)
+static struct dm_exception *alloc_completed_exception(void)
 {
         struct dm_exception *e;
 
@@ -426,7 +426,7 @@ static struct dm_exception *alloc_exception(void)
         return e;
 }
 
-static void free_exception(struct dm_exception *e)
+static void free_completed_exception(struct dm_exception *e)
 {
         kmem_cache_free(exception_cache, e);
 }
@@ -451,8 +451,8 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
         atomic_dec(&s->pending_exceptions_count);
 }
 
-static void insert_exception(struct dm_exception_table *eh,
-                             struct dm_exception *new_e)
+static void dm_insert_exception(struct dm_exception_table *eh,
+                                struct dm_exception *new_e)
 {
         struct list_head *l;
         struct dm_exception *e = NULL;
@@ -471,7 +471,7 @@ static void insert_exception(struct dm_exception_table *eh,
                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                          dm_consecutive_chunk_count(e) + 1)) {
                         dm_consecutive_chunk_count_inc(e);
-                        free_exception(new_e);
+                        free_completed_exception(new_e);
                         return;
                 }
 
@@ -481,7 +481,7 @@ static void insert_exception(struct dm_exception_table *eh,
                         dm_consecutive_chunk_count_inc(e);
                         e->old_chunk--;
                         e->new_chunk--;
-                        free_exception(new_e);
+                        free_completed_exception(new_e);
                         return;
                 }
 
@@ -502,7 +502,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
         struct dm_snapshot *s = context;
         struct dm_exception *e;
 
-        e = alloc_exception();
+        e = alloc_completed_exception();
         if (!e)
                 return -ENOMEM;
 
@@ -511,7 +511,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
         /* Consecutive_count is implicitly initialised to zero */
         e->new_chunk = new;
 
-        insert_exception(&s->complete, e);
+        dm_insert_exception(&s->complete, e);
 
         return 0;
 }
@@ -568,8 +568,8 @@ static int init_hash_tables(struct dm_snapshot *s)
         if (hash_size < 64)
                 hash_size = 64;
         hash_size = rounddown_pow_of_two(hash_size);
-        if (init_exception_table(&s->complete, hash_size,
-                                 DM_CHUNK_CONSECUTIVE_BITS))
+        if (dm_exception_table_init(&s->complete, hash_size,
+                                    DM_CHUNK_CONSECUTIVE_BITS))
                 return -ENOMEM;
 
         /*
@@ -580,8 +580,8 @@ static int init_hash_tables(struct dm_snapshot *s)
         if (hash_size < 64)
                 hash_size = 64;
 
-        if (init_exception_table(&s->pending, hash_size, 0)) {
-                exit_exception_table(&s->complete, exception_cache);
+        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
+                dm_exception_table_exit(&s->complete, exception_cache);
                 return -ENOMEM;
         }
 
@@ -716,8 +716,8 @@ bad_pending_pool:
         dm_kcopyd_client_destroy(s->kcopyd_client);
 
 bad_kcopyd:
-        exit_exception_table(&s->pending, pending_cache);
-        exit_exception_table(&s->complete, exception_cache);
+        dm_exception_table_exit(&s->pending, pending_cache);
+        dm_exception_table_exit(&s->complete, exception_cache);
 
 bad_hash_tables:
         dm_put_device(ti, s->origin);
@@ -737,8 +737,8 @@ static void __free_exceptions(struct dm_snapshot *s)
         dm_kcopyd_client_destroy(s->kcopyd_client);
         s->kcopyd_client = NULL;
 
-        exit_exception_table(&s->pending, pending_cache);
-        exit_exception_table(&s->complete, exception_cache);
+        dm_exception_table_exit(&s->pending, pending_cache);
+        dm_exception_table_exit(&s->complete, exception_cache);
 }
 
 static void snapshot_dtr(struct dm_target *ti)
@@ -891,7 +891,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
                 goto out;
         }
 
-        e = alloc_exception();
+        e = alloc_completed_exception();
         if (!e) {
                 down_write(&s->lock);
                 __invalidate_snapshot(s, -ENOMEM);
@@ -902,7 +902,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 
         down_write(&s->lock);
         if (!s->valid) {
-                free_exception(e);
+                free_completed_exception(e);
                 error = 1;
                 goto out;
         }
@@ -918,10 +918,10 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
          * Add a proper exception, and remove the
          * in-flight exception from the list.
          */
-        insert_exception(&s->complete, e);
+        dm_insert_exception(&s->complete, e);
 
  out:
-        remove_exception(&pe->e);
+        dm_remove_exception(&pe->e);
         snapshot_bios = bio_list_get(&pe->snapshot_bios);
         origin_bios = put_pending_exception(pe);
 
@@ -989,7 +989,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 static struct dm_snap_pending_exception *
 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
 {
-        struct dm_exception *e = lookup_exception(&s->pending, chunk);
+        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
 
         if (!e)
                 return NULL;
@@ -1030,7 +1030,7 @@ __find_pending_exception(struct dm_snapshot *s,
         }
 
         get_pending_exception(pe);
-        insert_exception(&s->pending, &pe->e);
+        dm_insert_exception(&s->pending, &pe->e);
 
         return pe;
 }
@@ -1077,7 +1077,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
         }
 
         /* If the block is already remapped - use that, else remap it */
-        e = lookup_exception(&s->complete, chunk);
+        e = dm_lookup_exception(&s->complete, chunk);
         if (e) {
                 remap_exception(s, e, bio, chunk);
                 goto out_unlock;
@@ -1101,7 +1101,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                 goto out_unlock;
         }
 
-        e = lookup_exception(&s->complete, chunk);
+        e = dm_lookup_exception(&s->complete, chunk);
         if (e) {
                 free_pending_exception(pe);
                 remap_exception(s, e, bio, chunk);
@@ -1254,7 +1254,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
                  * ref_count is initialised to 1 so pending_complete()
                  * won't destroy the primary_pe while we're inside this loop.
                  */
-                e = lookup_exception(&snap->complete, chunk);
+                e = dm_lookup_exception(&snap->complete, chunk);
                 if (e)
                         goto next_snapshot;
 
@@ -1269,7 +1269,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
                         goto next_snapshot;
                 }
 
-                e = lookup_exception(&snap->complete, chunk);
+                e = dm_lookup_exception(&snap->complete, chunk);
                 if (e) {
                         free_pending_exception(pe);
                         goto next_snapshot;