diff options
author | Mikulas Patocka <mpatocka@redhat.com> | 2014-01-13 19:14:04 -0500 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-01-14 13:38:32 -0500 |
commit | 2cadabd512acca99e6553d303eaedc97a3178a4d (patch) | |
tree | aec274ee48975f8ad9f0b53419c6272c9ec8439c | |
parent | 119bc547362e5252074f81f56b8fcdac45cedff4 (diff) |
dm snapshot: prepare for switch to using dm-bufio
Change the functions get_exception, read_exception and insert_exceptions
so that ps->area is passed as an argument.
This patch doesn't change any functionality, but it refactors the code
to allow for a cleaner switch over to using dm-bufio.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r-- | drivers/md/dm-snap-persistent.c | 26 |
1 file changed, 14 insertions, 12 deletions
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 2f5a9f83fc8d..ba792ae068b7 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -402,17 +402,18 @@ static int write_header(struct pstore *ps) | |||
402 | /* | 402 | /* |
403 | * Access functions for the disk exceptions, these do the endian conversions. | 403 | * Access functions for the disk exceptions, these do the endian conversions. |
404 | */ | 404 | */ |
405 | static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) | 405 | static struct disk_exception *get_exception(struct pstore *ps, void *ps_area, |
406 | uint32_t index) | ||
406 | { | 407 | { |
407 | BUG_ON(index >= ps->exceptions_per_area); | 408 | BUG_ON(index >= ps->exceptions_per_area); |
408 | 409 | ||
409 | return ((struct disk_exception *) ps->area) + index; | 410 | return ((struct disk_exception *) ps_area) + index; |
410 | } | 411 | } |
411 | 412 | ||
412 | static void read_exception(struct pstore *ps, | 413 | static void read_exception(struct pstore *ps, void *ps_area, |
413 | uint32_t index, struct core_exception *result) | 414 | uint32_t index, struct core_exception *result) |
414 | { | 415 | { |
415 | struct disk_exception *de = get_exception(ps, index); | 416 | struct disk_exception *de = get_exception(ps, ps_area, index); |
416 | 417 | ||
417 | /* copy it */ | 418 | /* copy it */ |
418 | result->old_chunk = le64_to_cpu(de->old_chunk); | 419 | result->old_chunk = le64_to_cpu(de->old_chunk); |
@@ -422,7 +423,7 @@ static void read_exception(struct pstore *ps, | |||
422 | static void write_exception(struct pstore *ps, | 423 | static void write_exception(struct pstore *ps, |
423 | uint32_t index, struct core_exception *e) | 424 | uint32_t index, struct core_exception *e) |
424 | { | 425 | { |
425 | struct disk_exception *de = get_exception(ps, index); | 426 | struct disk_exception *de = get_exception(ps, ps->area, index); |
426 | 427 | ||
427 | /* copy it */ | 428 | /* copy it */ |
428 | de->old_chunk = cpu_to_le64(e->old_chunk); | 429 | de->old_chunk = cpu_to_le64(e->old_chunk); |
@@ -431,7 +432,7 @@ static void write_exception(struct pstore *ps, | |||
431 | 432 | ||
432 | static void clear_exception(struct pstore *ps, uint32_t index) | 433 | static void clear_exception(struct pstore *ps, uint32_t index) |
433 | { | 434 | { |
434 | struct disk_exception *de = get_exception(ps, index); | 435 | struct disk_exception *de = get_exception(ps, ps->area, index); |
435 | 436 | ||
436 | /* clear it */ | 437 | /* clear it */ |
437 | de->old_chunk = 0; | 438 | de->old_chunk = 0; |
@@ -443,7 +444,7 @@ static void clear_exception(struct pstore *ps, uint32_t index) | |||
443 | * 'full' is filled in to indicate if the area has been | 444 | * 'full' is filled in to indicate if the area has been |
444 | * filled. | 445 | * filled. |
445 | */ | 446 | */ |
446 | static int insert_exceptions(struct pstore *ps, | 447 | static int insert_exceptions(struct pstore *ps, void *ps_area, |
447 | int (*callback)(void *callback_context, | 448 | int (*callback)(void *callback_context, |
448 | chunk_t old, chunk_t new), | 449 | chunk_t old, chunk_t new), |
449 | void *callback_context, | 450 | void *callback_context, |
@@ -457,7 +458,7 @@ static int insert_exceptions(struct pstore *ps, | |||
457 | *full = 1; | 458 | *full = 1; |
458 | 459 | ||
459 | for (i = 0; i < ps->exceptions_per_area; i++) { | 460 | for (i = 0; i < ps->exceptions_per_area; i++) { |
460 | read_exception(ps, i, &e); | 461 | read_exception(ps, ps_area, i, &e); |
461 | 462 | ||
462 | /* | 463 | /* |
463 | * If the new_chunk is pointing at the start of | 464 | * If the new_chunk is pointing at the start of |
@@ -504,7 +505,8 @@ static int read_exceptions(struct pstore *ps, | |||
504 | if (r) | 505 | if (r) |
505 | return r; | 506 | return r; |
506 | 507 | ||
507 | r = insert_exceptions(ps, callback, callback_context, &full); | 508 | r = insert_exceptions(ps, ps->area, callback, callback_context, |
509 | &full); | ||
508 | if (r) | 510 | if (r) |
509 | return r; | 511 | return r; |
510 | } | 512 | } |
@@ -734,7 +736,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
734 | ps->current_committed = ps->exceptions_per_area; | 736 | ps->current_committed = ps->exceptions_per_area; |
735 | } | 737 | } |
736 | 738 | ||
737 | read_exception(ps, ps->current_committed - 1, &ce); | 739 | read_exception(ps, ps->area, ps->current_committed - 1, &ce); |
738 | *last_old_chunk = ce.old_chunk; | 740 | *last_old_chunk = ce.old_chunk; |
739 | *last_new_chunk = ce.new_chunk; | 741 | *last_new_chunk = ce.new_chunk; |
740 | 742 | ||
@@ -744,8 +746,8 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
744 | */ | 746 | */ |
745 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; | 747 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; |
746 | nr_consecutive++) { | 748 | nr_consecutive++) { |
747 | read_exception(ps, ps->current_committed - 1 - nr_consecutive, | 749 | read_exception(ps, ps->area, |
748 | &ce); | 750 | ps->current_committed - 1 - nr_consecutive, &ce); |
749 | if (ce.old_chunk != *last_old_chunk - nr_consecutive || | 751 | if (ce.old_chunk != *last_old_chunk - nr_consecutive || |
750 | ce.new_chunk != *last_new_chunk - nr_consecutive) | 752 | ce.new_chunk != *last_new_chunk - nr_consecutive) |
751 | break; | 753 | break; |