Diffstat (limited to 'drivers/md/dm-snap-persistent.c')
-rw-r--r--	drivers/md/dm-snap-persistent.c	| 90 ++++++++++++++++++++++++------
1 file changed, 72 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2d2b1b7588d7..d6e88178d22c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -13,10 +13,13 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/dm-io.h>
+#include "dm-bufio.h"
 
 #define DM_MSG_PREFIX "persistent snapshot"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
 
+#define DM_PREFETCH_CHUNKS		12
+
 /*-----------------------------------------------------------------
  * Persistent snapshots, by persistent we mean that the snapshot
  * will survive a reboot.
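(DM_PREFETCH_CHUNKS is the readahead window used by read_exceptions() further down: dm-bufio keeps up to 12 metadata areas in flight ahead of the one currently being parsed, and the bufio client is sized for 1 + DM_PREFETCH_CHUNKS buffers to match. The value 12 appears to be a tuning choice rather than anything dictated by the on-disk format.)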
@@ -257,6 +260,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
 	INIT_WORK_ONSTACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
 	flush_workqueue(ps->metadata_wq);
+	destroy_work_on_stack(&req.work);
 
 	return req.result;
 }
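The added destroy_work_on_stack() pairs with the INIT_WORK_ONSTACK() above: with CONFIG_DEBUG_OBJECTS_WORK enabled, the init registers a debug object for the on-stack work item, and it must be retired before the stack frame is reused. A minimal sketch of the full pattern, with hypothetical names (example_fn, run_sync, wq):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* runs once in workqueue context */
}

static void run_sync(struct workqueue_struct *wq)
{
	struct work_struct w;

	INIT_WORK_ONSTACK(&w, example_fn);	/* registers the on-stack debug object */
	queue_work(wq, &w);
	flush_workqueue(wq);			/* wait until example_fn has run */
	destroy_work_on_stack(&w);		/* retire the debug object before 'w' goes out of scope */
}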
@@ -401,17 +405,18 @@ static int write_header(struct pstore *ps)
 /*
  * Access functions for the disk exceptions, these do the endian conversions.
  */
-static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
+static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
+					    uint32_t index)
 {
 	BUG_ON(index >= ps->exceptions_per_area);
 
-	return ((struct disk_exception *) ps->area) + index;
+	return ((struct disk_exception *) ps_area) + index;
 }
 
-static void read_exception(struct pstore *ps,
+static void read_exception(struct pstore *ps, void *ps_area,
 			   uint32_t index, struct core_exception *result)
 {
-	struct disk_exception *de = get_exception(ps, index);
+	struct disk_exception *de = get_exception(ps, ps_area, index);
 
 	/* copy it */
 	result->old_chunk = le64_to_cpu(de->old_chunk);
@@ -421,7 +426,7 @@ static void read_exception(struct pstore *ps,
 static void write_exception(struct pstore *ps,
 			    uint32_t index, struct core_exception *e)
 {
-	struct disk_exception *de = get_exception(ps, index);
+	struct disk_exception *de = get_exception(ps, ps->area, index);
 
 	/* copy it */
 	de->old_chunk = cpu_to_le64(e->old_chunk);
@@ -430,7 +435,7 @@ static void write_exception(struct pstore *ps,
 
 static void clear_exception(struct pstore *ps, uint32_t index)
 {
-	struct disk_exception *de = get_exception(ps, index);
+	struct disk_exception *de = get_exception(ps, ps->area, index);
 
 	/* clear it */
 	de->old_chunk = 0;
@@ -442,7 +447,7 @@ static void clear_exception(struct pstore *ps, uint32_t index)
  * 'full' is filled in to indicate if the area has been
  * filled.
  */
-static int insert_exceptions(struct pstore *ps,
+static int insert_exceptions(struct pstore *ps, void *ps_area,
 			     int (*callback)(void *callback_context,
 					     chunk_t old, chunk_t new),
 			     void *callback_context,
@@ -456,7 +461,7 @@ static int insert_exceptions(struct pstore *ps,
 		*full = 1;
 
 	for (i = 0; i < ps->exceptions_per_area; i++) {
-		read_exception(ps, i, &e);
+		read_exception(ps, ps_area, i, &e);
 
 		/*
 		 * If the new_chunk is pointing at the start of
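(Threading the ps_area pointer through get_exception()/read_exception()/insert_exceptions() is what lets read_exceptions() in the next hunk parse exceptions straight out of a dm-bufio buffer; the write-side helpers write_exception() and clear_exception() keep operating on the pstore's own scratch area, now passed explicitly as ps->area.)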
@@ -493,26 +498,75 @@ static int read_exceptions(struct pstore *ps,
 			   void *callback_context)
 {
 	int r, full = 1;
+	struct dm_bufio_client *client;
+	chunk_t prefetch_area = 0;
+
+	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
+					ps->store->chunk_size << SECTOR_SHIFT,
+					1, 0, NULL, NULL);
+
+	if (IS_ERR(client))
+		return PTR_ERR(client);
+
+	/*
+	 * Setup for one current buffer + desired readahead buffers.
+	 */
+	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
 
 	/*
 	 * Keeping reading chunks and inserting exceptions until
 	 * we find a partially full area.
 	 */
 	for (ps->current_area = 0; full; ps->current_area++) {
-		r = area_io(ps, READ);
-		if (r)
-			return r;
+		struct dm_buffer *bp;
+		void *area;
+		chunk_t chunk;
+
+		if (unlikely(prefetch_area < ps->current_area))
+			prefetch_area = ps->current_area;
+
+		if (DM_PREFETCH_CHUNKS) do {
+			chunk_t pf_chunk = area_location(ps, prefetch_area);
+			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+				break;
+			dm_bufio_prefetch(client, pf_chunk, 1);
+			prefetch_area++;
+			if (unlikely(!prefetch_area))
+				break;
+		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+
+		chunk = area_location(ps, ps->current_area);
+
+		area = dm_bufio_read(client, chunk, &bp);
+		if (unlikely(IS_ERR(area))) {
+			r = PTR_ERR(area);
+			goto ret_destroy_bufio;
+		}
 
-		r = insert_exceptions(ps, callback, callback_context, &full);
-		if (r)
-			return r;
+		r = insert_exceptions(ps, area, callback, callback_context,
+				      &full);
+
+		if (!full)
+			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
+		dm_bufio_release(bp);
+
+		dm_bufio_forget(client, chunk);
+
+		if (unlikely(r))
+			goto ret_destroy_bufio;
 	}
 
 	ps->current_area--;
 
 	skip_metadata(ps);
 
-	return 0;
+	r = 0;
+
+ret_destroy_bufio:
+	dm_bufio_client_destroy(client);
+
+	return r;
 }
 
 static struct pstore *get_info(struct dm_exception_store *store)
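For reference, the dm-bufio calls introduced above follow a create -> set_minimum_buffers -> prefetch -> read -> release -> forget -> destroy lifecycle. A minimal standalone sketch, assuming a block device bdev and a 4KB block size (read_one_block is a hypothetical name):

#include <linux/blkdev.h>
#include <linux/err.h>
#include "dm-bufio.h"

static int read_one_block(struct block_device *bdev, sector_t block)
{
	struct dm_bufio_client *client;
	struct dm_buffer *bp;
	void *data;

	client = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(client))
		return PTR_ERR(client);

	dm_bufio_prefetch(client, block, 1);		/* optional hint: start the I/O early */

	data = dm_bufio_read(client, block, &bp);	/* sleeps until the data is valid */
	if (IS_ERR(data)) {
		dm_bufio_client_destroy(client);
		return PTR_ERR(data);
	}

	/* ... consume 'data' ... */

	dm_bufio_release(bp);			/* drop the hold; the buffer stays cached */
	dm_bufio_forget(client, block);		/* evict it, as it will not be read again */
	dm_bufio_client_destroy(client);
	return 0;
}

read_exceptions() uses dm_bufio_forget() for what looks like the same reason: each metadata area is parsed exactly once at snapshot load, so keeping it cached would only crowd out the prefetched areas.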
@@ -733,7 +787,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
 		ps->current_committed = ps->exceptions_per_area;
 	}
 
-	read_exception(ps, ps->current_committed - 1, &ce);
+	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
 	*last_old_chunk = ce.old_chunk;
 	*last_new_chunk = ce.new_chunk;
 
@@ -743,8 +797,8 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
 	 */
 	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
 	     nr_consecutive++) {
-		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
-			       &ce);
+		read_exception(ps, ps->area,
+			       ps->current_committed - 1 - nr_consecutive, &ce);
 		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
 		    ce.new_chunk != *last_new_chunk - nr_consecutive)
 			break;