Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--  drivers/md/dm-snap.c  384
1 file changed, 200 insertions, 184 deletions
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 65ff82ff124e..981a0413068f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/blkdev.h>
-#include <linux/ctype.h>
 #include <linux/device-mapper.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
@@ -20,9 +19,9 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/workqueue.h>
 
 #include "dm-exception-store.h"
-#include "dm-snap.h"
 #include "dm-bio-list.h"
 
 #define DM_MSG_PREFIX "snapshots"
@@ -47,9 +46,76 @@
  */
 #define MIN_IOS 256
 
+#define DM_TRACKED_CHUNK_HASH_SIZE	16
+#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
+					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
+struct exception_table {
+	uint32_t hash_mask;
+	unsigned hash_shift;
+	struct list_head *table;
+};
+
+struct dm_snapshot {
+	struct rw_semaphore lock;
+
+	struct dm_dev *origin;
+
+	/* List of snapshots per Origin */
+	struct list_head list;
+
+	/* You can't use a snapshot if this is 0 (e.g. if full) */
+	int valid;
+
+	/* Origin writes don't trigger exceptions until this is set */
+	int active;
+
+	mempool_t *pending_pool;
+
+	atomic_t pending_exceptions_count;
+
+	struct exception_table pending;
+	struct exception_table complete;
+
+	/*
+	 * pe_lock protects all pending_exception operations and access
+	 * as well as the snapshot_bios list.
+	 */
+	spinlock_t pe_lock;
+
+	/* The on disk metadata handler */
+	struct dm_exception_store *store;
+
+	struct dm_kcopyd_client *kcopyd_client;
+
+	/* Queue of snapshot writes for ksnapd to flush */
+	struct bio_list queued_bios;
+	struct work_struct queued_bios_work;
+
+	/* Chunks with outstanding reads */
+	mempool_t *tracked_chunk_pool;
+	spinlock_t tracked_chunk_lock;
+	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+};
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
+static sector_t chunk_to_sector(struct dm_exception_store *store,
+				chunk_t chunk)
+{
+	return chunk << store->chunk_shift;
+}
+
+static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
+{
+	/*
+	 * There is only ever one instance of a particular block
+	 * device so we can compare pointers safely.
+	 */
+	return lhs == rhs;
+}
+
 struct dm_snap_pending_exception {
 	struct dm_snap_exception e;
 
@@ -476,11 +542,11 @@ static int init_hash_tables(struct dm_snapshot *s)
 	 * Calculate based on the size of the original volume or
 	 * the COW volume...
 	 */
-	cow_dev_size = get_dev_size(s->cow->bdev);
+	cow_dev_size = get_dev_size(s->store->cow->bdev);
 	origin_dev_size = get_dev_size(s->origin->bdev);
 	max_buckets = calc_max_buckets();
 
-	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
+	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
 	hash_size = min(hash_size, max_buckets);
 
 	hash_size = rounddown_pow_of_two(hash_size);
@@ -505,58 +571,6 @@ static int init_hash_tables(struct dm_snapshot *s)
 }
 
 /*
- * Round a number up to the nearest 'size' boundary. size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
-static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
-			  char **error)
-{
-	unsigned long chunk_size;
-	char *value;
-
-	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
-		*error = "Invalid chunk size";
-		return -EINVAL;
-	}
-
-	if (!chunk_size) {
-		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
-		return 0;
-	}
-
-	/*
-	 * Chunk size must be multiple of page size. Silently
-	 * round up if it's not.
-	 */
-	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
-
-	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size)) {
-		*error = "Chunk size is not a power of 2";
-		return -EINVAL;
-	}
-
-	/* Validate the chunk size against the device block size */
-	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
-		*error = "Chunk size is not a multiple of device blocksize";
-		return -EINVAL;
-	}
-
-	s->chunk_size = chunk_size;
-	s->chunk_mask = chunk_size - 1;
-	s->chunk_shift = ffs(chunk_size) - 1;
-
-	return 0;
-}
-
-/*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
@@ -564,91 +578,68 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	struct dm_snapshot *s;
 	int i;
 	int r = -EINVAL;
-	char persistent;
 	char *origin_path;
-	char *cow_path;
+	struct dm_exception_store *store;
+	unsigned args_used;
 
 	if (argc != 4) {
 		ti->error = "requires exactly 4 arguments";
 		r = -EINVAL;
-		goto bad1;
+		goto bad_args;
 	}
 
 	origin_path = argv[0];
-	cow_path = argv[1];
-	persistent = toupper(*argv[2]);
+	argv++;
+	argc--;
 
-	if (persistent != 'P' && persistent != 'N') {
-		ti->error = "Persistent flag is not P or N";
+	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
+	if (r) {
+		ti->error = "Couldn't create exception store";
 		r = -EINVAL;
-		goto bad1;
+		goto bad_args;
 	}
 
+	argv += args_used;
+	argc -= args_used;
+
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (s == NULL) {
+	if (!s) {
 		ti->error = "Cannot allocate snapshot context private "
 			    "structure";
 		r = -ENOMEM;
-		goto bad1;
+		goto bad_snap;
 	}
 
 	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
 	if (r) {
 		ti->error = "Cannot get origin device";
-		goto bad2;
-	}
-
-	r = dm_get_device(ti, cow_path, 0, 0,
-			  FMODE_READ | FMODE_WRITE, &s->cow);
-	if (r) {
-		dm_put_device(ti, s->origin);
-		ti->error = "Cannot get COW device";
-		goto bad2;
+		goto bad_origin;
 	}
 
-	r = set_chunk_size(s, argv[3], &ti->error);
-	if (r)
-		goto bad3;
-
-	s->type = persistent;
-
+	s->store = store;
 	s->valid = 1;
 	s->active = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
-	s->ti = ti;
 
 	/* Allocate hash table for COW data */
 	if (init_hash_tables(s)) {
 		ti->error = "Unable to allocate hash table space";
 		r = -ENOMEM;
-		goto bad3;
-	}
-
-	s->store.snap = s;
-
-	if (persistent == 'P')
-		r = dm_create_persistent(&s->store);
-	else
-		r = dm_create_transient(&s->store);
-
-	if (r) {
-		ti->error = "Couldn't create exception store";
-		r = -EINVAL;
-		goto bad4;
+		goto bad_hash_tables;
 	}
 
 	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
 	if (r) {
 		ti->error = "Could not create kcopyd client";
-		goto bad5;
+		goto bad_kcopyd;
 	}
 
 	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
 	if (!s->pending_pool) {
 		ti->error = "Could not allocate mempool for pending exceptions";
-		goto bad6;
+		goto bad_pending_pool;
 	}
 
 	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
@@ -665,7 +656,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	spin_lock_init(&s->tracked_chunk_lock);
 
 	/* Metadata must only be loaded into one table at once */
-	r = s->store.read_metadata(&s->store, dm_add_exception, (void *)s);
+	r = s->store->type->read_metadata(s->store, dm_add_exception,
+					  (void *)s);
 	if (r < 0) {
 		ti->error = "Failed to read snapshot metadata";
 		goto bad_load_and_register;
@@ -686,34 +678,33 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->private = s;
-	ti->split_io = s->chunk_size;
+	ti->split_io = s->store->chunk_size;
 
 	return 0;
 
 bad_load_and_register:
 	mempool_destroy(s->tracked_chunk_pool);
 
 bad_tracked_chunk_pool:
 	mempool_destroy(s->pending_pool);
 
-bad6:
+bad_pending_pool:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 
-bad5:
-	s->store.destroy(&s->store);
-
-bad4:
+bad_kcopyd:
 	exit_exception_table(&s->pending, pending_cache);
 	exit_exception_table(&s->complete, exception_cache);
 
-bad3:
-	dm_put_device(ti, s->cow);
+bad_hash_tables:
 	dm_put_device(ti, s->origin);
 
-bad2:
+bad_origin:
 	kfree(s);
 
-bad1:
+bad_snap:
+	dm_exception_store_destroy(store);
+
+bad_args:
 	return r;
 }
 
@@ -724,8 +715,6 @@ static void __free_exceptions(struct dm_snapshot *s)
 
 	exit_exception_table(&s->pending, pending_cache);
 	exit_exception_table(&s->complete, exception_cache);
-
-	s->store.destroy(&s->store);
 }
 
 static void snapshot_dtr(struct dm_target *ti)
@@ -761,7 +750,8 @@ static void snapshot_dtr(struct dm_target *ti)
 	mempool_destroy(s->pending_pool);
 
 	dm_put_device(ti, s->origin);
-	dm_put_device(ti, s->cow);
+
+	dm_exception_store_destroy(s->store);
 
 	kfree(s);
 }
@@ -820,12 +810,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 	else if (err == -ENOMEM)
 		DMERR("Invalidating snapshot: Unable to allocate exception.");
 
-	if (s->store.drop_snapshot)
-		s->store.drop_snapshot(&s->store);
+	if (s->store->type->drop_snapshot)
+		s->store->type->drop_snapshot(s->store);
 
 	s->valid = 0;
 
-	dm_table_event(s->ti->table);
+	dm_table_event(s->store->ti->table);
 }
 
 static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -943,8 +933,8 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
 
 	else
 		/* Update the metadata if we are persistent */
-		s->store.commit_exception(&s->store, &pe->e, commit_callback,
-					  pe);
+		s->store->type->commit_exception(s->store, &pe->e,
+						 commit_callback, pe);
 }
 
 /*
@@ -960,11 +950,11 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 	dev_size = get_dev_size(bdev);
 
 	src.bdev = bdev;
-	src.sector = chunk_to_sector(s, pe->e.old_chunk);
-	src.count = min(s->chunk_size, dev_size - src.sector);
+	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
+	src.count = min(s->store->chunk_size, dev_size - src.sector);
 
-	dest.bdev = s->cow->bdev;
-	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
+	dest.bdev = s->store->cow->bdev;
+	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
 	dest.count = src.count;
 
 	/* Hand over to kcopyd */
@@ -972,6 +962,17 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 		    &src, 1, &dest, 0, copy_callback, pe);
 }
 
+static struct dm_snap_pending_exception *
+__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
+{
+	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
+
+	if (!e)
+		return NULL;
+
+	return container_of(e, struct dm_snap_pending_exception, e);
+}
+
 /*
  * Looks to see if this snapshot already has a pending exception
  * for this chunk, otherwise it allocates a new one and inserts
@@ -981,40 +982,15 @@ static void start_copy(struct dm_snap_pending_exception *pe)
  * this.
  */
 static struct dm_snap_pending_exception *
-__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
+__find_pending_exception(struct dm_snapshot *s,
+			 struct dm_snap_pending_exception *pe, chunk_t chunk)
 {
-	struct dm_snap_exception *e;
-	struct dm_snap_pending_exception *pe;
-	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
+	struct dm_snap_pending_exception *pe2;
 
-	/*
-	 * Is there a pending exception for this already ?
-	 */
-	e = lookup_exception(&s->pending, chunk);
-	if (e) {
-		/* cast the exception to a pending exception */
-		pe = container_of(e, struct dm_snap_pending_exception, e);
-		goto out;
-	}
-
-	/*
-	 * Create a new pending exception, we don't want
-	 * to hold the lock while we do this.
-	 */
-	up_write(&s->lock);
-	pe = alloc_pending_exception(s);
-	down_write(&s->lock);
-
-	if (!s->valid) {
-		free_pending_exception(pe);
-		return NULL;
-	}
-
-	e = lookup_exception(&s->pending, chunk);
-	if (e) {
+	pe2 = __lookup_pending_exception(s, chunk);
+	if (pe2) {
 		free_pending_exception(pe);
-		pe = container_of(e, struct dm_snap_pending_exception, e);
-		goto out;
+		return pe2;
 	}
 
 	pe->e.old_chunk = chunk;
@@ -1024,7 +1000,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	atomic_set(&pe->ref_count, 0);
 	pe->started = 0;
 
-	if (s->store.prepare_exception(&s->store, &pe->e)) {
+	if (s->store->type->prepare_exception(s->store, &pe->e)) {
 		free_pending_exception(pe);
 		return NULL;
 	}
@@ -1032,17 +1008,18 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	get_pending_exception(pe);
 	insert_exception(&s->pending, &pe->e);
 
- out:
 	return pe;
 }
 
 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
 			    struct bio *bio, chunk_t chunk)
 {
-	bio->bi_bdev = s->cow->bdev;
-	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
-			 (chunk - e->old_chunk)) +
-			 (bio->bi_sector & s->chunk_mask);
+	bio->bi_bdev = s->store->cow->bdev;
+	bio->bi_sector = chunk_to_sector(s->store,
+					 dm_chunk_number(e->new_chunk) +
+					 (chunk - e->old_chunk)) +
+					 (bio->bi_sector &
+					  s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio,
@@ -1054,7 +1031,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	chunk_t chunk;
 	struct dm_snap_pending_exception *pe = NULL;
 
-	chunk = sector_to_chunk(s, bio->bi_sector);
+	chunk = sector_to_chunk(s->store, bio->bi_sector);
 
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
@@ -1083,11 +1060,31 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	 * writeable.
 	 */
 	if (bio_rw(bio) == WRITE) {
-		pe = __find_pending_exception(s, bio);
+		pe = __lookup_pending_exception(s, chunk);
 		if (!pe) {
-			__invalidate_snapshot(s, -ENOMEM);
-			r = -EIO;
-			goto out_unlock;
+			up_write(&s->lock);
+			pe = alloc_pending_exception(s);
+			down_write(&s->lock);
+
+			if (!s->valid) {
+				free_pending_exception(pe);
+				r = -EIO;
+				goto out_unlock;
+			}
+
+			e = lookup_exception(&s->complete, chunk);
+			if (e) {
+				free_pending_exception(pe);
+				remap_exception(s, e, bio, chunk);
+				goto out_unlock;
+			}
+
+			pe = __find_pending_exception(s, pe, chunk);
+			if (!pe) {
+				__invalidate_snapshot(s, -ENOMEM);
+				r = -EIO;
+				goto out_unlock;
+			}
 		}
 
 		remap_exception(s, &pe->e, bio, chunk);
@@ -1137,24 +1134,25 @@ static void snapshot_resume(struct dm_target *ti)
 static int snapshot_status(struct dm_target *ti, status_type_t type,
 			   char *result, unsigned int maxlen)
 {
+	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)
-			snprintf(result, maxlen, "Invalid");
+			DMEMIT("Invalid");
 		else {
-			if (snap->store.fraction_full) {
+			if (snap->store->type->fraction_full) {
 				sector_t numerator, denominator;
-				snap->store.fraction_full(&snap->store,
+				snap->store->type->fraction_full(snap->store,
 							  &numerator,
 							  &denominator);
-				snprintf(result, maxlen, "%llu/%llu",
+				DMEMIT("%llu/%llu",
 				       (unsigned long long)numerator,
 				       (unsigned long long)denominator);
 			}
 			else
-				snprintf(result, maxlen, "Unknown");
+				DMEMIT("Unknown");
 		}
 		break;
 
@@ -1164,10 +1162,9 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		 * to make private copies if the output is to
 		 * make sense.
 		 */
-		snprintf(result, maxlen, "%s %s %c %llu",
-			 snap->origin->name, snap->cow->name,
-			 snap->type,
-			 (unsigned long long)snap->chunk_size);
+		DMEMIT("%s", snap->origin->name);
+		snap->store->type->status(snap->store, type, result + sz,
+					  maxlen - sz);
 		break;
 	}
 
@@ -1196,14 +1193,14 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 			goto next_snapshot;
 
 		/* Nothing to do if writing beyond end of snapshot */
-		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
+		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
 			goto next_snapshot;
 
 		/*
 		 * Remember, different snapshots can have
 		 * different chunk sizes.
 		 */
-		chunk = sector_to_chunk(snap, bio->bi_sector);
+		chunk = sector_to_chunk(snap->store, bio->bi_sector);
 
 		/*
 		 * Check exception table to see if block
@@ -1217,10 +1214,28 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 		if (e)
 			goto next_snapshot;
 
-		pe = __find_pending_exception(snap, bio);
+		pe = __lookup_pending_exception(snap, chunk);
 		if (!pe) {
-			__invalidate_snapshot(snap, -ENOMEM);
-			goto next_snapshot;
+			up_write(&snap->lock);
+			pe = alloc_pending_exception(snap);
+			down_write(&snap->lock);
+
+			if (!snap->valid) {
+				free_pending_exception(pe);
+				goto next_snapshot;
+			}
+
+			e = lookup_exception(&snap->complete, chunk);
+			if (e) {
+				free_pending_exception(pe);
+				goto next_snapshot;
+			}
+
+			pe = __find_pending_exception(snap, pe, chunk);
+			if (!pe) {
+				__invalidate_snapshot(snap, -ENOMEM);
+				goto next_snapshot;
+			}
 		}
 
 		if (!primary_pe) {
@@ -1360,7 +1375,8 @@ static void origin_resume(struct dm_target *ti)
 	o = __lookup_origin(dev->bdev);
 	if (o)
 		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
+						  snap->store->chunk_size);
 	up_read(&_origins_lock);
 
 	ti->split_io = chunk_size;
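Taken as a whole, the patch moves the COW device, the chunk geometry (chunk_size, chunk_mask, chunk_shift) and the target pointer out of struct dm_snapshot and into the generic struct dm_exception_store, and converts every store operation from a direct member call (s->store.read_metadata(...)) into dispatch through the store's per-type operations table (s->store->type->read_metadata(...)). Below is a minimal user-space sketch of that dispatch pattern, shown under stated assumptions: the struct and helper names mirror the patch, but the toy "persistent" implementation and main() are invented purely for illustration and are not kernel code.

/*
 * Illustrative sketch only: snapshot code holds a pointer to an
 * exception store, and all store operations go through the store
 * type's function-pointer table, as in the patch above.
 */
#include <stdio.h>

typedef unsigned long long chunk_t;
typedef unsigned long long sector_t;

struct dm_exception_store;

/* Per-implementation operations, analogous to the store type vtable. */
struct dm_exception_store_type {
	const char *name;
	int (*prepare_exception)(struct dm_exception_store *store,
				 chunk_t old_chunk);
};

/* After the patch, the store owns the chunk geometry. */
struct dm_exception_store {
	struct dm_exception_store_type *type;
	unsigned chunk_size;	/* in 512-byte sectors */
	unsigned chunk_mask;
	unsigned chunk_shift;
};

/* Same shape as the helper the patch adds to dm-snap.c. */
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

/* Toy "persistent" implementation, purely for demonstration. */
static int persistent_prepare(struct dm_exception_store *store,
			      chunk_t old_chunk)
{
	printf("%s: chunk %llu starts at sector %llu\n",
	       store->type->name, old_chunk,
	       chunk_to_sector(store, old_chunk));
	return 0;
}

static struct dm_exception_store_type persistent_type = {
	.name = "persistent",
	.prepare_exception = persistent_prepare,
};

int main(void)
{
	struct dm_exception_store store = {
		.type = &persistent_type,
		.chunk_size = 16,	/* 16 sectors = 8 KiB chunks */
		.chunk_mask = 15,
		.chunk_shift = 4,
	};

	/* Callers dispatch through the vtable, as dm-snap.c now does. */
	return store.type->prepare_exception(&store, 3);
}

The point of the indirection is that dm-snap.c no longer needs to know whether a store is persistent or transient at construction time; dm_exception_store_create() parses the target arguments and picks the type, and the snapshot code only ever calls through store->type.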