 drivers/md/dm-thin.c | 161 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 99 insertions(+), 62 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138ff55a..db1b041ce975 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
 	dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
 	return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
 		return NULL;
 
 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
 		       (lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 				    struct cell_key *key)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct cell **ref)
+		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 
 	BUG_ON(hash > prison->nr_buckets);
 
@@ -273,7 +274,7 @@ out:
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell.  This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				     struct bio_list *inmates)
 {
 	struct bio_prison *prison = cell->prison;
 
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				   struct bio_list *inmates)
 {
 	unsigned long flags;
 	struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
 	struct bio_prison *prison = cell->prison;
 	struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
 	unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
 	struct deferred_set shared_read_ds;
 	struct deferred_set all_io_ds;
 
-	struct new_mapping *next_mapping;
+	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
 	mempool_t *endio_hook_pool;
 };
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
 	struct thin_c *tc;
 	struct deferred_entry *shared_read_entry;
 	struct deferred_entry *all_io_entry;
-	struct new_mapping *overwrite_mapping;
+	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 	bio_list_init(master);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		if (h->tc == tc)
 			bio_endio(bio, DM_ENDIO_REQUEUE);
 		else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
 	struct list_head list;
 
 	unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	int err;
 
 	/*
@@ -759,7 +763,7 @@ struct new_mapping {
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	unsigned long flags;
-	struct new_mapping *m = context;
+	struct dm_thin_new_mapping *m = context;
 	struct pool *pool = m->tc->pool;
 
 	m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
 	unsigned long flags;
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-	struct new_mapping *m = h->overwrite_mapping;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 	struct pool *pool = m->tc->pool;
 
 	m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 		       dm_block_t data_block)
 {
 	struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
 	struct bio_list bios;
 	struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
 	wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
 	struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
 	int r;
 	struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-			     void (*fn)(struct new_mapping *))
+			     void (*fn)(struct dm_thin_new_mapping *))
 {
 	unsigned long flags;
 	struct list_head maps;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
 	spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
 	return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *r = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 			  struct dm_dev *origin, dm_block_t data_origin,
 			  dm_block_t data_dest,
-			  struct cell *cell, struct bio *bio)
+			  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	int r;
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
 	 */
 	if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_origin, dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->pool_dev,
 		      data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 				   dm_block_t data_dest,
-				   struct cell *cell, struct bio *bio)
+				   struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	schedule_copy(tc, virt_block, tc->origin_dev,
 		      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-			  dm_block_t data_block, struct cell *cell,
+			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
 			  struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct new_mapping *m = get_next_mapping(pool);
+	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
 	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		process_prepared_mapping(m);
 
 	else if (io_overwrites_block(pool, bio)) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
 		h->overwrite_mapping = m;
 		m->bio = bio;
 		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 		remap_and_issue(tc, bio, data_block);
-
 	} else {
 		int r;
 		struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-	struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 	struct thin_c *tc = h->tc;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	int r;
 	unsigned long flags;
 	struct pool *pool = tc->pool;
-	struct cell *cell, *cell2;
+	struct dm_bio_prison_cell *cell, *cell2;
 	struct cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
-	struct new_mapping *m;
+	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
 	if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 			  struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
-			  struct cell *cell)
+			  struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 			       dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
 	struct cell_key key;
 
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			    struct cell *cell)
+			    struct dm_bio_prison_cell *cell)
 {
 	int r;
 	dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 {
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
-	struct cell *cell;
+	struct dm_bio_prison_cell *cell;
 	struct cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 		struct thin_c *tc = h->tc;
 
 		/*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
 	kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	ds_init(&pool->all_io_ds);
 
 	pool->next_mapping = NULL;
-	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+						      _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
 		goto bad_mapping_pool;
 	}
 
-	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+							 _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2613,9 +2621,9 @@ static int thin_endio(struct dm_target *ti,
 		      union map_info *map_context)
 {
 	unsigned long flags;
-	struct endio_hook *h = map_context->ptr;
+	struct dm_thin_endio_hook *h = map_context->ptr;
 	struct list_head work;
-	struct new_mapping *m, *tmp;
+	struct dm_thin_new_mapping *m, *tmp;
 	struct pool *pool = h->tc->pool;
 
 	if (h->shared_read_entry) {
@@ -2755,7 +2763,32 @@ static int __init dm_thin_init(void)
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	r = -ENOMEM;
+
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		goto bad_cell_cache;
+
+	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+	if (!_new_mapping_cache)
+		goto bad_new_mapping_cache;
+
+	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+	if (!_endio_hook_cache)
+		goto bad_endio_hook_cache;
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);
 
 	return r;
 }
@@ -2764,6 +2797,10 @@ static void dm_thin_exit(void)
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
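
For readers unfamiliar with the allocation pattern this patch converts to, here is a minimal, self-contained sketch of a slab-backed mempool with the same init/exit unwinding style as the dm_thin_init()/dm_thin_exit() changes above. All names here (example_item, EXAMPLE_POOL_SIZE, and so on) are hypothetical stand-ins for illustration, not dm-thin symbols:

/*
 * Illustrative sketch only -- not dm-thin code.  A kmem_cache gives
 * slab objects sized exactly for the struct, and the mempool keeps a
 * reserve of them so allocations in the I/O path can always make
 * forward progress under memory pressure.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct example_item {		/* hypothetical object type */
	int payload;
};

#define EXAMPLE_POOL_SIZE 16	/* minimum objects held in reserve */

static struct kmem_cache *_example_cache;
static mempool_t *_example_pool;

static int __init example_init(void)
{
	struct example_item *item;

	/* KMEM_CACHE() names and sizes the cache after the struct. */
	_example_cache = KMEM_CACHE(example_item, 0);
	if (!_example_cache)
		return -ENOMEM;

	/* The pool draws from the slab cache, not kmalloc'd buffers. */
	_example_pool = mempool_create_slab_pool(EXAMPLE_POOL_SIZE,
						 _example_cache);
	if (!_example_pool)
		goto bad_pool;

	/* Backed by the reserve, this cannot fail permanently. */
	item = mempool_alloc(_example_pool, GFP_NOIO);
	item->payload = 42;
	mempool_free(item, _example_pool);

	return 0;

bad_pool:
	kmem_cache_destroy(_example_cache);
	return -ENOMEM;
}

static void __exit example_exit(void)
{
	/* Destroy the pool before the cache that backs it. */
	mempool_destroy(_example_pool);
	kmem_cache_destroy(_example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The practical gain over mempool_create_kmalloc_pool() is that a dedicated kmem_cache allocates objects at the exact struct size, where kmalloc typically rounds up to the next size bucket, and (slab-cache merging aside) the objects get their own entry in /proc/slabinfo, making the three dm-thin pools easier to audit.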
