| author | Mike Snitzer <snitzer@redhat.com> | 2012-10-12 16:02:10 -0400 |
|---|---|---|
| committer | Alasdair G Kergon <agk@redhat.com> | 2012-10-12 16:02:10 -0400 |
| commit | 44feb387f6f5584535bd6e3ad7ccfdce715d7dba | |
| tree | f49e3e91bcba0c55d3a0357def6472069d00c42f | |
| parent | 28eed34e7662d7602da6753b0ba2563006b8e7a2 | |
dm thin: prepare to separate bio_prison code
The bio prison code will be useful to share with future DM targets.
Prepare to move this code into a separate module, adding a dm prefix
to structures and functions that will be exported.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
| -rw-r--r-- | drivers/md/dm-thin.c | 221 |
1 file changed, 131 insertions(+), 90 deletions(-)
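Before reading the diff, it may help to see the renamed API in the shape it is actually used. The following is a minimal, illustrative sketch of the bio prison pattern as dm-thin drives it after this patch; the dm_-prefixed signatures are taken from the hunks below, while the surrounding context (`tc`, `pool`, `bio`, `block`) is assumed dm-thin state, so the fragment is a sketch rather than compilable code:

```c
/*
 * Sketch only: detain a bio against a block, do the work if we are the
 * holder, then release the cell.  tc, pool, bio and block are assumed
 * dm-thin context, not defined here.
 */
struct dm_cell_key key;
struct dm_bio_prison_cell *cell;

build_virtual_key(tc->td, block, &key);

/*
 * dm_bio_detain() returns 1 if the cell was already held (the bio has
 * been queued inside the cell as an inmate), 0 if this bio is the new
 * holder and must carry out the work.
 */
if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
	return;

/* ... the holder remaps or provisions the block ... */

/* Unlock the cell; all inmates move onto the pool's deferred list. */
dm_cell_release(cell, &pool->deferred_bios);
```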
```diff
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index df20a115136f..22a22a701e16 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -58,7 +58,7 @@
  * i) plug io further to this physical block. (see bio_prison code).
  *
  * ii) quiesce any read io to that shared data block. Obviously
- * including all devices that share this block. (see deferred_set code)
+ * including all devices that share this block. (see dm_deferred_set code)
  *
  * iii) copy the data block to a newly allocate block. This step can be
  * missed out if the io covers the block. (schedule_copy).
@@ -104,9 +104,9 @@
  * by a key, multiple bios can be in the same cell. When the cell is
  * subsequently unlocked the bios become available.
  */
-struct bio_prison;
+struct dm_bio_prison;
 
-struct cell_key {
+struct dm_cell_key {
 	int virtual;
 	dm_thin_id dev;
 	dm_block_t block;
@@ -114,13 +114,13 @@ struct cell_key {
 
 struct dm_bio_prison_cell {
 	struct hlist_node list;
-	struct bio_prison *prison;
-	struct cell_key key;
+	struct dm_bio_prison *prison;
+	struct dm_cell_key key;
 	struct bio *holder;
 	struct bio_list bios;
 };
 
-struct bio_prison {
+struct dm_bio_prison {
 	spinlock_t lock;
 	mempool_t *cell_pool;
 
@@ -148,13 +148,13 @@ static struct kmem_cache *_cell_cache;
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
  */
-static struct bio_prison *prison_create(unsigned nr_cells)
+static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
 {
 	unsigned i;
 	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
-	size_t len = sizeof(struct bio_prison) +
+	size_t len = sizeof(struct dm_bio_prison) +
 		(sizeof(struct hlist_head) * nr_buckets);
-	struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
+	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
 
 	if (!prison)
 		return NULL;
@@ -175,13 +175,13 @@ static struct bio_prison *prison_create(unsigned nr_cells)
 	return prison;
 }
 
-static void prison_destroy(struct bio_prison *prison)
+static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
 {
 	mempool_destroy(prison->cell_pool);
 	kfree(prison);
 }
 
-static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
+static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
 {
 	const unsigned long BIG_PRIME = 4294967291UL;
 	uint64_t hash = key->block * BIG_PRIME;
@@ -189,7 +189,7 @@ static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
 	return (uint32_t) (hash & prison->hash_mask);
 }
 
-static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
+static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
 {
 	return (lhs->virtual == rhs->virtual) &&
 	       (lhs->dev == rhs->dev) &&
@@ -197,7 +197,7 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
 }
 
 static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
-						  struct cell_key *key)
+						  struct dm_cell_key *key)
 {
 	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
@@ -215,8 +215,8 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
  *
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
-static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct dm_bio_prison_cell **ref)
+static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
+			 struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
@@ -277,7 +277,7 @@ out:
  */
 static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	hlist_del(&cell->list);
 
@@ -289,10 +289,10 @@ static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inm
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release(cell, bios);
@@ -313,10 +313,10 @@ static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
+static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release_singleton(cell, bio);
@@ -329,7 +329,7 @@ static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *
 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 				     struct bio_list *inmates)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	hlist_del(&cell->list);
 	bio_list_merge(inmates, &cell->bios);
@@ -337,20 +337,20 @@ static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
-				   struct bio_list *inmates)
+static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				      struct bio_list *inmates)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release_no_holder(cell, inmates);
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct dm_bio_prison_cell *cell)
+static void dm_cell_error(struct dm_bio_prison_cell *cell)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 	struct bio_list bios;
 	struct bio *bio;
 	unsigned long flags;
@@ -374,23 +374,28 @@ static void cell_error(struct dm_bio_prison_cell *cell)
  * new mapping could free the old block that the read bios are mapped to.
  */
 
-struct deferred_set;
-struct deferred_entry {
-	struct deferred_set *ds;
+struct dm_deferred_set;
+struct dm_deferred_entry {
+	struct dm_deferred_set *ds;
 	unsigned count;
 	struct list_head work_items;
 };
 
-struct deferred_set {
+struct dm_deferred_set {
 	spinlock_t lock;
 	unsigned current_entry;
 	unsigned sweeper;
-	struct deferred_entry entries[DEFERRED_SET_SIZE];
+	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
 };
 
-static void ds_init(struct deferred_set *ds)
+static struct dm_deferred_set *dm_deferred_set_create(void)
 {
 	int i;
+	struct dm_deferred_set *ds;
+
+	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
+	if (!ds)
+		return NULL;
 
 	spin_lock_init(&ds->lock);
 	ds->current_entry = 0;
@@ -400,12 +405,19 @@ static void ds_init(struct deferred_set *ds)
 		ds->entries[i].count = 0;
 		INIT_LIST_HEAD(&ds->entries[i].work_items);
 	}
+
+	return ds;
 }
 
-static struct deferred_entry *ds_inc(struct deferred_set *ds)
+static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
+{
+	kfree(ds);
+}
+
+static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
 {
 	unsigned long flags;
-	struct deferred_entry *entry;
+	struct dm_deferred_entry *entry;
 
 	spin_lock_irqsave(&ds->lock, flags);
 	entry = ds->entries + ds->current_entry;
@@ -420,7 +432,7 @@ static unsigned ds_next(unsigned index)
 	return (index + 1) % DEFERRED_SET_SIZE;
 }
 
-static void __sweep(struct deferred_set *ds, struct list_head *head)
+static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
 {
 	while ((ds->sweeper != ds->current_entry) &&
 	       !ds->entries[ds->sweeper].count) {
@@ -432,7 +444,7 @@ static void __sweep(struct deferred_set *ds, struct list_head *head)
 		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 }
 
-static void ds_dec(struct deferred_entry *entry, struct list_head *head)
+static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
 {
 	unsigned long flags;
 
@@ -446,7 +458,7 @@ static void ds_dec(struct deferred_entry *entry, struct list_head *head)
 /*
  * Returns 1 if deferred or 0 if no pending items to delay job.
  */
-static int ds_add_work(struct deferred_set *ds, struct list_head *work)
+static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
 {
 	int r = 1;
 	unsigned long flags;
```
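The hunks above turn the deferred set into a dynamically allocated object with an explicit create/destroy pair. A hedged sketch of the resulting lifecycle follows; `struct work_item` and `run_work()` are hypothetical stand-ins for the caller's work representation, while the dm_deferred_* calls match the signatures shown above:

```c
/* Hypothetical work representation, for the sketch only. */
struct work_item {
	struct list_head list;
};

static int deferred_set_usage_sketch(struct work_item *work)
{
	struct dm_deferred_set *ds;
	struct dm_deferred_entry *entry;
	LIST_HEAD(done);

	ds = dm_deferred_set_create();
	if (!ds)
		return -ENOMEM;

	/* Each in-flight io takes a count on the current entry. */
	entry = dm_deferred_entry_inc(ds);

	/*
	 * dm_deferred_set_add_work() returns 1 if the work must wait
	 * behind pending entries, 0 if it can run immediately.
	 */
	if (!dm_deferred_set_add_work(ds, &work->list))
		run_work(work);		/* hypothetical helper */

	/*
	 * On io completion: drop the count and sweep quiesced work
	 * onto "done" (the caller would now run everything on it).
	 */
	dm_deferred_entry_dec(entry, &done);

	dm_deferred_set_destroy(ds);
	return 0;
}
```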
```diff
@@ -467,13 +479,28 @@ static int ds_add_work(struct deferred_set *ds, struct list_head *work)
 	return r;
 }
 
+static int __init dm_bio_prison_init(void)
+{
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit dm_bio_prison_exit(void)
+{
+	kmem_cache_destroy(_cell_cache);
+	_cell_cache = NULL;
+}
+
 /*----------------------------------------------------------------*/
 
 /*
  * Key building.
  */
 static void build_data_key(struct dm_thin_device *td,
-			   dm_block_t b, struct cell_key *key)
+			   dm_block_t b, struct dm_cell_key *key)
 {
 	key->virtual = 0;
 	key->dev = dm_thin_dev_id(td);
@@ -481,7 +508,7 @@ static void build_data_key(struct dm_thin_device *td,
 }
 
 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
-			      struct cell_key *key)
+			      struct dm_cell_key *key)
 {
 	key->virtual = 1;
 	key->dev = dm_thin_dev_id(td);
@@ -534,7 +561,7 @@ struct pool {
 	unsigned low_water_triggered:1;	/* A dm event has been sent */
 	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
 
-	struct bio_prison *prison;
+	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
 
 	struct workqueue_struct *wq;
@@ -552,8 +579,8 @@ struct pool {
 
 	struct bio_list retry_on_resume_list;
 
-	struct deferred_set shared_read_ds;
-	struct deferred_set all_io_ds;
+	struct dm_deferred_set *shared_read_ds;
+	struct dm_deferred_set *all_io_ds;
 
 	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
@@ -660,8 +687,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 struct dm_thin_endio_hook {
 	struct thin_c *tc;
-	struct deferred_entry *shared_read_entry;
-	struct deferred_entry *all_io_entry;
+	struct dm_deferred_entry *shared_read_entry;
+	struct dm_deferred_entry *all_io_entry;
 	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
@@ -877,7 +904,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 	unsigned long flags;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	cell_release(cell, &pool->deferred_bios);
+	dm_cell_release(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
 
 	wake_worker(pool);
@@ -896,7 +923,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell
 	bio_list_init(&bios);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	cell_release_no_holder(cell, &pool->deferred_bios);
+	dm_cell_release_no_holder(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	wake_worker(pool);
@@ -906,7 +933,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
 	if (m->bio)
 		m->bio->bi_end_io = m->saved_bi_end_io;
-	cell_error(m->cell);
+	dm_cell_error(m->cell);
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
 }
@@ -921,7 +948,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 		bio->bi_end_io = m->saved_bi_end_io;
 
 	if (m->err) {
-		cell_error(m->cell);
+		dm_cell_error(m->cell);
 		goto out;
 	}
 
@@ -933,7 +960,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 	if (r) {
 		DMERR("dm_thin_insert_block() failed");
-		cell_error(m->cell);
+		dm_cell_error(m->cell);
 		goto out;
 	}
 
@@ -1067,7 +1094,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->err = 0;
 	m->bio = NULL;
 
-	if (!ds_add_work(&pool->shared_read_ds, &m->list))
+	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
 
 	/*
@@ -1099,7 +1126,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
 			DMERR("dm_kcopyd_copy() failed");
-			cell_error(cell);
+			dm_cell_error(cell);
 		}
 	}
 }
@@ -1164,7 +1191,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
 			DMERR("dm_kcopyd_zero() failed");
-			cell_error(cell);
+			dm_cell_error(cell);
 		}
 	}
 }
@@ -1276,7 +1303,7 @@ static void no_space(struct dm_bio_prison_cell *cell)
 	struct bio_list bios;
 
 	bio_list_init(&bios);
-	cell_release(cell, &bios);
+	dm_cell_release(cell, &bios);
 
 	while ((bio = bio_list_pop(&bios)))
 		retry_on_resume(bio);
@@ -1288,13 +1315,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	unsigned long flags;
 	struct pool *pool = tc->pool;
 	struct dm_bio_prison_cell *cell, *cell2;
-	struct cell_key key, key2;
+	struct dm_cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
 	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
 		return;
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1306,8 +1333,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 * on this block.
 			 */
 			build_data_key(tc->td, lookup_result.block, &key2);
-			if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-				cell_release_singleton(cell, bio);
+			if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+				dm_cell_release_singleton(cell, bio);
 				break;
 			}
 
@@ -1326,7 +1353,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 				m->err = 0;
 				m->bio = bio;
 
-				if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+				if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 					spin_lock_irqsave(&pool->lock, flags);
 					list_add(&m->list, &pool->prepared_discards);
 					spin_unlock_irqrestore(&pool->lock, flags);
@@ -1338,8 +1365,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 * a block boundary. So we submit the discard of a
 			 * partial block appropriately.
 			 */
-			cell_release_singleton(cell, bio);
-			cell_release_singleton(cell2, bio);
+			dm_cell_release_singleton(cell, bio);
+			dm_cell_release_singleton(cell2, bio);
 			if ((!lookup_result.shared) && pool->pf.discard_passdown)
 				remap_and_issue(tc, bio, lookup_result.block);
 			else
@@ -1351,20 +1378,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		/*
 		 * It isn't provisioned, just forget it.
 		 */
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_endio(bio, 0);
 		break;
 
 	default:
 		DMERR("discard: find block unexpectedly returned %d", r);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_io_error(bio);
 		break;
 	}
 }
 
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			  struct cell_key *key,
+			  struct dm_cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
 			  struct dm_bio_prison_cell *cell)
 {
@@ -1384,7 +1411,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 
 	default:
 		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
-		cell_error(cell);
+		dm_cell_error(cell);
 		break;
 	}
 }
@@ -1395,14 +1422,14 @@
 {
 	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
-	struct cell_key key;
+	struct dm_cell_key key;
 
 	/*
 	 * If cell is already occupied, then sharing is already in the process
 	 * of being broken so we have nothing further to do here.
 	 */
 	build_data_key(tc->td, lookup_result->block, &key);
-	if (bio_detain(pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(pool->prison, &key, bio, &cell))
 		return;
 
 	if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1410,9 +1437,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	else {
 		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
-		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
+		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, lookup_result->block);
 	}
 }
@@ -1427,7 +1454,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
 	if (!bio->bi_size) {
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, 0);
 		return;
 	}
@@ -1437,7 +1464,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 */
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_endio(bio, 0);
 		return;
 	}
@@ -1458,7 +1485,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	default:
 		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
 		set_pool_mode(tc->pool, PM_READ_ONLY);
-		cell_error(cell);
+		dm_cell_error(cell);
 		break;
 	}
 }
@@ -1468,7 +1495,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_bio_prison_cell *cell;
-	struct cell_key key;
+	struct dm_cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
 	/*
@@ -1476,7 +1503,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	 * being provisioned so we have nothing further to do here.
 	 */
 	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
 		return;
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1491,7 +1518,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 		 * TODO: this will probably have to change when discard goes
 		 * back in.
 		 */
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 
 		if (lookup_result.shared)
 			process_shared_bio(tc, bio, block, &lookup_result);
@@ -1501,7 +1528,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	case -ENODATA:
 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
-			cell_release_singleton(cell, bio);
+			dm_cell_release_singleton(cell, bio);
 			remap_to_origin_and_issue(tc, bio);
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1509,7 +1536,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	default:
 		DMERR("dm_thin_find_block() failed, error = %d", r);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_io_error(bio);
 		break;
 	}
@@ -1718,7 +1745,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
-	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
+	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
 	h->overwrite_mapping = NULL;
 
 	return h;
@@ -1928,7 +1955,7 @@ static void __pool_destroy(struct pool *pool)
 	if (dm_pool_metadata_close(pool->pmd) < 0)
 		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
-	prison_destroy(pool->prison);
+	dm_bio_prison_destroy(pool->prison);
 	dm_kcopyd_client_destroy(pool->copier);
 
 	if (pool->wq)
@@ -1938,6 +1965,8 @@ static void __pool_destroy(struct pool *pool)
 		mempool_free(pool->next_mapping, pool->mapping_pool);
 	mempool_destroy(pool->mapping_pool);
 	mempool_destroy(pool->endio_hook_pool);
+	dm_deferred_set_destroy(pool->shared_read_ds);
+	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
 }
 
@@ -1976,7 +2005,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->sectors_per_block_shift = __ffs(block_size);
 	pool->low_water_blocks = 0;
 	pool_features_init(&pool->pf);
-	pool->prison = prison_create(PRISON_CELLS);
+	pool->prison = dm_bio_prison_create(PRISON_CELLS);
 	if (!pool->prison) {
 		*error = "Error creating pool's bio prison";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2012,8 +2041,20 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->low_water_triggered = 0;
 	pool->no_free_space = 0;
 	bio_list_init(&pool->retry_on_resume_list);
-	ds_init(&pool->shared_read_ds);
-	ds_init(&pool->all_io_ds);
+
+	pool->shared_read_ds = dm_deferred_set_create();
+	if (!pool->shared_read_ds) {
+		*error = "Error creating pool's shared read deferred set";
+		err_p = ERR_PTR(-ENOMEM);
+		goto bad_shared_read_ds;
+	}
+
+	pool->all_io_ds = dm_deferred_set_create();
+	if (!pool->all_io_ds) {
+		*error = "Error creating pool's all io deferred set";
+		err_p = ERR_PTR(-ENOMEM);
+		goto bad_all_io_ds;
+	}
 
 	pool->next_mapping = NULL;
 	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
@@ -2042,11 +2083,15 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 bad_endio_hook_pool:
 	mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
+	dm_deferred_set_destroy(pool->all_io_ds);
+bad_all_io_ds:
+	dm_deferred_set_destroy(pool->shared_read_ds);
+bad_shared_read_ds:
 	destroy_workqueue(pool->wq);
 bad_wq:
 	dm_kcopyd_client_destroy(pool->copier);
bad_kcopyd_client:
-	prison_destroy(pool->prison);
+	dm_bio_prison_destroy(pool->prison);
 bad_prison:
 	kfree(pool);
 bad_pool:
```
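One detail worth noting in the pool_create() hunks above: the two new labels slot into the unwind chain so that a failure at any step frees exactly what was built before it, in reverse order of construction. The idiom, reduced to a self-contained toy example (plain C, not dm-thin code):

```c
#include <stdlib.h>

/* Generic goto-unwind idiom, mirroring the pool_create() error path. */
struct widget { int dummy; };

static struct widget *make_widget(void) { return malloc(sizeof(struct widget)); }
static void destroy_widget(struct widget *w) { free(w); }

static int create_three(struct widget **a, struct widget **b, struct widget **c)
{
	*a = make_widget();
	if (!*a)
		goto bad_a;
	*b = make_widget();
	if (!*b)
		goto bad_b;
	*c = make_widget();
	if (!*c)
		goto bad_c;
	return 0;

	/* Unwind strictly in reverse order of construction. */
bad_c:
	destroy_widget(*b);
bad_b:
	destroy_widget(*a);
bad_a:
	return -1;
}
```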
```diff
@@ -2982,7 +3027,7 @@ static int thin_endio(struct dm_target *ti,
 
 	if (h->shared_read_entry) {
 		INIT_LIST_HEAD(&work);
-		ds_dec(h->shared_read_entry, &work);
+		dm_deferred_entry_dec(h->shared_read_entry, &work);
 
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list) {
@@ -2995,7 +3040,7 @@ static int thin_endio(struct dm_target *ti,
 
 	if (h->all_io_entry) {
 		INIT_LIST_HEAD(&work);
-		ds_dec(h->all_io_entry, &work);
+		dm_deferred_entry_dec(h->all_io_entry, &work);
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
 			list_add(&m->list, &pool->prepared_discards);
@@ -3128,9 +3173,7 @@ static int __init dm_thin_init(void)
 
 	r = -ENOMEM;
 
-	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
-	if (!_cell_cache)
-		goto bad_cell_cache;
+	dm_bio_prison_init();
 
 	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
 	if (!_new_mapping_cache)
@@ -3145,8 +3188,6 @@ static int __init dm_thin_init(void)
 bad_endio_hook_cache:
 	kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
-	kmem_cache_destroy(_cell_cache);
-bad_cell_cache:
 	dm_unregister_target(&pool_target);
 bad_pool_target:
 	dm_unregister_target(&thin_target);
@@ -3159,7 +3200,7 @@ static void dm_thin_exit(void)
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
 
-	kmem_cache_destroy(_cell_cache);
+	dm_bio_prison_exit();
 	kmem_cache_destroy(_new_mapping_cache);
 	kmem_cache_destroy(_endio_hook_cache);
 }
```