Diffstat (limited to 'drivers/md/dm-thin.c')
 drivers/md/dm-thin.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 51e656a3002c..5f1b11e45702 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,16 +509,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 struct dm_thin_new_mapping {
 	struct list_head list;
 
-	unsigned quiesced:1;
-	unsigned prepared:1;
-	unsigned pass_discard:1;
-	unsigned definitely_not_shared:1;
+	bool quiesced:1;
+	bool prepared:1;
+	bool pass_discard:1;
+	bool definitely_not_shared:1;
 
+	int err;
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
 	struct dm_bio_prison_cell *cell, *cell2;
-	int err;
 
 	/*
 	 * If the bio covers the whole area of a block then we can avoid
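
Beyond the bool conversion, this hunk also moves the int err member up
next to the bitfields. A plausible motivation (the hunk itself does not
say) is structure packing: on 64-bit targets a lone int trailing the
pointer-sized members forces tail padding, while tucked in right after
the 1-bit flags it reuses slack space. A simplified userspace mirror of
the two layouts illustrates the saving; note struct list_head and two of
the flags are omitted here and the kernel types are swapped for standard
C ones, so the struct names and exact sizes are illustrative only:

#include <stdio.h>
#include <stdbool.h>

/* Old ordering: the lone int after the pointer-sized members
 * forces tail padding on LP64 targets. */
struct mapping_old {
	unsigned quiesced:1;
	unsigned prepared:1;
	void *tc;
	unsigned long virt_block;
	unsigned long data_block;
	void *cell, *cell2;
	int err;
};

/* New ordering: err fills the slack space right after the
 * bitfields, before the first pointer-aligned member. */
struct mapping_new {
	bool quiesced:1;
	bool prepared:1;
	int err;
	void *tc;
	unsigned long virt_block;
	unsigned long data_block;
	void *cell, *cell2;
};

int main(void)
{
	/* On a typical LP64 ABI this prints "old: 56, new: 48". */
	printf("old: %zu, new: %zu\n",
	       sizeof(struct mapping_old), sizeof(struct mapping_new));
	return 0;
}

Under those assumptions the reorder alone recovers 8 bytes per mapping,
with no behavioral change.
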
@@ -549,7 +549,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	m->err = read_err || write_err ? -EIO : 0;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -564,7 +564,7 @@ static void overwrite_endio(struct bio *bio, int err)
 	m->err = err;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -788,7 +788,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->cell = cell;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-		m->quiesced = 1;
+		m->quiesced = true;
 
 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -848,8 +848,8 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	m->quiesced = 1;
-	m->prepared = 0;
+	m->quiesced = true;
+	m->prepared = false;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
@@ -2904,7 +2904,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	spin_lock_irqsave(&pool->lock, flags);
 	list_for_each_entry_safe(m, tmp, &work, list) {
 		list_del(&m->list);
-		m->quiesced = 1;
+		m->quiesced = true;
 		__maybe_add_mapping(m);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
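
As for the flag fields themselves, switching from unsigned to bool 1-bit
bitfields is more than style: assigning to a bool invokes conversion to
_Bool, which yields 1 for any nonzero value, whereas an unsigned:1 field
stores only the low bit of the assigned value. A minimal userspace
sketch, using <stdbool.h> in place of the kernel's bool typedef; the
struct names and the 0x2 value are illustrative, not from the driver:

#include <stdio.h>
#include <stdbool.h>

struct flags_unsigned {
	unsigned prepared:1;
};

struct flags_bool {
	bool prepared:1;
};

int main(void)
{
	struct flags_unsigned u = { 0 };
	struct flags_bool b = { false };

	/* 0x2 has a clear low bit: the unsigned:1 field truncates it
	 * to 0, silently losing the flag, while conversion to bool
	 * yields 1 for any nonzero value. */
	u.prepared = 0x2;
	b.prepared = 0x2;

	printf("unsigned:1 = %d, bool:1 = %d\n", u.prepared, b.prepared);
	return 0;
}

This prints "unsigned:1 = 0, bool:1 = 1". With the literal true/false
assignments used in this patch both field types behave identically; the
bool type simply makes the flags' intent explicit and stays robust if a
flag is ever assigned from a mask expression.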