author     Mike Snitzer <snitzer@redhat.com>   2013-12-17 13:43:31 -0500
committer  Mike Snitzer <snitzer@redhat.com>   2014-01-07 10:14:18 -0500
commit     7f214665124401db3d171fd1f9f1ec6552b38b36 (patch)
tree       6ea86aed4531f8cc395f01a51bf88fa4ce05b9ff /drivers/md
parent     10343180f5c4023043e82d46e71048e68f975f50 (diff)
dm thin: use bool rather than unsigned for flags in structures
Also, move the 'err' member in the dm_thin_new_mapping structure to
eliminate a 4-byte hole (reducing its size from 88 bytes to 80).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
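
The size win comes from ordinary C struct padding. The following stand-alone
sketch is illustrative only, not kernel code: the member names and types are
simplified stand-ins for those in dm_thin_new_mapping (plain 8-byte scalars in
place of dm_block_t and the pointer members), chosen just to reproduce the
hole. Four 1-bit flags share a single byte; when the next member needs 8-byte
alignment the compiler pads out the rest of that word, and a trailing 4-byte
'err' then costs its own 4 bytes plus tail padding. Moving 'err' up lets it
occupy the padding that follows the flag byte:

#include <stdbool.h>
#include <stdio.h>

/* 'err' trails the 8-byte members: it needs 4 bytes plus 4 bytes tail pad. */
struct mapping_before {
	bool quiesced:1;              /* four flags share one byte...        */
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1; /* ...then 7 bytes pad to align 'tc'   */
	void *tc;
	unsigned long long virt_block;
	unsigned long long data_block;
	void *cell, *cell2;
	int err;                      /* 4 bytes used, 4 bytes tail padding  */
};

/* 'err' moved up into the padding that followed the flag byte. */
struct mapping_after {
	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1; /* 1 byte, then 3 bytes pad for 'err'  */
	int err;                      /* reuses the former padding           */
	void *tc;
	unsigned long long virt_block;
	unsigned long long data_block;
	void *cell, *cell2;
};

int main(void)
{
	/* On a typical LP64 target: before = 56 bytes, after = 48 bytes. */
	printf("before: %zu, after: %zu\n",
	       sizeof(struct mapping_before), sizeof(struct mapping_after));
	return 0;
}

On the kernel side, pahole (from the dwarves package) reports such holes
directly, e.g. pahole -C dm_thin_new_mapping drivers/md/dm-thin.o.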
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-thin-metadata.h |  2 +-
-rw-r--r--  drivers/md/dm-thin.c          | 22 +++++++++++-----------
2 files changed, 12 insertions, 12 deletions
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 2edf5dbac76a..9a368567632f 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -131,7 +131,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
 
 struct dm_thin_lookup_result {
 	dm_block_t block;
-	unsigned shared:1;
+	bool shared:1;
 };
 
 /*
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 51e656a3002c..5f1b11e45702 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,16 +509,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 struct dm_thin_new_mapping {
 	struct list_head list;
 
-	unsigned quiesced:1;
-	unsigned prepared:1;
-	unsigned pass_discard:1;
-	unsigned definitely_not_shared:1;
+	bool quiesced:1;
+	bool prepared:1;
+	bool pass_discard:1;
+	bool definitely_not_shared:1;
 
+	int err;
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
 	struct dm_bio_prison_cell *cell, *cell2;
-	int err;
 
 	/*
 	 * If the bio covers the whole area of a block then we can avoid
@@ -549,7 +549,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	m->err = read_err || write_err ? -EIO : 0;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -564,7 +564,7 @@ static void overwrite_endio(struct bio *bio, int err)
 	m->err = err;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -788,7 +788,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->cell = cell;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-		m->quiesced = 1;
+		m->quiesced = true;
 
 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -848,8 +848,8 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	m->quiesced = 1;
-	m->prepared = 0;
+	m->quiesced = true;
+	m->prepared = false;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
@@ -2904,7 +2904,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	spin_lock_irqsave(&pool->lock, flags);
 	list_for_each_entry_safe(m, tmp, &work, list) {
 		list_del(&m->list);
-		m->quiesced = 1;
+		m->quiesced = true;
 		__maybe_add_mapping(m);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
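
Beyond readability, bool:1 and unsigned:1 differ in assignment semantics,
which matters if a flag is ever set from an expression that is not already 0
or 1: storing into a _Bool bitfield first converts the value to 0 or 1, while
an unsigned:1 bitfield keeps only the low bit. A minimal user-space sketch
(hypothetical structs, not part of this patch):

#include <stdbool.h>
#include <stdio.h>

struct flags_u { unsigned shared:1; };	/* old style */
struct flags_b { bool shared:1; };	/* new style */

int main(void)
{
	struct flags_u u = { 0 };
	struct flags_b b = { 0 };
	int v = 2;	/* nonzero, but its low bit is 0 */

	u.shared = v;	/* unsigned:1 keeps the low bit only: stores 0 */
	b.shared = v;	/* _Bool conversion: any nonzero value stores 1 */

	printf("unsigned:1 -> %d, bool:1 -> %d\n",
	       (int)u.shared, (int)b.shared);	/* prints 0, then 1 */
	return 0;
}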