author    Mike Snitzer <snitzer@redhat.com>  2013-12-17 13:19:11 -0500
committer Mike Snitzer <snitzer@redhat.com>  2014-01-07 10:10:03 -0500
commit    16961b042db8cc5cf75d782b4255193ad56e1d4f (patch)
tree      2e92062b1e8ed6ac4549707f6ce686ab19d36e1e /drivers/md/dm-thin.c
parent    319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
dm thin: initialize dm_thin_new_mapping returned by get_next_mapping

As additional members are added to the dm_thin_new_mapping structure,
care should be taken to make sure they get initialized before use.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Cc: stable@vger.kernel.org
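The fix moves initialization out of the individual call sites and into the
allocator itself. Below is a minimal user-space sketch of that pattern,
assuming hypothetical names (sketch_mapping, sketch_pool, and a simplified
list head); it illustrates the idea and is not the kernel code. One subtlety
the patch preserves: an empty list_head must point at itself, so memset()
alone is not enough and INIT_LIST_HEAD() still runs after it.

#include <string.h>

/*
 * User-space sketch of allocator-side initialization (hypothetical
 * names, not the kernel code). Zeroing the object once in the
 * allocator means a field added to the struct tomorrow is already
 * initialized for every caller today.
 */
struct sketch_list_head {
	struct sketch_list_head *next, *prev;
};

/*
 * An empty list head points at itself; all-zero is NOT a valid empty
 * list, which is why the kernel patch still calls INIT_LIST_HEAD()
 * after the memset().
 */
static void sketch_init_list_head(struct sketch_list_head *h)
{
	h->next = h;
	h->prev = h;
}

struct sketch_mapping {
	struct sketch_list_head list;
	void *bio;
	int err;
	unsigned int quiesced;
	unsigned int prepared;
};

struct sketch_pool {
	struct sketch_mapping *next_mapping; /* preallocated earlier */
};

static struct sketch_mapping *sketch_get_next_mapping(struct sketch_pool *pool)
{
	struct sketch_mapping *m = pool->next_mapping;

	/* Zero every member up front ... */
	memset(m, 0, sizeof(*m));
	/* ... then fix up members whose empty state is not all-zero. */
	sketch_init_list_head(&m->list);

	pool->next_mapping = NULL;
	return m;
}

With this in place, callers such as schedule_copy() and schedule_zero() only
assign the fields that differ from zero, which is exactly what the hunks
below reduce them to.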
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ee29037ffc2e..da65febdb6c4 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -751,13 +751,17 @@ static int ensure_next_mapping(struct pool *pool)
 
 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct dm_thin_new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *m = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
+	memset(m, 0, sizeof(struct dm_thin_new_mapping));
+	INIT_LIST_HEAD(&m->list);
+	m->bio = NULL;
+
 	pool->next_mapping = NULL;
 
-	return r;
+	return m;
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,15 +773,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 0;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
@@ -840,15 +839,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
 	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	/*
 	 * If the whole block of data is being overwritten or we are not
@@ -1045,7 +1041,6 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	m->data_block = lookup_result.block;
 	m->cell = cell;
 	m->cell2 = cell2;
-	m->err = 0;
 	m->bio = bio;
 
 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {