author	Christoph Hellwig <hch@lst.de>	2017-06-03 03:38:06 -0400
committer	Jens Axboe <axboe@fb.com>	2017-06-09 11:27:32 -0400
commit	4e4cbee93d56137ebff722be022cae5f70ef84fb (patch)
tree	4fa7345155599fc6bdd653fca8c5224ddf90a5be /drivers/md/dm-cache-target.c
parent	fc17b6534eb8395f0b3133eb31d87deec32c642b (diff)
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion. Note that device mapper overloaded bi_error with a private value, which we'll have to keep around at least for now and thus propagate to a proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
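(Aside for readers following the conversion: the series pairs this change with an errno_to_blk_status() helper, which the commit_op() hunk below uses to translate commit()'s negative errno return into a status code. The sketch below is a minimal userspace model of that mapping only; the blk_status_t typedef and BLK_STS_* values are simplified stand-ins, and the real definitions live in include/linux/blk_types.h and block/blk-core.c.)

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel definitions; illustrative only. */
typedef unsigned char blk_status_t;
#define BLK_STS_OK         ((blk_status_t)0)
#define BLK_STS_NOTSUPP    ((blk_status_t)1)
#define BLK_STS_IOERR      ((blk_status_t)10)
#define BLK_STS_DM_REQUEUE ((blk_status_t)11)	/* the DM-private value kept for now */

/* Sketch of the errno_to_blk_status() idea: errnos without a dedicated
 * status code collapse to BLK_STS_IOERR. The kernel uses a lookup table
 * rather than a switch. */
static blk_status_t errno_to_blk_status(int error)
{
	switch (error) {
	case 0:
		return BLK_STS_OK;
	case -EOPNOTSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

int main(void)
{
	printf("-EIO maps to status %u\n", (unsigned)errno_to_blk_status(-EIO));
	return 0;
}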
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c48612e6d525..c5ea03fc7ee1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct continuation {
 	struct work_struct ws;
-	int input;
+	blk_status_t input;
 };
 
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
 	/*
 	 * The operation that everyone is waiting for.
 	 */
-	int (*commit_op)(void *context);
+	blk_status_t (*commit_op)(void *context);
 	void *commit_context;
 
 	/*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-	int r;
+	blk_status_t r;
 	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 
 	while ((bio = bio_list_pop(&bios))) {
 		if (r) {
-			bio->bi_error = r;
+			bio->bi_status = r;
 			bio_endio(bio);
 		} else
 			b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }
 
 static void batcher_init(struct batcher *b,
-			 int (*commit_op)(void *),
+			 blk_status_t (*commit_op)(void *),
 			 void *commit_context,
 			 void (*issue_op)(struct bio *bio, void *),
 			 void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio_endio(bio);
 		return;
 	}
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 
 	if (read_err || write_err)
-		mg->k.input = -EIO;
+		mg->k.input = BLK_STS_IOERR;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error)
-		mg->k.input = bio->bi_error;
+	if (bio->bi_status)
+		mg->k.input = bio->bi_status;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
 	if (mg->overwrite_bio) {
 		if (success)
 			force_set_dirty(cache, cblock);
+		else if (mg->k.input)
+			mg->overwrite_bio->bi_status = mg->k.input;
 		else
-			mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+			mg->overwrite_bio->bi_status = BLK_STS_IOERR;
 		bio_endio(mg->overwrite_bio);
 	} else {
 		if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
 		r = copy(mg, is_policy_promote);
 		if (r) {
 			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-			mg->k.input = -EIO;
+			mg->k.input = BLK_STS_IOERR;
 			mg_complete(mg, false);
 		}
 	}
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
  */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
 	struct cache *cache = context;
 
 	if (dm_cache_changed_this_transaction(cache->cmd))
-		return commit(cache, false);
+		return errno_to_blk_status(commit(cache, false));
 
 	return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_bios);
 
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
 		bio_endio(bio);
 	}
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
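(Closing usage note: after this conversion, completion handlers test bio->bi_status rather than the removed bi_error, exactly as the writethrough_endio() and overwrite_endio() hunks above do. A minimal sketch under the same simplified stand-ins as earlier; struct bio here is a stub, not the kernel's definition.)

#include <stdio.h>

typedef unsigned char blk_status_t;	/* simplified stand-in, as above */

/* Stub carrying only the field this patch renames; the real struct bio
 * lives in include/linux/blk_types.h and is far larger. */
struct bio {
	blk_status_t bi_status;	/* 0 on success, BLK_STS_* on failure */
};

static void example_endio(struct bio *bio)
{
	/* New convention: any non-zero blk_status_t means the I/O failed. */
	if (bio->bi_status)
		fprintf(stderr, "bio failed: status %u\n", (unsigned)bio->bi_status);
}

int main(void)
{
	struct bio b = { .bi_status = 10 };	/* e.g. BLK_STS_IOERR */
	example_endio(&b);
	return 0;
}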