summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorMike Snitzer <snitzer@redhat.com>2018-07-31 17:27:02 -0400
committerMike Snitzer <snitzer@redhat.com>2018-07-31 17:33:21 -0400
commit7209049d40dc37791ce0f3738965296f30e26044 (patch)
tree1e469739cfedec594032343f889bea5dab3fffb2
parent63c8ecb6261abcb79191a264778e8dae222e67cf (diff)
dm kcopyd: return void from dm_kcopyd_copy()
dm_kcopyd_copy() only ever returns 0, so there is no need for callers to account for possible failure. The same goes for dm_kcopyd_zero(). Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--drivers/md/dm-cache-target.c16
-rw-r--r--drivers/md/dm-kcopyd.c16
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-thin.c23
-rw-r--r--drivers/md/dm-zoned-reclaim.c6
-rw-r--r--include/linux/dm-kcopyd.h12
6 files changed, 27 insertions(+), 63 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 44df244807e5..a53413371725 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1188,9 +1188,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
1188 queue_continuation(mg->cache->wq, &mg->k); 1188 queue_continuation(mg->cache->wq, &mg->k);
1189} 1189}
1190 1190
1191static int copy(struct dm_cache_migration *mg, bool promote) 1191static void copy(struct dm_cache_migration *mg, bool promote)
1192{ 1192{
1193 int r;
1194 struct dm_io_region o_region, c_region; 1193 struct dm_io_region o_region, c_region;
1195 struct cache *cache = mg->cache; 1194 struct cache *cache = mg->cache;
1196 1195
@@ -1203,11 +1202,9 @@ static int copy(struct dm_cache_migration *mg, bool promote)
1203 c_region.count = cache->sectors_per_block; 1202 c_region.count = cache->sectors_per_block;
1204 1203
1205 if (promote) 1204 if (promote)
1206 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k); 1205 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1207 else 1206 else
1208 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k); 1207 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1209
1210 return r;
1211} 1208}
1212 1209
1213static void bio_drop_shared_lock(struct cache *cache, struct bio *bio) 1210static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
@@ -1449,12 +1446,7 @@ static void mg_full_copy(struct work_struct *ws)
1449 } 1446 }
1450 1447
1451 init_continuation(&mg->k, mg_upgrade_lock); 1448 init_continuation(&mg->k, mg_upgrade_lock);
1452 1449 copy(mg, is_policy_promote);
1453 if (copy(mg, is_policy_promote)) {
1454 DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
1455 mg->k.input = BLK_STS_IOERR;
1456 mg_complete(mg, false);
1457 }
1458} 1450}
1459 1451
1460static void mg_copy(struct work_struct *ws) 1452static void mg_copy(struct work_struct *ws)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3c7547a3c371..cc101f3ec42c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -741,9 +741,9 @@ static void split_job(struct kcopyd_job *master_job)
741 } 741 }
742} 742}
743 743
744int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, 744void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
745 unsigned int num_dests, struct dm_io_region *dests, 745 unsigned int num_dests, struct dm_io_region *dests,
746 unsigned int flags, dm_kcopyd_notify_fn fn, void *context) 746 unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
747{ 747{
748 struct kcopyd_job *job; 748 struct kcopyd_job *job;
749 int i; 749 int i;
@@ -818,16 +818,14 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
818 job->progress = 0; 818 job->progress = 0;
819 split_job(job); 819 split_job(job);
820 } 820 }
821
822 return 0;
823} 821}
824EXPORT_SYMBOL(dm_kcopyd_copy); 822EXPORT_SYMBOL(dm_kcopyd_copy);
825 823
826int dm_kcopyd_zero(struct dm_kcopyd_client *kc, 824void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
827 unsigned num_dests, struct dm_io_region *dests, 825 unsigned num_dests, struct dm_io_region *dests,
828 unsigned flags, dm_kcopyd_notify_fn fn, void *context) 826 unsigned flags, dm_kcopyd_notify_fn fn, void *context)
829{ 827{
830 return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context); 828 dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
831} 829}
832EXPORT_SYMBOL(dm_kcopyd_zero); 830EXPORT_SYMBOL(dm_kcopyd_zero);
833 831
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 5903e492bb34..79eab1071ec2 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -326,9 +326,8 @@ static void recovery_complete(int read_err, unsigned long write_err,
326 dm_rh_recovery_end(reg, !(read_err || write_err)); 326 dm_rh_recovery_end(reg, !(read_err || write_err));
327} 327}
328 328
329static int recover(struct mirror_set *ms, struct dm_region *reg) 329static void recover(struct mirror_set *ms, struct dm_region *reg)
330{ 330{
331 int r;
332 unsigned i; 331 unsigned i;
333 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; 332 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
334 struct mirror *m; 333 struct mirror *m;
@@ -367,10 +366,8 @@ static int recover(struct mirror_set *ms, struct dm_region *reg)
367 if (!errors_handled(ms)) 366 if (!errors_handled(ms))
368 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags); 367 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
369 368
370 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, 369 dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
371 flags, recovery_complete, reg); 370 flags, recovery_complete, reg);
372
373 return r;
374} 371}
375 372
376static void reset_ms_flags(struct mirror_set *ms) 373static void reset_ms_flags(struct mirror_set *ms)
@@ -388,7 +385,6 @@ static void do_recovery(struct mirror_set *ms)
388{ 385{
389 struct dm_region *reg; 386 struct dm_region *reg;
390 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); 387 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
391 int r;
392 388
393 /* 389 /*
394 * Start quiescing some regions. 390 * Start quiescing some regions.
@@ -398,11 +394,8 @@ static void do_recovery(struct mirror_set *ms)
398 /* 394 /*
399 * Copy any already quiesced regions. 395 * Copy any already quiesced regions.
400 */ 396 */
401 while ((reg = dm_rh_recovery_start(ms->rh))) { 397 while ((reg = dm_rh_recovery_start(ms->rh)))
402 r = recover(ms, reg); 398 recover(ms, reg);
403 if (r)
404 dm_rh_recovery_end(reg, 0);
405 }
406 399
407 /* 400 /*
408 * Update the in sync flag. 401 * Update the in sync flag.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c44477d7a9ea..5997d6808b57 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1220,18 +1220,13 @@ static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
1220static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, 1220static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
1221 sector_t begin, sector_t end) 1221 sector_t begin, sector_t end)
1222{ 1222{
1223 int r;
1224 struct dm_io_region to; 1223 struct dm_io_region to;
1225 1224
1226 to.bdev = tc->pool_dev->bdev; 1225 to.bdev = tc->pool_dev->bdev;
1227 to.sector = begin; 1226 to.sector = begin;
1228 to.count = end - begin; 1227 to.count = end - begin;
1229 1228
1230 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); 1229 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
1231 if (r < 0) {
1232 DMERR_LIMIT("dm_kcopyd_zero() failed");
1233 copy_complete(1, 1, m);
1234 }
1235} 1230}
1236 1231
1237static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, 1232static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
@@ -1257,7 +1252,6 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1257 struct dm_bio_prison_cell *cell, struct bio *bio, 1252 struct dm_bio_prison_cell *cell, struct bio *bio,
1258 sector_t len) 1253 sector_t len)
1259{ 1254{
1260 int r;
1261 struct pool *pool = tc->pool; 1255 struct pool *pool = tc->pool;
1262 struct dm_thin_new_mapping *m = get_next_mapping(pool); 1256 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1263 1257
@@ -1296,19 +1290,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1296 to.sector = data_dest * pool->sectors_per_block; 1290 to.sector = data_dest * pool->sectors_per_block;
1297 to.count = len; 1291 to.count = len;
1298 1292
1299 r = dm_kcopyd_copy(pool->copier, &from, 1, &to, 1293 dm_kcopyd_copy(pool->copier, &from, 1, &to,
1300 0, copy_complete, m); 1294 0, copy_complete, m);
1301 if (r < 0) {
1302 DMERR_LIMIT("dm_kcopyd_copy() failed");
1303 copy_complete(1, 1, m);
1304
1305 /*
1306 * We allow the zero to be issued, to simplify the
1307 * error path. Otherwise we'd need to start
1308 * worrying about decrementing the prepare_actions
1309 * counter.
1310 */
1311 }
1312 1295
1313 /* 1296 /*
1314 * Do we need to zero a tail region? 1297 * Do we need to zero a tail region?
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 44a119e12f1a..edf4b95eb075 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -161,10 +161,8 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
161 161
162 /* Copy the valid region */ 162 /* Copy the valid region */
163 set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags); 163 set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
164 ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags, 164 dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
165 dmz_reclaim_kcopy_end, zrc); 165 dmz_reclaim_kcopy_end, zrc);
166 if (ret)
167 return ret;
168 166
169 /* Wait for copy to complete */ 167 /* Wait for copy to complete */
170 wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY, 168 wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index cfac8588ed56..e42de7750c88 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -62,9 +62,9 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
62typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err, 62typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
63 void *context); 63 void *context);
64 64
65int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, 65void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
66 unsigned num_dests, struct dm_io_region *dests, 66 unsigned num_dests, struct dm_io_region *dests,
67 unsigned flags, dm_kcopyd_notify_fn fn, void *context); 67 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
68 68
69/* 69/*
70 * Prepare a callback and submit it via the kcopyd thread. 70 * Prepare a callback and submit it via the kcopyd thread.
@@ -81,9 +81,9 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
81 dm_kcopyd_notify_fn fn, void *context); 81 dm_kcopyd_notify_fn fn, void *context);
82void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err); 82void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
83 83
84int dm_kcopyd_zero(struct dm_kcopyd_client *kc, 84void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
85 unsigned num_dests, struct dm_io_region *dests, 85 unsigned num_dests, struct dm_io_region *dests,
86 unsigned flags, dm_kcopyd_notify_fn fn, void *context); 86 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
87 87
88#endif /* __KERNEL__ */ 88#endif /* __KERNEL__ */
89#endif /* _LINUX_DM_KCOPYD_H */ 89#endif /* _LINUX_DM_KCOPYD_H */