diff options
author | Mike Snitzer <snitzer@redhat.com> | 2016-05-02 20:16:21 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2016-05-13 09:03:52 -0400 |
commit | 3dba53a958a758fe7bed5002f6a2846e1acefe8e (patch) | |
tree | 25f8fc0555390b619cdc92f18c03e3bd556aa81c /drivers/md/dm-thin.c | |
parent | 13e4f8a695aa1dc7c94525047fc2ffb9abc8125e (diff) |
dm thin: use __blkdev_issue_discard for async discard support
With commit 38f25255330 ("block: add __blkdev_issue_discard") DM thinp
no longer needs to carry its own async discard method.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r-- | drivers/md/dm-thin.c | 70 |
1 file changed, 16 insertions, 54 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index da42c4916ce6..598a78ba894b 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -322,56 +322,6 @@ struct thin_c { | |||
322 | 322 | ||
323 | /*----------------------------------------------------------------*/ | 323 | /*----------------------------------------------------------------*/ |
324 | 324 | ||
325 | /** | ||
326 | * __blkdev_issue_discard_async - queue a discard with async completion | ||
327 | * @bdev: blockdev to issue discard for | ||
328 | * @sector: start sector | ||
329 | * @nr_sects: number of sectors to discard | ||
330 | * @gfp_mask: memory allocation flags (for bio_alloc) | ||
331 | * @flags: BLKDEV_IFL_* flags to control behaviour | ||
332 | * @parent_bio: parent discard bio that all sub discards get chained to | ||
333 | * | ||
334 | * Description: | ||
335 | * Asynchronously issue a discard request for the sectors in question. | ||
336 | */ | ||
337 | static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector, | ||
338 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags, | ||
339 | struct bio *parent_bio) | ||
340 | { | ||
341 | struct request_queue *q = bdev_get_queue(bdev); | ||
342 | int type = REQ_WRITE | REQ_DISCARD; | ||
343 | struct bio *bio; | ||
344 | |||
345 | if (!q || !nr_sects) | ||
346 | return -ENXIO; | ||
347 | |||
348 | if (!blk_queue_discard(q)) | ||
349 | return -EOPNOTSUPP; | ||
350 | |||
351 | if (flags & BLKDEV_DISCARD_SECURE) { | ||
352 | if (!blk_queue_secdiscard(q)) | ||
353 | return -EOPNOTSUPP; | ||
354 | type |= REQ_SECURE; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Required bio_put occurs in bio_endio thanks to bio_chain below | ||
359 | */ | ||
360 | bio = bio_alloc(gfp_mask, 1); | ||
361 | if (!bio) | ||
362 | return -ENOMEM; | ||
363 | |||
364 | bio_chain(bio, parent_bio); | ||
365 | |||
366 | bio->bi_iter.bi_sector = sector; | ||
367 | bio->bi_bdev = bdev; | ||
368 | bio->bi_iter.bi_size = nr_sects << 9; | ||
369 | |||
370 | submit_bio(type, bio); | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static bool block_size_is_power_of_two(struct pool *pool) | 325 | static bool block_size_is_power_of_two(struct pool *pool) |
376 | { | 326 | { |
377 | return pool->sectors_per_block_shift >= 0; | 327 | return pool->sectors_per_block_shift >= 0; |
@@ -387,11 +337,23 @@ static sector_t block_to_sectors(struct pool *pool, dm_block_t b) | |||
387 | static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e, | 337 | static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e, |
388 | struct bio *parent_bio) | 338 | struct bio *parent_bio) |
389 | { | 339 | { |
340 | int type = REQ_WRITE | REQ_DISCARD; | ||
390 | sector_t s = block_to_sectors(tc->pool, data_b); | 341 | sector_t s = block_to_sectors(tc->pool, data_b); |
391 | sector_t len = block_to_sectors(tc->pool, data_e - data_b); | 342 | sector_t len = block_to_sectors(tc->pool, data_e - data_b); |
343 | struct bio *bio = NULL; | ||
344 | struct blk_plug plug; | ||
345 | int ret; | ||
346 | |||
347 | blk_start_plug(&plug); | ||
348 | ret = __blkdev_issue_discard(tc->pool_dev->bdev, s, len, | ||
349 | GFP_NOWAIT, type, &bio); | ||
350 | if (!ret && bio) { | ||
351 | bio_chain(bio, parent_bio); | ||
352 | submit_bio(type, bio); | ||
353 | } | ||
354 | blk_finish_plug(&plug); | ||
392 | 355 | ||
393 | return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len, | 356 | return ret; |
394 | GFP_NOWAIT, 0, parent_bio); | ||
395 | } | 357 | } |
396 | 358 | ||
397 | /*----------------------------------------------------------------*/ | 359 | /*----------------------------------------------------------------*/ |
@@ -1543,11 +1505,11 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t | |||
1543 | 1505 | ||
1544 | /* | 1506 | /* |
1545 | * The parent bio must not complete before sub discard bios are | 1507 | * The parent bio must not complete before sub discard bios are |
1546 | * chained to it (see __blkdev_issue_discard_async's bio_chain)! | 1508 | * chained to it (see issue_discard's bio_chain)! |
1547 | * | 1509 | * |
1548 | * This per-mapping bi_remaining increment is paired with | 1510 | * This per-mapping bi_remaining increment is paired with |
1549 | * the implicit decrement that occurs via bio_endio() in | 1511 | * the implicit decrement that occurs via bio_endio() in |
1550 | * process_prepared_discard_{passdown,no_passdown}. | 1512 | * process_prepared_discard_passdown(). |
1551 | */ | 1513 | */ |
1552 | bio_inc_remaining(bio); | 1514 | bio_inc_remaining(bio); |
1553 | if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) | 1515 | if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) |