author		Christoph Hellwig <hch@lst.de>		2016-06-09 10:00:36 -0400
committer	Jens Axboe <axboe@fb.com>		2016-06-09 11:52:25 -0400
commit		288dab8a35a0bde426a09870943c8d3ee3a50dab (patch)
tree		483fd3eb60ff8f44d149fb28d3b79e5212645104 /block
parent		56332f02a562390a3198525ad78cb4f558805c0f (diff)
block: add a separate operation type for secure erase
Instead of overloading the discard support with the REQ_SECURE flag,
add a separate operation type for secure erase. Use the opportunity
to rename the queue flag as well, and remove the dead checks for this
flag in the RAID 1 and RAID 10 drivers that don't claim support for
secure erase.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
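
In caller terms the change looks roughly like this. A hypothetical
sketch, not part of this patch (the function name and variables are
made up; the accessors and op values are the ones this patch
introduces): a submitter that previously tagged a discard bio with the
REQ_SECURE modifier now selects a distinct operation type instead.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/*
	 * Hypothetical sketch: submit one erase bio, using the new
	 * secure-erase op when the queue advertises support and an
	 * ordinary discard otherwise.
	 */
	static void example_submit_erase(struct block_device *bdev,
					 sector_t sector, sector_t nr_sects)
	{
		struct request_queue *q = bdev_get_queue(bdev);
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = nr_sects << 9;
		if (blk_queue_secure_erase(q))
			/* previously: bio_set_op_attrs(bio, REQ_OP_DISCARD, REQ_SECURE) */
			bio_set_op_attrs(bio, REQ_OP_SECURE_ERASE, 0);
		else
			bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		submit_bio(bio);
	}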
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	| 27
-rw-r--r--	block/blk-lib.c		| 25
-rw-r--r--	block/blk-merge.c	|  6
3 files changed, 33 insertions(+), 25 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 32a283eb7274..db31a2981223 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1977,16 +1977,21 @@ generic_make_request_checks(struct bio *bio)
 		}
 	}
 
-	if ((bio_op(bio) == REQ_OP_DISCARD) &&
-	    (!blk_queue_discard(q) ||
-	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
-		err = -EOPNOTSUPP;
-		goto end_io;
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+		if (!blk_queue_discard(q))
+			goto not_supported;
+		break;
+	case REQ_OP_SECURE_ERASE:
+		if (!blk_queue_secure_erase(q))
+			goto not_supported;
+		break;
+	case REQ_OP_WRITE_SAME:
+		if (!bdev_write_same(bio->bi_bdev))
+			goto not_supported;
+		break;
+	default:
+		break;
 	}
 
 	/*
@@ -2003,6 +2008,8 @@ generic_make_request_checks(struct bio *bio)
 	trace_block_bio_queue(q, bio);
 	return true;
 
+not_supported:
+	err = -EOPNOTSUPP;
 end_io:
 	bio->bi_error = err;
 	bio_endio(bio);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ff2a7f04af4d..78626c2fde33 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -23,20 +23,27 @@ static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
 }
 
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+		sector_t nr_sects, gfp_t gfp_mask, int flags,
 		struct bio **biop)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
 	unsigned int granularity;
+	enum req_op op;
 	int alignment;
 
 	if (!q)
 		return -ENXIO;
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-	if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
-		return -EOPNOTSUPP;
+
+	if (flags & BLKDEV_DISCARD_SECURE) {
+		if (!blk_queue_secure_erase(q))
+			return -EOPNOTSUPP;
+		op = REQ_OP_SECURE_ERASE;
+	} else {
+		if (!blk_queue_discard(q))
+			return -EOPNOTSUPP;
+		op = REQ_OP_DISCARD;
+	}
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -66,7 +73,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio = next_bio(bio, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
-		bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);
+		bio_set_op_attrs(bio, op, 0);
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
@@ -100,16 +107,12 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int op_flags = 0;
 	struct bio *bio = NULL;
 	struct blk_plug plug;
 	int ret;
 
-	if (flags & BLKDEV_DISCARD_SECURE)
-		op_flags |= REQ_SECURE;
-
 	blk_start_plug(&plug);
-	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
 			&bio);
 	if (!ret && bio) {
 		ret = submit_bio_wait(bio);
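
The external interface of blkdev_issue_discard() is unchanged by the
rework above; only the translation of BLKDEV_DISCARD_SECURE moves down
into __blkdev_issue_discard(). A hypothetical user, for illustration
only (the function name and the 1 MiB range are made up):

	#include <linux/blkdev.h>

	/*
	 * Hypothetical example: securely erase the first 1 MiB of bdev,
	 * falling back to an ordinary discard if the device has no
	 * secure-erase support.
	 */
	static int example_secure_trim(struct block_device *bdev)
	{
		sector_t nr_sects = (1024 * 1024) >> 9;	/* 1 MiB in 512-byte sectors */
		int ret;

		ret = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL,
					   BLKDEV_DISCARD_SECURE);
		if (ret == -EOPNOTSUPP)
			ret = blkdev_issue_discard(bdev, 0, nr_sects,
						   GFP_KERNEL, 0);
		return ret;
	}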
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c265348b75d1..9772308a8391 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -649,8 +649,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return 0;
 
-	if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
-				   req_op(next)))
+	if (req_op(req) != req_op(next))
 		return 0;
 
 	/*
@@ -752,8 +751,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
-	if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
-				   bio_op(bio)))
+	if (req_op(rq) != bio_op(bio))
 		return false;
 
 	/* different data direction or already started, don't merge */
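
The merge-path simplification falls out of the same design point: once
secure erase is an operation rather than a modifier flag, comparing
operation types is the whole compatibility check, which is why the
blk_check_merge_flags() calls can be replaced. An illustrative sketch
(the helper name is made up):

	/*
	 * Illustrative only: REQ_OP_SECURE_ERASE and REQ_OP_DISCARD are
	 * now distinct op values, so a plain equality test already
	 * refuses to merge a secure erase with an ordinary discard.
	 */
	static inline bool example_ops_mergeable(struct request *req,
						 struct request *next)
	{
		return req_op(req) == req_op(next);
	}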