| field     | value                                                            | date                      |
|-----------|------------------------------------------------------------------|---------------------------|
| author    | Christoph Hellwig <hch@lst.de>                                   | 2016-06-09 10:00:36 -0400 |
| committer | Jens Axboe <axboe@fb.com>                                        | 2016-06-09 11:52:25 -0400 |
| commit    | 288dab8a35a0bde426a09870943c8d3ee3a50dab                         |                           |
| tree      | 483fd3eb60ff8f44d149fb28d3b79e5212645104 /include/linux/blkdev.h |                           |
| parent    | 56332f02a562390a3198525ad78cb4f558805c0f                         |                           |
block: add a separate operation type for secure erase
Instead of overloading the discard support with the REQ_SECURE flag,
add a separate operation type for secure erase. Use the opportunity to
rename the queue flag as well, and remove the dead checks for this flag
in the RAID 1 and RAID 10 drivers, which don't claim support for secure
erase.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
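In practice the split means a driver stops inspecting a modifier flag on discard requests and matches on the operation itself. A minimal sketch of the new-style dispatch, assuming the REQ_OP_SECURE_ERASE operation type added elsewhere in this patch (this page only shows the blkdev.h hunks); example_handle_request() is a hypothetical handler, not code from the patch:

```c
#include <linux/blkdev.h>

/* Hypothetical request handler: illustrative only, not part of this patch. */
static void example_handle_request(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		/* Plain discard: the device may simply drop the blocks. */
		break;
	case REQ_OP_SECURE_ERASE:
		/* Previously expressed as REQ_OP_DISCARD with REQ_SECURE set. */
		break;
	default:
		break;
	}
}
```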
Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 23 ++++-------------------
 1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0c9f8793c87e..53fee6123893 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -497,7 +497,7 @@ struct request_queue {
 #define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
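The rename is mechanical: bit 17 keeps its meaning, only the name changes from SECDISCARD to SECERASE. A driver supporting the operation would set the flag at queue setup, roughly as below; example_setup_queue() is hypothetical, while queue_flag_set_unlocked() is the existing unlocked helper from this header:

```c
#include <linux/blkdev.h>

/* Hypothetical queue-init fragment advertising secure-erase support. */
static void example_setup_queue(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	/* Formerly QUEUE_FLAG_SECDISCARD. */
	queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
```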
@@ -593,8 +593,8 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
-	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_secure_erase(q) \
+	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 
 #define blk_noretry_request(rq)	\
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT|	\
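Note the semantic change alongside the rename: blk_queue_secdiscard() required blk_queue_discard() as well, whereas blk_queue_secure_erase() tests only the new flag, so secure-erase support no longer implies discard support. A caller gating a secure erase might look like the sketch below; example_try_secure_erase() is hypothetical, while bdev_get_queue(), blkdev_issue_discard(), and BLKDEV_DISCARD_SECURE are existing interfaces:

```c
#include <linux/blkdev.h>

/* Hypothetical caller: refuse early if the queue lacks secure erase. */
static int example_try_secure_erase(struct block_device *bdev,
				    sector_t sector, sector_t nr_sects)
{
	if (!blk_queue_secure_erase(bdev_get_queue(bdev)))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}
```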
@@ -675,21 +675,6 @@ static inline bool rq_mergeable(struct request *rq)
 	return true;
 }
 
-static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
-					 unsigned int flags2, unsigned int op2)
-{
-	if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
-		return false;
-
-	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
-		return false;
-
-	if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
-		return false;
-
-	return true;
-}
-
 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 {
 	if (bio_data(a) == bio_data(b))
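blk_check_merge_flags() can go because its three cases collapse into one: with discard, secure erase, and write-same each a distinct operation type, requests of different ops never merge, so a plain op comparison subsumes the old REQ_SECURE flag check. Sketched below for illustration; example_ops_mergeable() is not the kernel's actual merge helper:

```c
#include <linux/blkdev.h>

/* Illustrative replacement invariant: merging requires identical op types. */
static inline bool example_ops_mergeable(struct request *a, struct request *b)
{
	return req_op(a) == req_op(b);
}
```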
@@ -1158,7 +1143,7 @@ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+		sector_t nr_sects, gfp_t gfp_mask, int flags,
 		struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
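With REQ_SECURE gone, the parameter no longer carries request op flags, hence the rename from op_flags to flags: it now takes the BLKDEV_DISCARD_* values, and __blkdev_issue_discard() selects REQ_OP_SECURE_ERASE versus REQ_OP_DISCARD internally when BLKDEV_DISCARD_SECURE is set. A hedged usage sketch of the two-step build-then-submit interface, assuming the single-argument submit_bio_wait() from the op/flags conversion this patch builds on; example_secure_discard() is hypothetical:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller: build the bio chain, then submit and wait. */
static int example_secure_discard(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				     BLKDEV_DISCARD_SECURE, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}
```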
