author		Christoph Hellwig <hch@lst.de>		2016-06-09 10:00:36 -0400
committer	Jens Axboe <axboe@fb.com>		2016-06-09 11:52:25 -0400
commit		288dab8a35a0bde426a09870943c8d3ee3a50dab
tree		483fd3eb60ff8f44d149fb28d3b79e5212645104 /include/linux
parent		56332f02a562390a3198525ad78cb4f558805c0f
block: add a separate operation type for secure erase
Instead of overloading discard support with the REQ_SECURE flag, add a
separate operation type for secure erase. Use the opportunity to rename the
queue flag as well, and remove the dead checks for this flag in the RAID 1
and RAID 10 drivers, which do not claim support for secure erase.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
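As a rough caller-side sketch (not taken from this patch): a submitter that
wants a secure discard gates on the renamed queue flag and keeps using the
existing blkdev_issue_discard() interface with BLKDEV_DISCARD_SECURE; the
translation of that flag into REQ_OP_SECURE_ERASE is assumed to happen inside
the block layer elsewhere in this series.

	/* Hypothetical caller; bdev, sector and nr_sects come from context. */
	static int issue_secure_erase(struct block_device *bdev, sector_t sector,
				      sector_t nr_sects)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* Renamed test replaces the old blk_queue_secdiscard() check. */
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;

		return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
					    BLKDEV_DISCARD_SECURE);
	}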
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk_types.h	5
-rw-r--r--	include/linux/blkdev.h		23
2 files changed, 6 insertions, 22 deletions
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 562ab8301217..efba1f2ace2e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -163,7 +163,6 @@ enum rq_flag_bits {
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_PRIO,		/* boost priority in cfq */
-	__REQ_SECURE,		/* secure discard (used with REQ_OP_DISCARD) */
 
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
@@ -212,7 +211,7 @@ enum rq_flag_bits {
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
-	 REQ_PREFLUSH | REQ_FUA | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
+	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 /* This mask is used for both bio and request merge checking */
@@ -239,7 +238,6 @@ enum rq_flag_bits {
 #define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
 #define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
-#define REQ_SECURE		(1ULL << __REQ_SECURE)
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
@@ -248,6 +246,7 @@ enum req_op {
 	REQ_OP_READ,
 	REQ_OP_WRITE,
 	REQ_OP_DISCARD,		/* request to discard sectors */
+	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
 	REQ_OP_WRITE_SAME,	/* write same block many times */
 	REQ_OP_FLUSH,		/* request for cache flush */
 };
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0c9f8793c87e..53fee6123893 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -497,7 +497,7 @@ struct request_queue {
 #define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
@@ -593,8 +593,8 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
-	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_secure_erase(q) \
+	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -675,21 +675,6 @@ static inline bool rq_mergeable(struct request *rq)
 	return true;
 }
 
-static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
-					 unsigned int flags2, unsigned int op2)
-{
-	if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
-		return false;
-
-	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
-		return false;
-
-	if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
-		return false;
-
-	return true;
-}
-
 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 {
 	if (bio_data(a) == bio_data(b))
@@ -1158,7 +1143,7 @@ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+		sector_t nr_sects, gfp_t gfp_mask, int flags,
 		struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
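
On the driver side, the point of the dedicated op is that request handlers can
switch on the operation instead of testing a flag. A minimal sketch, assuming
the req_op() accessor from the same 4.8 op/flags split series; the mydev_*
helpers are hypothetical stand-ins for whatever the driver already implements:

	/* Hypothetical request handler fragment. */
	static int mydev_handle_request(struct request *rq)
	{
		switch (req_op(rq)) {
		case REQ_OP_DISCARD:
			return mydev_discard(rq);	/* hypothetical helper */
		case REQ_OP_SECURE_ERASE:	/* was: REQ_OP_DISCARD + REQ_SECURE */
			return mydev_secure_erase(rq);	/* hypothetical helper */
		default:
			return mydev_read_write(rq);	/* hypothetical helper */
		}
	}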