path: root/include/linux/blkdev.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	93
1 file changed, 74 insertions, 19 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5dbf6c26..25119041e034 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -82,17 +82,17 @@ enum rq_cmd_type_bits {
 enum {
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
 	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
-	REQ_LB_OP_DISCARD	= 0x42,		/* discard sectors */
 };
 
 /*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	/* above flags must match BIO_RW_* */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -114,6 +114,7 @@ enum rq_flag_bits {
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -142,6 +143,10 @@ enum rq_flag_bits {
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 #define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 #define REQ_IO_STAT	(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE	(1 << __REQ_MIXED_MERGE)
+
+#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
+				 REQ_FAILFAST_DRIVER)
 
 #define BLK_MAX_CDB	16
 
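For context, a minimal sketch (not part of this patch, helper name hypothetical) of how a driver might test the combined failfast bits introduced by the REQ_FAILFAST_MASK define above:

#include <linux/blkdev.h>

/* Hypothetical helper: true if any failfast attribute is set on rq. */
static bool my_rq_is_failfast(struct request *rq)
{
	/* REQ_FAILFAST_MASK covers device, transport and driver failfast bits */
	return (rq->cmd_flags & REQ_FAILFAST_MASK) != 0;
}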
@@ -255,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
-typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -307,6 +311,7 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
@@ -334,7 +339,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
-	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
@@ -453,10 +457,13 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
+#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
+#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
-				 (1 << QUEUE_FLAG_STACKABLE))
+				 (1 << QUEUE_FLAG_STACKABLE) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -575,6 +582,7 @@ enum {
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
@@ -582,6 +590,7 @@ enum {
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
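As an illustration only, a hypothetical helper that gates discard use on the new QUEUE_FLAG_DISCARD via the blk_queue_discard() macro added above:

#include <linux/blkdev.h>

/* Hypothetical helper: report whether a block device advertises DISCARD. */
static bool my_bdev_supports_discard(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return q && blk_queue_discard(q);
}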
@@ -828,11 +837,13 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
  * blk_rq_cur_bytes()		: bytes left in the current segment
- * blk_rq_sectors()		: sectors left in the entire request
- * blk_rq_cur_sectors()	: sectors left in the current segment
+ * blk_rq_err_bytes()		: bytes left till the next error boundary
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
+ * blk_rq_err_sectors()	: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -849,6 +860,8 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 }
 
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> 9;
@@ -859,6 +872,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_rq_err_sectors(const struct request *rq)
+{
+	return blk_rq_err_bytes(rq) >> 9;
+}
+
 /*
  * Request issue related functions.
  */
@@ -885,10 +903,12 @@ extern bool blk_end_request(struct request *rq, int error,
 			      unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
 extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
 extern bool __blk_end_request(struct request *rq, int error,
 			      unsigned int nr_bytes);
 extern void __blk_end_request_all(struct request *rq, int error);
 extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
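A hedged sketch of a driver completion path using the new error-boundary helpers; the function name and overall structure are illustrative, not taken from this patch:

#include <linux/blkdev.h>

/*
 * Illustrative only: on error, fail just the bytes up to the next error
 * boundary reported by blk_rq_err_bytes(), so that a mixed merge of
 * failfast and non-failfast bios is not failed wholesale.
 */
static void my_complete_request(struct request *rq, int error)
{
	if (error) {
		unsigned int nr_bytes = blk_rq_err_bytes(rq);

		if (blk_end_request(rq, error, nr_bytes))
			return;	/* part of the request is still pending */
	} else {
		blk_end_request_all(rq, 0);
	}
}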
@@ -909,11 +929,15 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
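Purely as an example, a hypothetical queue setup for a driver whose hardware supports discard; the function name and the 16384-sector cap are made up:

#include <linux/blkdev.h>

/* Illustrative queue setup: advertise discard and cap its size. */
static void my_queue_setup(struct request_queue *q)
{
	/* limit a single discard request to 16384 sectors (8 MB) */
	blk_queue_max_discard_sectors(q, 16384);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}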
@@ -933,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -976,15 +999,18 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
-extern int blkdev_issue_discard(struct block_device *,
-				sector_t sector, sector_t nr_sects, gfp_t);
+#define DISCARD_FL_WAIT		0x01	/* wait for completion */
+#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
+extern int blkdev_issue_discard(struct block_device *, sector_t sector,
+				sector_t nr_sects, gfp_t, int flags);
 
 static inline int sb_issue_discard(struct super_block *sb,
 				   sector_t block, sector_t nr_blocks)
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+				    DISCARD_FL_BARRIER);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
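An illustrative caller of the new blkdev_issue_discard() signature; the wrapper name is hypothetical and error handling is left to the caller:

#include <linux/blkdev.h>

/*
 * Illustrative sketch: discard nr_sects sectors starting at "sector",
 * waiting for completion and issuing the discard as a barrier.
 */
static int my_discard_range(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    DISCARD_FL_WAIT | DISCARD_FL_BARRIER);
}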
@@ -1055,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+	return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_min(struct request_queue *q)
 {
 	return q->limits.io_min;
 }
 
+static inline int bdev_io_min(struct block_device *bdev)
+{
+	return queue_io_min(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_opt(struct request_queue *q)
 {
 	return q->limits.io_opt;
 }
 
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+	return queue_io_opt(bdev_get_queue(bdev));
+}
+
 static inline int queue_alignment_offset(struct request_queue *q)
 {
-	if (q && q->limits.misaligned)
+	if (q->limits.misaligned)
 		return -1;
 
-	if (q && q->limits.alignment_offset)
-		return q->limits.alignment_offset;
-
-	return 0;
+	return q->limits.alignment_offset;
 }
 
 static inline int queue_sector_alignment_offset(struct request_queue *q,
@@ -1083,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
 		& (q->limits.io_min - 1);
 }
 
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q->limits.misaligned)
+		return -1;
+
+	if (bdev != bdev->bd_contains)
+		return bdev->bd_part->alignment_offset;
+
+	return q->limits.alignment_offset;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
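For illustration, a hypothetical consumer of the new bdev_* topology helpers, such as a filesystem sizing its allocation units at mount time; the function name and message format are made up:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Illustrative only: dump the I/O topology of a block device. */
static void my_report_topology(struct block_device *bdev)
{
	pr_info("physical block %d, io_min %d, io_opt %d, alignment %d\n",
		bdev_physical_block_size(bdev), bdev_io_min(bdev),
		bdev_io_opt(bdev), bdev_alignment_offset(bdev));
}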
@@ -1121,7 +1172,11 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
+struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
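Finally, an illustrative sketch of deferring work onto kblockd through the new delayed-work hook; the work item, handler name, and one-second delay are made up for the example:

#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Illustrative only: driver-specific processing deferred to kblockd. */
static void my_work_fn(struct work_struct *work)
{
	/* driver-specific processing would go here */
}

static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void my_kick_queue_later(struct request_queue *q)
{
	kblockd_schedule_delayed_work(q, &my_dwork, HZ);
}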