path: root/include/linux/blkdev.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  48
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e23a86cae5ac..25119041e034 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -82,7 +82,6 @@ enum rq_cmd_type_bits {
 enum {
 	REQ_LB_OP_EJECT = 0x40,		/* eject request */
 	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
-	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };
 
 /*
@@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
-typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -313,6 +311,7 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
@@ -340,7 +339,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
-	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
@@ -460,6 +458,7 @@ struct request_queue
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
+#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
@@ -591,6 +590,7 @@ enum {
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
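For context, a minimal sketch of the consumer side of the new blk_queue_discard() test; the helper name example_bdev_supports_discard is invented for illustration, and bdev_get_queue() is the accessor already declared elsewhere in this header:

/* Illustrative only: check whether the underlying queue advertises discard. */
static inline int example_bdev_supports_discard(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return q && blk_queue_discard(q);
}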
@@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
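A hedged sketch of the provider side: a driver that can unmap sectors would publish its discard limit and set the new queue flag during queue setup. The function name example_init_queue and the 65535-sector limit are illustrative, and queue_flag_set_unlocked() is assumed to be the appropriate setter at init time:

/* Illustrative driver setup path, not part of this patch. */
static void example_init_queue(struct request_queue *q)
{
	/* Largest discard the hardware accepts, in 512-byte sectors (assumed value). */
	blk_queue_max_discard_sectors(q, 65535);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}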
@@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+	return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_min(struct request_queue *q)
 {
 	return q->limits.io_min;
 }
 
+static inline int bdev_io_min(struct block_device *bdev)
+{
+	return queue_io_min(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_opt(struct request_queue *q)
 {
 	return q->limits.io_opt;
 }
 
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+	return queue_io_opt(bdev_get_queue(bdev));
+}
+
 static inline int queue_alignment_offset(struct request_queue *q)
 {
-	if (q && q->limits.misaligned)
+	if (q->limits.misaligned)
 		return -1;
 
-	if (q && q->limits.alignment_offset)
-		return q->limits.alignment_offset;
-
-	return 0;
+	return q->limits.alignment_offset;
 }
 
 static inline int queue_sector_alignment_offset(struct request_queue *q,
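The new bdev_* wrappers let filesystems and stacked drivers read I/O topology from a block_device without reaching into the queue directly. A minimal sketch of a consumer, with the function name example_pick_granularity invented for illustration:

/* Illustrative only: pick an allocation granularity from the reported topology. */
static unsigned int example_pick_granularity(struct block_device *bdev)
{
	unsigned int gran = bdev_io_opt(bdev);		/* preferred I/O size, 0 if unknown */

	if (!gran)
		gran = bdev_io_min(bdev);		/* fall back to the minimum I/O size */
	if (!gran)
		gran = bdev_physical_block_size(bdev);	/* last resort: the physical sector size */
	return gran;
}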
@@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
 		& (q->limits.io_min - 1);
 }
 
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q->limits.misaligned)
+		return -1;
+
+	if (bdev != bdev->bd_contains)
+		return bdev->bd_part->alignment_offset;
+
+	return q->limits.alignment_offset;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
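Note that bdev_alignment_offset() distinguishes partitions from whole devices: bd_contains points back to the whole-disk block_device, so a partition reports its own precomputed offset from bd_part. A short, hypothetical caller (the function name is invented):

/* Illustrative only: report when a device or partition does not start on its natural boundary. */
static void example_check_alignment(struct block_device *bdev)
{
	int offset = bdev_alignment_offset(bdev);

	if (offset)	/* -1 means misaligned, > 0 is the shift from the natural boundary */
		printk(KERN_INFO "alignment offset: %d\n", offset);
}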
@@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
+struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
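kblockd_schedule_delayed_work() mirrors kblockd_schedule_work() but defers the work item by the given number of jiffies. A hedged usage sketch; the wrapper name, the caller-provided delayed_work, and the 10 ms delay are illustrative, and msecs_to_jiffies() comes from linux/jiffies.h:

/* Illustrative only: re-run queue processing on kblockd after a short delay. */
static void example_kick_queue_later(struct request_queue *q,
				     struct delayed_work *dwork)
{
	kblockd_schedule_delayed_work(q, dwork, msecs_to_jiffies(10));
}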