author		Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
committer	Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
commit		32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree		b1ce838a37044bb38dfc128e2116ca35630e629a /include/linux/blkdev.h
parent		22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent		c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu

Conflicts:
	arch/powerpc/platforms/pseries/hvCall.S
	include/linux/percpu.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	115
1 file changed, 86 insertions(+), 29 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e23a86cae5ac..9b98173a8184 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -82,7 +82,6 @@ enum rq_cmd_type_bits {
 enum {
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
 	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
-	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };
 
 /*
@@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
-typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -313,13 +311,18 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;
+	unsigned int		discard_granularity;
+	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
 	unsigned short		max_phys_segments;
 
 	unsigned char		misaligned;
+	unsigned char		discard_misaligned;
 	unsigned char		no_cluster;
+	signed char		discard_zeroes_data;
 };
 
 struct request_queue
@@ -340,7 +343,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
-	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
@@ -460,6 +462,7 @@ struct request_queue
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
+#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
@@ -591,6 +594,7 @@ enum {
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -749,6 +753,17 @@ struct req_iterator {
 #define rq_iter_last(rq, _iter)					\
 		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
 
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
+#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+extern void rq_flush_dcache_pages(struct request *rq);
+#else
+static inline void rq_flush_dcache_pages(struct request *rq)
+{
+}
+#endif
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
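The rq_flush_dcache_pages() helper added in the hunk above gives drivers that
fill a request's pages through the kernel mapping one call to keep aliasing
D-caches coherent; on architectures that define
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 0 it compiles away to an empty inline.
A minimal caller sketch, assuming a hypothetical memory-backed driver (the
exdrv_* name is invented for illustration):

#include <linux/blkdev.h>

/*
 * Hypothetical completion path: after copying data into the request's
 * pages via the kernel mapping, flush the D-cache for each page so user
 * mappings observe the new data, then complete the request (queue lock
 * assumed held, as __blk_end_request_all() requires).
 */
static void exdrv_complete_read(struct request *rq, int error)
{
	if (!error && rq_data_dir(rq) == READ)
		rq_flush_dcache_pages(rq);

	__blk_end_request_all(rq, error);
}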
@@ -823,19 +838,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 	return bdev->bd_disk->queue;
 }
 
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
-				       struct page *page)
-{
-	if (bdi && bdi->unplug_io_fn)
-		bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
 /*
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
@@ -843,7 +845,6 @@ static inline void blk_run_address_space(struct address_space *mapping)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()	: sectors left in the current segment
- * blk_rq_err_sectors()	: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -872,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-	return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
@@ -929,6 +925,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
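Together with the discard fields added to struct queue_limits earlier in this
diff, blk_queue_max_discard_sectors() replaces the prepare_discard_fn hook
that this diff removes: a driver now advertises its discard geometry on the
queue and sets QUEUE_FLAG_DISCARD. A sketch under the API shown here; the
exdrv_* name and the numeric values are illustrative only:

#include <linux/blkdev.h>

/* Hypothetical SSD driver: advertise discard support at probe time. */
static void exdrv_setup_discard(struct request_queue *q)
{
	/* Accept at most 8 MiB (16384 512-byte sectors) per discard. */
	blk_queue_max_discard_sectors(q, 16384);

	/* Device-reported discard geometry (values made up here). */
	q->limits.discard_granularity = 4096;
	q->limits.discard_alignment = 0;

	/* This device reads back zeroes for discarded blocks. */
	q->limits.discard_zeroes_data = 1;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

Submitters then gate on the blk_queue_discard() macro added above, and can
consult queue_discard_zeroes_data() (added further down) before relying on
discarded ranges reading back as zeroes.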
@@ -955,7 +953,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -1080,32 +1077,92 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+	return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_min(struct request_queue *q)
 {
 	return q->limits.io_min;
 }
 
+static inline int bdev_io_min(struct block_device *bdev)
+{
+	return queue_io_min(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_opt(struct request_queue *q)
 {
 	return q->limits.io_opt;
 }
 
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+	return queue_io_opt(bdev_get_queue(bdev));
+}
+
 static inline int queue_alignment_offset(struct request_queue *q)
 {
-	if (q && q->limits.misaligned)
+	if (q->limits.misaligned)
 		return -1;
 
-	if (q && q->limits.alignment_offset)
-		return q->limits.alignment_offset;
+	return q->limits.alignment_offset;
+}
 
-	return 0;
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+{
+	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+
+	offset &= granularity - 1;
+	return (granularity + lim->alignment_offset - offset) & (granularity - 1);
 }
 
 static inline int queue_sector_alignment_offset(struct request_queue *q,
 						sector_t sector)
 {
-	return ((sector << 9) - q->limits.alignment_offset)
-		& (q->limits.io_min - 1);
+	return queue_limit_alignment_offset(&q->limits, sector << 9);
+}
+
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q->limits.misaligned)
+		return -1;
+
+	if (bdev != bdev->bd_contains)
+		return bdev->bd_part->alignment_offset;
+
+	return q->limits.alignment_offset;
+}
+
+static inline int queue_discard_alignment(struct request_queue *q)
+{
+	if (q->limits.discard_misaligned)
+		return -1;
+
+	return q->limits.discard_alignment;
+}
+
+static inline int queue_sector_discard_alignment(struct request_queue *q,
+						 sector_t sector)
+{
+	return ((sector << 9) - q->limits.discard_alignment)
+		& (q->limits.discard_granularity - 1);
+}
+
+static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+{
+	if (q->limits.discard_zeroes_data == 1)
+		return 1;
+
+	return 0;
+}
+
+static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
+{
+	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
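The rewritten alignment helpers in the hunk above reduce to one computation:
queue_limit_alignment_offset() reports, within a granularity window of
max(physical_block_size, io_min) bytes, how far a byte offset sits from the
next naturally aligned boundary (granularity is assumed to be a power of two,
as in the kernel). A standalone userspace re-implementation with concrete
numbers, purely to illustrate the arithmetic; this is not kernel code:

#include <stdio.h>

/* Mirrors the math of the kernel's queue_limit_alignment_offset(). */
static unsigned int limit_alignment_offset(unsigned int physical_block_size,
					   unsigned int io_min,
					   unsigned int alignment_offset,
					   unsigned long long offset)
{
	unsigned int granularity = physical_block_size > io_min ?
				   physical_block_size : io_min;

	offset &= granularity - 1;	/* position inside the window */
	return (granularity + alignment_offset - offset) & (granularity - 1);
}

int main(void)
{
	/* 4 KiB physical blocks, naturally aligned disk, partition at the
	 * traditional sector 63: 63 * 512 = 32256, 32256 % 4096 = 3584,
	 * and 4096 - 3584 = 512 bytes to the next aligned boundary. */
	printf("%u\n", limit_alignment_offset(4096, 4096, 0, 63ULL * 512));
	return 0;	/* prints 512 */
}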