Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	124
1 files changed, 73 insertions, 51 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 221cecd86bd3..6690e8bae7bb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -158,7 +158,6 @@ enum rq_flag_bits {
 struct request {
 	struct list_head queuelist;
 	struct call_single_data csd;
-	int cpu;
 
 	struct request_queue *q;
 
@@ -166,9 +165,11 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
+	int cpu;
+
 	/* the following two fields are internal, NEVER access directly */
-	sector_t __sector;		/* sector cursor */
 	unsigned int __data_len;	/* total data len */
+	sector_t __sector;		/* sector cursor */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -201,20 +202,20 @@ struct request {
 
 	unsigned short ioprio;
 
+	int ref_count;
+
 	void *special;		/* opaque pointer available for LLD use */
 	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
 
-	int ref_count;
-
 	/*
 	 * when request is used as a packet command carrier
 	 */
-	unsigned short cmd_len;
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
+	unsigned short cmd_len;
 
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
@@ -312,13 +313,16 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		discard_granularity;
+	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
-	unsigned short		max_hw_segments;
-	unsigned short		max_phys_segments;
+	unsigned short		max_segments;
 
 	unsigned char		misaligned;
+	unsigned char		discard_misaligned;
 	unsigned char		no_cluster;
+	signed char		discard_zeroes_data;
 };
 
 struct request_queue
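
With discard_granularity, discard_alignment and discard_zeroes_data in struct queue_limits, a driver can now describe its discard geometry instead of advertising only a size cap. A minimal sketch of how a driver might wire this up; the values are illustrative and my_enable_discard() is hypothetical, while the limits fields, blk_queue_max_discard_sectors() and QUEUE_FLAG_DISCARD come from this header (queue_flag_set_unlocked() is assumed to be the usual flag helper):

	#include <linux/blkdev.h>

	static void my_enable_discard(struct request_queue *q)
	{
		q->limits.discard_granularity = 4096;	/* smallest internal allocation unit */
		q->limits.discard_alignment   = 0;	/* discard ranges naturally aligned */
		q->limits.discard_zeroes_data = 1;	/* discarded blocks read back as zeroes */
		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	}
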
@@ -457,8 +461,8 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
-#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
-#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
+#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
@@ -582,9 +586,10 @@ enum {
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q)	\
+	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
@@ -749,6 +754,17 @@ struct req_iterator {
 #define rq_iter_last(rq, _iter)					\
 		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
 
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
+#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+extern void rq_flush_dcache_pages(struct request *rq);
+#else
+static inline void rq_flush_dcache_pages(struct request *rq)
+{
+}
+#endif
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
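
The #error trap forces every architecture to declare whether flush_dcache_page() does real work; where it does, rq_flush_dcache_pages() flushes each page the request's bios point at, and elsewhere it compiles away. A hedged sketch of the intended call site in a PIO driver; my_pio_read_request() is hypothetical, while rq_flush_dcache_pages() and __blk_end_request_all() are existing kernel interfaces:

	static void my_finish_read(struct request *rq)
	{
		my_pio_read_request(rq);	/* hypothetical: CPU copies data into the request's pages */
		rq_flush_dcache_pages(rq);	/* no-op unless the arch has aliasing D-caches */
		__blk_end_request_all(rq, 0);	/* complete all segments, 0 == success */
	}
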
@@ -823,19 +839,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 	return bdev->bd_disk->queue;
 }
 
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
-				       struct page *page)
-{
-	if (bdi && bdi->unplug_io_fn)
-		bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
 /*
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
@@ -843,7 +846,6 @@ static inline void blk_run_address_space(struct address_space *mapping)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()		: sectors left in the current segment
- * blk_rq_err_sectors()		: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -872,11 +874,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-	return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
@@ -924,10 +921,8 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
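
With the phys/hw segment split collapsed into a single max_segments limit, driver conversion is mechanical: pass the smaller of the two old values to the unified setter. A sketch, assuming a driver queue-setup function and an illustrative MY_MAX_SEGMENTS:

	#define MY_MAX_SEGMENTS	64	/* illustrative scatter-gather limit */

	static void my_setup_queue(struct request_queue *q)
	{
		/* was:	blk_queue_max_phys_segments(q, MY_MAX_SEGMENTS);
		 *	blk_queue_max_hw_segments(q, MY_MAX_SEGMENTS); */
		blk_queue_max_segments(q, MY_MAX_SEGMENTS);
		/* was:	blk_queue_max_sectors(q, 256); */
		blk_queue_max_hw_sectors(q, 256);
	}
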
@@ -942,6 +937,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			     sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
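
bdev_stack_limits() is the partition-aware companion to blk_stack_limits(): it adjusts the stacking offset by the component device's start sector before merging limits, which is what device-mapper style stacking targets want. A sketch of the expected usage; the names are illustrative, and the negative return is taken to signal misalignment as with blk_stack_limits():

	static void my_stack_component(struct queue_limits *limits,
				       struct block_device *bdev, sector_t start)
	{
		if (bdev_stack_limits(limits, bdev, start) < 0)
			printk(KERN_WARNING "my-target: %s is misaligned\n",
			       bdev->bd_disk->disk_name);
	}
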
@@ -1015,14 +1012,13 @@ static inline int sb_issue_discard(struct super_block *sb,
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
-#define MAX_PHYS_SEGMENTS 128
-#define MAX_HW_SEGMENTS 128
-#define SAFE_MAX_SECTORS 255
-#define BLK_DEF_MAX_SECTORS 1024
-
-#define MAX_SEGMENT_SIZE	65536
-
-#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL
+enum blk_default_limits {
+	BLK_MAX_SEGMENTS	= 128,
+	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 1024,
+	BLK_MAX_SEGMENT_SIZE	= 65536,
+	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
+};
 
 #define blkdev_entry_to_request(entry)	list_entry((entry), struct request, queuelist)
 
@@ -1046,14 +1042,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
-{
-	return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-	return q->limits.max_phys_segments;
+	return q->limits.max_segments;
 }
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
@@ -1114,11 +1105,13 @@ static inline int queue_alignment_offset(struct request_queue *q)
 	return q->limits.alignment_offset;
 }
 
-static inline int queue_sector_alignment_offset(struct request_queue *q,
-						sector_t sector)
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
-	return ((sector << 9) - q->limits.alignment_offset)
-		& (q->limits.io_min - 1);
+	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+	unsigned int alignment = (sector << 9) & (granularity - 1);
+
+	return (granularity + lim->alignment_offset - alignment)
+		& (granularity - 1);
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
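
The rewrite keys the calculation to max(physical_block_size, io_min) instead of io_min alone, and returns how far the byte address of `sector` sits from the device's natural alignment. A worked example, assuming a 4KiB-physical-block disk whose LBA 0 lies 3584 bytes into a physical block (physical_block_size = 4096, io_min = 512, alignment_offset = 3584):

	granularity = max(4096, 512) = 4096
	sector 0: alignment = (0 << 9) & 4095 = 0    -> (4096 + 3584 - 0)    & 4095 = 3584
	sector 7: alignment = (7 << 9) & 4095 = 3584 -> (4096 + 3584 - 3584) & 4095 = 0

so partitions starting at sector 7, 15, 23, ... report an alignment offset of zero.
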
@@ -1134,6 +1127,35 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
 	return q->limits.alignment_offset;
 }
 
+static inline int queue_discard_alignment(struct request_queue *q)
+{
+	if (q->limits.discard_misaligned)
+		return -1;
+
+	return q->limits.discard_alignment;
+}
+
+static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+{
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
+}
+
+static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+{
+	if (q->limits.discard_zeroes_data == 1)
+		return 1;
+
+	return 0;
+}
+
+static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
+{
+	return queue_discard_zeroes_data(bdev_get_queue(bdev));
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
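
queue_limit_discard_alignment() applies the same modular arithmetic with discard_granularity as the modulus, yielding the byte distance from `sector` to the next discard boundary. For an illustrative device with discard_granularity = 65536 and discard_alignment = 0:

	sector 0:   alignment = 0     -> (65536 + 0 - 0)     & 65535 = 0
	sector 64:  alignment = 32768 -> (65536 + 0 - 32768) & 65535 = 32768
	sector 128: alignment = 0     -> 0

Note also that discard_zeroes_data is a signed char, leaving room for values other than 0 and 1, which is why queue_discard_zeroes_data() reports zeroing only when the flag is exactly 1.
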