diff options
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 76 |
1 file changed, 34 insertions(+), 42 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 784a919aa0d0..6690e8bae7bb 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -158,7 +158,6 @@ enum rq_flag_bits { | |||
| 158 | struct request { | 158 | struct request { |
| 159 | struct list_head queuelist; | 159 | struct list_head queuelist; |
| 160 | struct call_single_data csd; | 160 | struct call_single_data csd; |
| 161 | int cpu; | ||
| 162 | 161 | ||
| 163 | struct request_queue *q; | 162 | struct request_queue *q; |
| 164 | 163 | ||
| @@ -166,9 +165,11 @@ struct request { | |||
| 166 | enum rq_cmd_type_bits cmd_type; | 165 | enum rq_cmd_type_bits cmd_type; |
| 167 | unsigned long atomic_flags; | 166 | unsigned long atomic_flags; |
| 168 | 167 | ||
| 168 | int cpu; | ||
| 169 | |||
| 169 | /* the following two fields are internal, NEVER access directly */ | 170 | /* the following two fields are internal, NEVER access directly */ |
| 170 | sector_t __sector; /* sector cursor */ | ||
| 171 | unsigned int __data_len; /* total data len */ | 171 | unsigned int __data_len; /* total data len */ |
| 172 | sector_t __sector; /* sector cursor */ | ||
| 172 | 173 | ||
| 173 | struct bio *bio; | 174 | struct bio *bio; |
| 174 | struct bio *biotail; | 175 | struct bio *biotail; |
| @@ -201,20 +202,20 @@ struct request { | |||
| 201 | 202 | ||
| 202 | unsigned short ioprio; | 203 | unsigned short ioprio; |
| 203 | 204 | ||
| 205 | int ref_count; | ||
| 206 | |||
| 204 | void *special; /* opaque pointer available for LLD use */ | 207 | void *special; /* opaque pointer available for LLD use */ |
| 205 | char *buffer; /* kaddr of the current segment if available */ | 208 | char *buffer; /* kaddr of the current segment if available */ |
| 206 | 209 | ||
| 207 | int tag; | 210 | int tag; |
| 208 | int errors; | 211 | int errors; |
| 209 | 212 | ||
| 210 | int ref_count; | ||
| 211 | |||
| 212 | /* | 213 | /* |
| 213 | * when request is used as a packet command carrier | 214 | * when request is used as a packet command carrier |
| 214 | */ | 215 | */ |
| 215 | unsigned short cmd_len; | ||
| 216 | unsigned char __cmd[BLK_MAX_CDB]; | 216 | unsigned char __cmd[BLK_MAX_CDB]; |
| 217 | unsigned char *cmd; | 217 | unsigned char *cmd; |
| 218 | unsigned short cmd_len; | ||
| 218 | 219 | ||
| 219 | unsigned int extra_len; /* length of alignment and padding */ | 220 | unsigned int extra_len; /* length of alignment and padding */ |
| 220 | unsigned int sense_len; | 221 | unsigned int sense_len; |
| @@ -316,8 +317,7 @@ struct queue_limits { | |||
| 316 | unsigned int discard_alignment; | 317 | unsigned int discard_alignment; |
| 317 | 318 | ||
| 318 | unsigned short logical_block_size; | 319 | unsigned short logical_block_size; |
| 319 | unsigned short max_hw_segments; | 320 | unsigned short max_segments; |
| 320 | unsigned short max_phys_segments; | ||
| 321 | 321 | ||
| 322 | unsigned char misaligned; | 322 | unsigned char misaligned; |
| 323 | unsigned char discard_misaligned; | 323 | unsigned char discard_misaligned; |
| @@ -461,8 +461,8 @@ struct request_queue | |||
| 461 | #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ | 461 | #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ |
| 462 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 462 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 463 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 463 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
| 464 | #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ | 464 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
| 465 | #define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */ | 465 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ |
| 466 | 466 | ||
| 467 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 467 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 468 | (1 << QUEUE_FLAG_CLUSTER) | \ | 468 | (1 << QUEUE_FLAG_CLUSTER) | \ |
| @@ -586,9 +586,10 @@ enum { | |||
| 586 | 586 | ||
| 587 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) | 587 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) |
| 588 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 588 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
| 589 | #define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags) | ||
| 590 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 589 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
| 591 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 590 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
| 591 | #define blk_queue_noxmerges(q) \ | ||
| 592 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | ||
| 592 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 593 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
| 593 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 594 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
| 594 | #define blk_queue_flushing(q) ((q)->ordseq) | 595 | #define blk_queue_flushing(q) ((q)->ordseq) |
| @@ -845,7 +846,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | |||
| 845 | * blk_rq_err_bytes() : bytes left till the next error boundary | 846 | * blk_rq_err_bytes() : bytes left till the next error boundary |
| 846 | * blk_rq_sectors() : sectors left in the entire request | 847 | * blk_rq_sectors() : sectors left in the entire request |
| 847 | * blk_rq_cur_sectors() : sectors left in the current segment | 848 | * blk_rq_cur_sectors() : sectors left in the current segment |
| 848 | * blk_rq_err_sectors() : sectors left till the next error boundary | ||
| 849 | */ | 849 | */ |
| 850 | static inline sector_t blk_rq_pos(const struct request *rq) | 850 | static inline sector_t blk_rq_pos(const struct request *rq) |
| 851 | { | 851 | { |
| @@ -874,11 +874,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |||
| 874 | return blk_rq_cur_bytes(rq) >> 9; | 874 | return blk_rq_cur_bytes(rq) >> 9; |
| 875 | } | 875 | } |
| 876 | 876 | ||
| 877 | static inline unsigned int blk_rq_err_sectors(const struct request *rq) | ||
| 878 | { | ||
| 879 | return blk_rq_err_bytes(rq) >> 9; | ||
| 880 | } | ||
| 881 | |||
| 882 | /* | 877 | /* |
| 883 | * Request issue related functions. | 878 | * Request issue related functions. |
| 884 | */ | 879 | */ |
| @@ -926,10 +921,8 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); | |||
| 926 | extern void blk_cleanup_queue(struct request_queue *); | 921 | extern void blk_cleanup_queue(struct request_queue *); |
| 927 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 922 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 928 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 923 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 929 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); | ||
| 930 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 924 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
| 931 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 925 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
| 932 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | ||
| 933 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 926 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 934 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 927 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
| 935 | unsigned int max_discard_sectors); | 928 | unsigned int max_discard_sectors); |
| @@ -944,6 +937,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | |||
| 944 | extern void blk_set_default_limits(struct queue_limits *lim); | 937 | extern void blk_set_default_limits(struct queue_limits *lim); |
| 945 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 938 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
| 946 | sector_t offset); | 939 | sector_t offset); |
| 940 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | ||
| 941 | sector_t offset); | ||
| 947 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 942 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
| 948 | sector_t offset); | 943 | sector_t offset); |
| 949 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 944 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
| @@ -1017,14 +1012,13 @@ static inline int sb_issue_discard(struct super_block *sb, | |||
| 1017 | 1012 | ||
| 1018 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 1013 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
| 1019 | 1014 | ||
| 1020 | #define MAX_PHYS_SEGMENTS 128 | 1015 | enum blk_default_limits { |
| 1021 | #define MAX_HW_SEGMENTS 128 | 1016 | BLK_MAX_SEGMENTS = 128, |
| 1022 | #define SAFE_MAX_SECTORS 255 | 1017 | BLK_SAFE_MAX_SECTORS = 255, |
| 1023 | #define BLK_DEF_MAX_SECTORS 1024 | 1018 | BLK_DEF_MAX_SECTORS = 1024, |
| 1024 | 1019 | BLK_MAX_SEGMENT_SIZE = 65536, | |
| 1025 | #define MAX_SEGMENT_SIZE 65536 | 1020 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
| 1026 | 1021 | }; | |
| 1027 | #define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL | ||
| 1028 | 1022 | ||
| 1029 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 1023 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
| 1030 | 1024 | ||
| @@ -1048,14 +1042,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | |||
| 1048 | return q->limits.max_hw_sectors; | 1042 | return q->limits.max_hw_sectors; |
| 1049 | } | 1043 | } |
| 1050 | 1044 | ||
| 1051 | static inline unsigned short queue_max_hw_segments(struct request_queue *q) | 1045 | static inline unsigned short queue_max_segments(struct request_queue *q) |
| 1052 | { | 1046 | { |
| 1053 | return q->limits.max_hw_segments; | 1047 | return q->limits.max_segments; |
| 1054 | } | ||
| 1055 | |||
| 1056 | static inline unsigned short queue_max_phys_segments(struct request_queue *q) | ||
| 1057 | { | ||
| 1058 | return q->limits.max_phys_segments; | ||
| 1059 | } | 1048 | } |
| 1060 | 1049 | ||
| 1061 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 1050 | static inline unsigned int queue_max_segment_size(struct request_queue *q) |
| @@ -1116,11 +1105,13 @@ static inline int queue_alignment_offset(struct request_queue *q) | |||
| 1116 | return q->limits.alignment_offset; | 1105 | return q->limits.alignment_offset; |
| 1117 | } | 1106 | } |
| 1118 | 1107 | ||
| 1119 | static inline int queue_sector_alignment_offset(struct request_queue *q, | 1108 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) |
| 1120 | sector_t sector) | ||
| 1121 | { | 1109 | { |
| 1122 | return ((sector << 9) - q->limits.alignment_offset) | 1110 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); |
| 1123 | & (q->limits.io_min - 1); | 1111 | unsigned int alignment = (sector << 9) & (granularity - 1); |
| 1112 | |||
| 1113 | return (granularity + lim->alignment_offset - alignment) | ||
| 1114 | & (granularity - 1); | ||
| 1124 | } | 1115 | } |
| 1125 | 1116 | ||
| 1126 | static inline int bdev_alignment_offset(struct block_device *bdev) | 1117 | static inline int bdev_alignment_offset(struct block_device *bdev) |
| @@ -1144,11 +1135,12 @@ static inline int queue_discard_alignment(struct request_queue *q) | |||
| 1144 | return q->limits.discard_alignment; | 1135 | return q->limits.discard_alignment; |
| 1145 | } | 1136 | } |
| 1146 | 1137 | ||
| 1147 | static inline int queue_sector_discard_alignment(struct request_queue *q, | 1138 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) |
| 1148 | sector_t sector) | ||
| 1149 | { | 1139 | { |
| 1150 | return ((sector << 9) - q->limits.discard_alignment) | 1140 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); |
| 1151 | & (q->limits.discard_granularity - 1); | 1141 | |
| 1142 | return (lim->discard_granularity + lim->discard_alignment - alignment) | ||
| 1143 | & (lim->discard_granularity - 1); | ||
| 1152 | } | 1144 | } |
| 1153 | 1145 | ||
| 1154 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 1146 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) |
