Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 65 ++++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 38 insertions(+), 27 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1896e868854f..ebd22dbed861 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -316,8 +316,7 @@ struct queue_limits {
 	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
-	unsigned short		max_hw_segments;
-	unsigned short		max_phys_segments;
+	unsigned short		max_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
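
The struct change above collapses the old phys/hw segment pair into a single max_segments field. For illustration, a minimal sketch of what limit stacking looks like once only one field exists (hypothetical helper; in-tree the consolidation point is blk_stack_limits()):

/* Hypothetical: combine a top (t) and bottom (b) queue's segment
 * limits, one min_not_zero() instead of one per phys/hw variant. */
static void stack_segment_limit(struct queue_limits *t,
				const struct queue_limits *b)
{
	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
}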
@@ -462,6 +461,7 @@ struct request_queue
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
@@ -587,6 +587,8 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q)	\
+	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
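
A sketch of how the new flag would be raised at runtime, using the existing queue_flag helpers (hypothetical call site; in-tree the flag is toggled through the queue's nomerges sysfs attribute). With NOXMERGES set, the elevator still attempts cheap contiguous back/front merges but skips the extended merge lookups:

/* Hypothetical: disable extended merge lookups only. */
spin_lock_irq(q->queue_lock);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
spin_unlock_irq(q->queue_lock);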
@@ -918,10 +920,27 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+
+/* Temporary compatibility wrapper */
+static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
+{
+	blk_queue_max_hw_sectors(q, max);
+}
+
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+
+static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
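
The deleted externs survive as the static inline wrappers above, so unconverted drivers keep compiling unchanged. A sketch of a driver call site before and after conversion (hypothetical driver, arbitrary values):

/* Before: still compiles via the temporary wrappers. */
blk_queue_max_phys_segments(q, 128);
blk_queue_max_hw_segments(q, 128);
blk_queue_max_sectors(q, 1024);

/* After: the consolidated helpers. */
blk_queue_max_segments(q, 128);
blk_queue_max_hw_sectors(q, 1024);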
@@ -1014,11 +1033,15 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
 #define SAFE_MAX_SECTORS 255
-#define BLK_DEF_MAX_SECTORS 1024
-
 #define MAX_SEGMENT_SIZE 65536
 
-#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
+enum blk_default_limits {
+	BLK_MAX_SEGMENTS	= 128,
+	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 1024,
+	BLK_MAX_SEGMENT_SIZE	= 65536,
+	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
+};
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
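The unconverted MAX_* defines are kept for existing users, while the default limits gain namespaced BLK_* names in an enum. New call sites would look like this (hypothetical example):

blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);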
@@ -1042,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
-{
-	return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-	return q->limits.max_phys_segments;
+	return q->limits.max_segments;
 }
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
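
Readers now go through the single queue_max_segments() accessor rather than poking q->limits directly. Roughly the kind of check the merge path performs against it (simplified sketch, not the exact in-tree logic):

/* Simplified: refuse a merge that would exceed the segment limit. */
if (req->nr_phys_segments + bio_phys_segments(q, bio) >
    queue_max_segments(q))
	return 0;	/* cannot merge */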
@@ -1110,18 +1128,13 @@ static inline int queue_alignment_offset(struct request_queue *q)
 	return q->limits.alignment_offset;
 }
 
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+	unsigned int alignment = (sector << 9) & (granularity - 1);
 
-	offset &= granularity - 1;
-	return (granularity + lim->alignment_offset - offset) & (granularity - 1);
-}
-
-static inline int queue_sector_alignment_offset(struct request_queue *q,
-						sector_t sector)
-{
-	return queue_limit_alignment_offset(&q->limits, sector << 9);
+	return (granularity + lim->alignment_offset - alignment)
+		& (granularity - 1);
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
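
A worked example of the rewritten helper, assuming physical_block_size = 4096 and alignment_offset = 3584 (a 4 KiB-sector drive whose LBA 0 sits 3584 bytes before a physical-block boundary): for sector = 0, granularity = 4096 and alignment = 0, so the result is (4096 + 3584 - 0) & 4095 = 3584 bytes of misalignment. For sector = 7 (byte 3584), alignment = 3584 and the result is (4096 + 3584 - 3584) & 4095 = 0, i.e. perfectly aligned. Note the helper now takes a sector directly instead of a byte offset, which is why the separate queue_sector_alignment_offset() wrapper could be dropped.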
@@ -1145,10 +1158,8 @@ static inline int queue_discard_alignment(struct request_queue *q)
 	return q->limits.discard_alignment;
 }
 
-static inline int queue_sector_discard_alignment(struct request_queue *q,
-						 sector_t sector)
+static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-	struct queue_limits *lim = &q->limits;
 	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
 
 	return (lim->discard_granularity + lim->discard_alignment - alignment)
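
The discard variant applies the same arithmetic with discard_granularity in place of the physical block size, and likewise now operates on a queue_limits pointer so stacking drivers can evaluate it without a request_queue. For example, with discard_granularity = 65536 and discard_alignment = 0, a range starting at sector 250 (byte 128000) gives alignment = 128000 & 65535 = 62464, so the helper returns (65536 + 0 - 62464) & 65535 = 3072, the byte count to the next discard-aligned boundary.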