author     Jeremy Erickson <jerickso@cs.unc.edu>  2014-04-18 17:06:00 -0400
committer  Jeremy Erickson <jerickso@cs.unc.edu>  2014-04-18 17:06:00 -0400
commit     a215aa7b9ab3759c047201199fba64d3042d7f13 (patch)
tree       bca37493d9b2233450e6d3ffced1261d0e4f71fe /include/linux/blkdev.h
parent     d31199a77ef606f1d06894385f1852181ba6136b (diff)
Update 2.6.36 to 2.6.36.4  (wip-dissipation2-jerickso)
Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 13
 1 file changed, 8 insertions, 5 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..f142a7f300f8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -246,7 +246,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -369,7 +369,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -392,7 +391,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP) |		\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -550,6 +548,11 @@ enum {
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
@@ -851,7 +854,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -1004,7 +1007,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
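The cluster-related hunks above drop the per-queue QUEUE_FLAG_CLUSTER bit in favour of a value kept in struct queue_limits (note the inverted sense: the old field was no_cluster, the new one is cluster) and add blk_queue_cluster() as the accessor. As an illustrative sketch only, not part of this patch, the fragment below shows how a caller that previously tested the queue flag (typically with test_bit() on q->queue_flags) would read the setting after this update; the function name example_can_cluster() is hypothetical.

    #include <linux/blkdev.h>

    /*
     * Illustrative sketch, not taken from this patch: reading the
     * segment-clustering setting before and after the change.
     */
    static unsigned int example_can_cluster(struct request_queue *q)
    {
            /*
             * 2.6.36:   test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)
             * 2.6.36.4: read the value cached in the queue limits via
             *           the helper introduced by this update.
             */
            return blk_queue_cluster(q);
    }

Keeping the setting in struct queue_limits rather than in queue_flags means it travels with the rest of the limits when they are copied between queues, which appears to be the motivation for replacing the flag with a limits field.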