Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a599077d..36ab42c9bb99 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
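The hunk above flips the queue_limits member from an inverted no_cluster field to a positive cluster field. As a hedged aside, the stand-alone sketch below (userspace code with a stubbed limits struct; the AND-based stacking rule is an assumption for illustration, not part of this diff) shows why a positive flag composes naturally when the limits of stacked devices are combined: clustering stays enabled only if every layer allows it.

/*
 * Illustration only, not kernel code: combining a positive 'cluster'
 * flag across stacked limits is a plain AND.
 */
#include <stdio.h>

struct limits_sketch {
	unsigned char cluster;	/* non-zero: adjacent segments may be merged */
};

/* Fold a bottom device's limits into the top device's limits. */
static void stack_cluster(struct limits_sketch *top,
			  const struct limits_sketch *bottom)
{
	top->cluster &= bottom->cluster;
}

int main(void)
{
	struct limits_sketch top = { .cluster = 1 };
	struct limits_sketch bottom = { .cluster = 0 };

	stack_cluster(&top, &bottom);
	printf("clustering after stacking: %u\n", top.cluster);	/* prints 0 */
	return 0;
}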
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
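The new blk_queue_cluster() helper reads the clustering decision from the queue's limits instead of a queue flag. Below is a minimal stand-alone sketch of the helper as added above; struct request_queue is stubbed to only the member this example touches, and the userspace main() is purely illustrative.

/*
 * Minimal userspace sketch of the helper introduced in this hunk.
 */
#include <stdio.h>

struct queue_limits {
	unsigned char cluster;
};

struct request_queue {
	struct queue_limits limits;	/* only the field used here */
};

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

int main(void)
{
	struct request_queue q = { .limits = { .cluster = 1 } };

	if (blk_queue_cluster(&q))
		printf("segment clustering enabled\n");
	return 0;
}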
@@ -552,8 +555,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
-	 REQ_FLUSH | REQ_FUA)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
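This hunk only drops REQ_HARDBARRIER from the no-merge mask. The stand-alone sketch below shows the effect of the mask on merge eligibility; the flag bit values are stubbed for illustration and do not match the kernel's definitions.

/*
 * Userspace illustration of the RQ_NOMERGE_FLAGS filter above.
 * Bit values are made up for this example only.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_NOMERGE	(1u << 0)
#define REQ_STARTED	(1u << 1)
#define REQ_SOFTBARRIER	(1u << 2)
#define REQ_FLUSH	(1u << 3)
#define REQ_FUA		(1u << 4)

#define RQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)

static bool nomerge(unsigned int cmd_flags)
{
	return cmd_flags & RQ_NOMERGE_FLAGS;
}

int main(void)
{
	printf("plain request: %s\n", nomerge(0) ? "no merge" : "mergeable");
	printf("flush request: %s\n", nomerge(REQ_FLUSH) ? "no merge" : "mergeable");
	return 0;
}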
@@ -806,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
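The last hunk only adds the blk_limits_max_hw_sectors() declaration; its body is not part of this diff. A setter that operates on a bare struct queue_limits lets code that has no request_queue at hand (for example, code assembling limits before a queue exists) cap max_hw_sectors directly. The sketch below is a stand-alone illustration with stubbed types and assumed clamping behavior, not the kernel implementation.

/*
 * Illustration only: a limits-based setter, with behavior assumed for
 * the example (cap max_hw_sectors and keep max_sectors within it).
 */
#include <stdio.h>

struct queue_limits_sketch {
	unsigned int max_hw_sectors;
	unsigned int max_sectors;
};

static void limits_max_hw_sectors(struct queue_limits_sketch *lim,
				  unsigned int max_hw_sectors)
{
	lim->max_hw_sectors = max_hw_sectors;
	if (lim->max_sectors > max_hw_sectors)
		lim->max_sectors = max_hw_sectors;
}

int main(void)
{
	struct queue_limits_sketch lim = {
		.max_hw_sectors = 1024,
		.max_sectors = 1024,
	};

	limits_max_hw_sectors(&lim, 256);
	printf("max_hw_sectors=%u max_sectors=%u\n",
	       lim.max_hw_sectors, lim.max_sectors);
	return 0;
}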
