diff options
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-03-19 02:38:50 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-03-19 02:38:50 -0400 |
| commit | 97eb3f24352ec6632c2127b35d8087d2a809a9b9 (patch) | |
| tree | 722948059bbd325bbca232269490124231df80d4 /include/linux/blkdev.h | |
| parent | 439581ec07fa9cf3f519dd461a2cf41cfd3adcb4 (diff) | |
| parent | def179c271ac9b5020deca798470521f14d11edd (diff) | |
Merge branch 'next' into for-linus
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 18 |
1 file changed, 12 insertions, 6 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5027a599077d..4d18ff34670a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -115,6 +115,7 @@ struct request { | |||
| 115 | void *elevator_private3; | 115 | void *elevator_private3; |
| 116 | 116 | ||
| 117 | struct gendisk *rq_disk; | 117 | struct gendisk *rq_disk; |
| 118 | struct hd_struct *part; | ||
| 118 | unsigned long start_time; | 119 | unsigned long start_time; |
| 119 | #ifdef CONFIG_BLK_CGROUP | 120 | #ifdef CONFIG_BLK_CGROUP |
| 120 | unsigned long long start_time_ns; | 121 | unsigned long long start_time_ns; |
| @@ -250,7 +251,7 @@ struct queue_limits { | |||
| 250 | 251 | ||
| 251 | unsigned char misaligned; | 252 | unsigned char misaligned; |
| 252 | unsigned char discard_misaligned; | 253 | unsigned char discard_misaligned; |
| 253 | unsigned char no_cluster; | 254 | unsigned char cluster; |
| 254 | signed char discard_zeroes_data; | 255 | signed char discard_zeroes_data; |
| 255 | }; | 256 | }; |
| 256 | 257 | ||
| @@ -380,7 +381,6 @@ struct request_queue | |||
| 380 | #endif | 381 | #endif |
| 381 | }; | 382 | }; |
| 382 | 383 | ||
| 383 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | ||
| 384 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 384 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
| 385 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ | 385 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ |
| 386 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ | 386 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
| @@ -403,7 +403,6 @@ struct request_queue | |||
| 403 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ | 403 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ |
| 404 | 404 | ||
| 405 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 405 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 406 | (1 << QUEUE_FLAG_CLUSTER) | \ | ||
| 407 | (1 << QUEUE_FLAG_STACKABLE) | \ | 406 | (1 << QUEUE_FLAG_STACKABLE) | \ |
| 408 | (1 << QUEUE_FLAG_SAME_COMP) | \ | 407 | (1 << QUEUE_FLAG_SAME_COMP) | \ |
| 409 | (1 << QUEUE_FLAG_ADD_RANDOM)) | 408 | (1 << QUEUE_FLAG_ADD_RANDOM)) |
| @@ -510,6 +509,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
| 510 | 509 | ||
| 511 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) | 510 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) |
| 512 | 511 | ||
| 512 | static inline unsigned int blk_queue_cluster(struct request_queue *q) | ||
| 513 | { | ||
| 514 | return q->limits.cluster; | ||
| 515 | } | ||
| 516 | |||
| 513 | /* | 517 | /* |
| 514 | * We regard a request as sync, if either a read or a sync write | 518 | * We regard a request as sync, if either a read or a sync write |
| 515 | */ | 519 | */ |
| @@ -552,8 +556,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) | |||
| 552 | * it already be started by driver. | 556 | * it already be started by driver. |
| 553 | */ | 557 | */ |
| 554 | #define RQ_NOMERGE_FLAGS \ | 558 | #define RQ_NOMERGE_FLAGS \ |
| 555 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ | 559 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) |
| 556 | REQ_FLUSH | REQ_FUA) | ||
| 557 | #define rq_mergeable(rq) \ | 560 | #define rq_mergeable(rq) \ |
| 558 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 561 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
| 559 | (((rq)->cmd_flags & REQ_DISCARD) || \ | 562 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
| @@ -644,7 +647,6 @@ static inline void rq_flush_dcache_pages(struct request *rq) | |||
| 644 | 647 | ||
| 645 | extern int blk_register_queue(struct gendisk *disk); | 648 | extern int blk_register_queue(struct gendisk *disk); |
| 646 | extern void blk_unregister_queue(struct gendisk *disk); | 649 | extern void blk_unregister_queue(struct gendisk *disk); |
| 647 | extern void register_disk(struct gendisk *dev); | ||
| 648 | extern void generic_make_request(struct bio *bio); | 650 | extern void generic_make_request(struct bio *bio); |
| 649 | extern void blk_rq_init(struct request_queue *q, struct request *rq); | 651 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
| 650 | extern void blk_put_request(struct request *); | 652 | extern void blk_put_request(struct request *); |
| @@ -806,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *, | |||
| 806 | extern void blk_cleanup_queue(struct request_queue *); | 808 | extern void blk_cleanup_queue(struct request_queue *); |
| 807 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 809 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 808 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 810 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 811 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | ||
| 809 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 812 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
| 810 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 813 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
| 811 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 814 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| @@ -1253,6 +1256,9 @@ struct block_device_operations { | |||
| 1253 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1256 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1254 | int (*direct_access) (struct block_device *, sector_t, | 1257 | int (*direct_access) (struct block_device *, sector_t, |
| 1255 | void **, unsigned long *); | 1258 | void **, unsigned long *); |
| 1259 | unsigned int (*check_events) (struct gendisk *disk, | ||
| 1260 | unsigned int clearing); | ||
| 1261 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ | ||
| 1256 | int (*media_changed) (struct gendisk *); | 1262 | int (*media_changed) (struct gendisk *); |
| 1257 | void (*unlock_native_capacity) (struct gendisk *); | 1263 | void (*unlock_native_capacity) (struct gendisk *); |
| 1258 | int (*revalidate_disk) (struct gendisk *); | 1264 | int (*revalidate_disk) (struct gendisk *); |
