| author | Ingo Molnar <mingo@elte.hu> | 2009-08-24 06:25:44 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-24 06:25:54 -0400 |
| commit | 5f9ece02401116b29eb04396b99ea092acb75dd8 (patch) | |
| tree | e10386e2dc63c275646b4eb0bed857da7bf86c6a /include/linux/blkdev.h | |
| parent | 9f51e24ee8b5a1595b6a5ac0c2be278a16488e75 (diff) | |
| parent | 422bef879e84104fee6dc68ded0e371dbeb5f88e (diff) | |
Merge commit 'v2.6.31-rc7' into x86/cleanups
Merge reason: we were on -rc1 before - go up to -rc7
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 15 |
1 file changed, 6 insertions, 9 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49ae07951d55..69103e053c92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
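The removed BLK_RW_ASYNC/BLK_RW_SYNC constants come along with the -rc7 merge. A minimal sketch of a caller, assuming the constants now live in <linux/backing-dev.h> and that congestion_wait() takes the sync/async index plus a jiffies timeout, as in the 2.6.31-rc series this merge pulls in:

```c
/*
 * Sketch only: assumes BLK_RW_ASYNC/BLK_RW_SYNC are provided by
 * <linux/backing-dev.h> and congestion_wait(sync, timeout) waits for
 * congestion on the given sync/async counter to clear.
 */
#include <linux/backing-dev.h>
#include <linux/jiffies.h>

static void example_wait_for_writeback(void)
{
	/* Back off briefly while async (writeback) queues are congested. */
	congestion_wait(BLK_RW_ASYNC, HZ / 50);
}
```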
@@ -723,6 +718,7 @@ struct rq_map_data {
 	int nr_entries;
 	unsigned long offset;
 	int null_mapped;
+	int from_user;
 };
 
 struct req_iterator {
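The new from_user field in struct rq_map_data also arrives via the merge. A hedged sketch of a driver filling the structure before blk_rq_map_user(); everything beyond the fields visible in the hunk (the .pages array, the helper's argument order, and the copy-in semantics of from_user) is an assumption from the 2.6.31-era API:

```c
/* Sketch only: illustrative use of rq_map_data with blk_rq_map_user(). */
#include <linux/blkdev.h>

static int example_map_user_buffer(struct request_queue *q, struct request *rq,
				   struct page **pages, int nr_pages,
				   void __user *ubuf, unsigned long len)
{
	struct rq_map_data map_data = {
		.pages       = pages,	/* driver-supplied page array */
		.nr_entries  = nr_pages,
		.offset      = 0,
		.null_mapped = 0,
		/* Assumed meaning: copy the user buffer into the pages even
		 * when the transfer direction is device-to-host. */
		.from_user   = 1,
	};

	return blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_KERNEL);
}
```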
@@ -779,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
 {
-	clear_bdi_congested(&q->backing_dev_info, rw);
+	clear_bdi_congested(&q->backing_dev_info, sync);
 }
 
 /*
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
 {
-	set_bdi_congested(&q->backing_dev_info, rw);
+	set_bdi_congested(&q->backing_dev_info, sync);
 }
 
 extern void blk_start_queue(struct request_queue *q);
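After the rename, the second argument of these helpers is a sync/async index rather than a read/write direction. A minimal sketch of a caller, assuming BLK_RW_SYNC/BLK_RW_ASYNC (the constants dropped from this header above, now presumed to come from <linux/backing-dev.h>) are the intended values:

```c
/* Sketch only: mark or clear congestion on the sync or async counter. */
#include <linux/backing-dev.h>
#include <linux/blkdev.h>

static void example_update_congestion(struct request_queue *q,
				      bool is_sync, bool congested)
{
	int idx = is_sync ? BLK_RW_SYNC : BLK_RW_ASYNC;

	if (congested)
		blk_set_queue_congested(q, idx);
	else
		blk_clear_queue_congested(q, idx);
}
```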
@@ -917,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
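The newly declared blk_limits_io_min() operates on a bare struct queue_limits, while blk_queue_io_min() keeps taking a request_queue. A short sketch showing both entry points; the 4096-byte value and the stacking-driver motivation are assumptions, not stated in the hunk:

```c
/* Sketch only: set a 4 KiB minimum I/O size through either entry point. */
#include <linux/blkdev.h>

static void example_set_io_min(struct request_queue *q,
			       struct queue_limits *limits)
{
	/* Usual driver path: the request_queue already exists. */
	blk_queue_io_min(q, 4096);

	/*
	 * Path added by this merge: useful when only a queue_limits is at
	 * hand, for instance a stacking driver composing limits before it
	 * has a queue (assumed motivation).
	 */
	blk_limits_io_min(limits, 4096);
}
```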
