author     Thomas Maier <balagi@justmail.de>        2006-10-20 02:28:15 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-10-20 13:26:35 -0400
commit     79e2de4bc53d7ca2a8eedee49e4a92479b4b530e
tree       f56d41a654eda0995ee35a97881bcdcb3f393528
parent     26da82058e62ea173559a26881b16d10089645ba
[PATCH] export clear_queue_congested and set_queue_congested
Export the clear_queue_congested() and set_queue_congested() functions
located in ll_rw_blk.c, renaming them to blk_clear_queue_congested() and
blk_set_queue_congested().
They are needed by the pktcdvd driver's bio write congestion control.
Signed-off-by: Thomas Maier <balagi@justmail.de>
Cc: Peter Osterlund <petero2@telia.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 block/ll_rw_blk.c      | 20 ++++++++++----------
 include/linux/blkdev.h |  2 ++
 2 files changed, 12 insertions(+), 10 deletions(-)
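For context, the sketch below shows how a block driver could drive the newly exported interface for write congestion control, in the spirit of what the pktcdvd driver needs. It is illustrative only: struct pkt_writer, its watermark fields, and pkt_update_write_congestion() are hypothetical names, not taken from this patch or from pktcdvd; only blk_set_queue_congested() and blk_clear_queue_congested() come from the patch.

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical driver-side bookkeeping for held-back write bios. */
struct pkt_writer {
	request_queue_t *disk_queue;	/* queue whose congestion flag we toggle */
	int write_queue_depth;		/* write bios currently held by the driver */
	int write_congestion_on;	/* high watermark: mark the queue congested */
	int write_congestion_off;	/* low watermark: clear the congested flag */
};

static void pkt_update_write_congestion(struct pkt_writer *pw)
{
	if (pw->write_queue_depth >= pw->write_congestion_on)
		/* Too many writes queued: tell the VM this queue is congested. */
		blk_set_queue_congested(pw->disk_queue, WRITE);
	else if (pw->write_queue_depth <= pw->write_congestion_off)
		/* Drained below the low watermark: clear the congested state. */
		blk_clear_queue_congested(pw->disk_queue, WRITE);
}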
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17e5caa..132a858ce2c5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -117,7 +117,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static void clear_queue_congested(request_queue_t *q, int rw)
+void blk_clear_queue_congested(request_queue_t *q, int rw)
 {
 	enum bdi_state bit;
 	wait_queue_head_t *wqh = &congestion_wqh[rw];
@@ -128,18 +128,20 @@ static void clear_queue_congested(request_queue_t *q, int rw)
 	if (waitqueue_active(wqh))
 		wake_up(wqh);
 }
+EXPORT_SYMBOL(blk_clear_queue_congested);
 
 /*
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static void set_queue_congested(request_queue_t *q, int rw)
+void blk_set_queue_congested(request_queue_t *q, int rw)
 {
 	enum bdi_state bit;
 
 	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
 	set_bit(bit, &q->backing_dev_info.state);
 }
+EXPORT_SYMBOL(blk_set_queue_congested);
 
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
@@ -159,7 +161,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 		ret = &q->backing_dev_info;
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +168,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
 	q->activity_fn = fn;
 	q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -2067,7 +2067,7 @@ static void __freed_request(request_queue_t *q, int rw)
 	struct request_list *rl = &q->rq;
 
 	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, rw);
+		blk_clear_queue_congested(q, rw);
 
 	if (rl->count[rw] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2137,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				}
 			}
 		}
-		set_queue_congested(q, rw);
+		blk_set_queue_congested(q, rw);
 	}
 
 	/*
@@ -3765,14 +3765,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
+		blk_set_queue_congested(q, READ);
 	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+		blk_clear_queue_congested(q, READ);
 
 	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, WRITE);
+		blk_set_queue_congested(q, WRITE);
 	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, WRITE);
+		blk_clear_queue_congested(q, WRITE);
 
 	if (rl->count[READ] >= q->nr_requests) {
 		blk_set_queue_full(q, READ);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d370d2cfe138..9575e3a5ff2a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -651,6 +651,8 @@ extern void blk_recount_segments(request_queue_t *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
+extern void blk_clear_queue_congested(request_queue_t *q, int rw);
+extern void blk_set_queue_congested(request_queue_t *q, int rw);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
 extern void blk_sync_queue(struct request_queue *q);
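A side note on the two EXPORT_SYMBOL() additions: they are what allow a modular driver (for example a modular pktcdvd build) to resolve the renamed helpers at load time. The module below is a hypothetical minimal sketch of that dependency, not code from this patch; without the exports, loading it would fail with unresolved-symbol errors because the functions were previously static.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical queue pointer, assumed to be set up by real driver code. */
static request_queue_t *demo_queue;

static int __init congestion_demo_init(void)
{
	/*
	 * This call resolves against the blk_clear_queue_congested symbol
	 * exported by the patch when the module is loaded.
	 */
	if (demo_queue)
		blk_clear_queue_congested(demo_queue, WRITE);
	return 0;
}

static void __exit congestion_demo_exit(void)
{
}

module_init(congestion_demo_init);
module_exit(congestion_demo_exit);
MODULE_LICENSE("GPL");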