Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  20
1 files changed, 10 insertions, 10 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17e5caa..132a858ce2c5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -117,7 +117,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static void clear_queue_congested(request_queue_t *q, int rw)
+void blk_clear_queue_congested(request_queue_t *q, int rw)
 {
 	enum bdi_state bit;
 	wait_queue_head_t *wqh = &congestion_wqh[rw];
@@ -128,18 +128,20 @@ static void clear_queue_congested(request_queue_t *q, int rw)
 	if (waitqueue_active(wqh))
 		wake_up(wqh);
 }
+EXPORT_SYMBOL(blk_clear_queue_congested);
 
 /*
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static void set_queue_congested(request_queue_t *q, int rw)
+void blk_set_queue_congested(request_queue_t *q, int rw)
 {
 	enum bdi_state bit;
 
 	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
 	set_bit(bit, &q->backing_dev_info.state);
 }
+EXPORT_SYMBOL(blk_set_queue_congested);
 
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
@@ -159,7 +161,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 		ret = &q->backing_dev_info;
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +168,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
 	q->activity_fn = fn;
 	q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -2067,7 +2067,7 @@ static void __freed_request(request_queue_t *q, int rw)
 	struct request_list *rl = &q->rq;
 
 	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, rw);
+		blk_clear_queue_congested(q, rw);
 
 	if (rl->count[rw] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2137,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				}
 			}
 		}
-		set_queue_congested(q, rw);
+		blk_set_queue_congested(q, rw);
 	}
 
 	/*
@@ -3765,14 +3765,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
+		blk_set_queue_congested(q, READ);
 	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+		blk_clear_queue_congested(q, READ);
 
 	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, WRITE);
+		blk_set_queue_congested(q, WRITE);
 	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, WRITE);
+		blk_clear_queue_congested(q, WRITE);
 
 	if (rl->count[READ] >= q->nr_requests) {
 		blk_set_queue_full(q, READ);
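
Note: the diff drops the static qualifier from the two congestion helpers and exports them as blk_set_queue_congested() and blk_clear_queue_congested(), so code outside ll_rw_blk.c can flag a request queue's VM-visible congestion state. The sketch below is not part of this commit; it is a minimal, hypothetical caller (the example_dev structure, its backlog/limit fields, and example_update_congestion() are assumptions made up for illustration) showing how a driver might use the newly exported helpers.

#include <linux/blkdev.h>

/* Hypothetical per-device state; not from this commit. */
struct example_dev {
	request_queue_t *queue;		/* the driver's request queue */
	unsigned int backlog;		/* driver-private count of pending writes */
	unsigned int limit;		/* backlog level treated as "congested" */
};

/*
 * Mirror the driver's private backlog into the queue's congestion state
 * using the helpers exported by this patch.
 */
static void example_update_congestion(struct example_dev *dev)
{
	if (dev->backlog >= dev->limit)
		blk_set_queue_congested(dev->queue, WRITE);
	else
		blk_clear_queue_congested(dev->queue, WRITE);
}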