author		Andrew Morton <akpm@osdl.org>			2006-10-20 02:28:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-10-20 13:26:35 -0400
commit		3fcfab16c5b86eaa3db3a9a31adba550c5b67141 (patch)
tree		bd348fa081b8fbec2c79fbf8f173a306d70b2b2c /block
parent		79e2de4bc53d7ca2a8eedee49e4a92479b4b530e (diff)
[PATCH] separate bdi congestion functions from queue congestion functions
Separate out the concept of "queue congestion" from "backing-dev congestion".
Congestion is a backing-dev concept, not a queue concept.  The blk_* congestion
functions are retained, as wrappers around the core backing-dev congestion
functions.

This proper layering is needed so that NFS can cleanly use the congestion
functions, and so that CONFIG_BLOCK=n actually links.

Cc: "Thomas Maier" <balagi@justmail.de>
Cc: "Jens Axboe" <jens.axboe@oracle.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: David Howells <dhowells@redhat.com>
Cc: Peter Osterlund <petero2@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
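The retained blk_* entry points survive only as thin wrappers; a minimal sketch
of the layering the message describes (the bdi-level names clear_bdi_congested
and set_bdi_congested are assumptions inferred from the commit message, since
this block/-limited diff shows only the deletions):

	static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
	{
		/* delegate to the assumed bdi-level helper */
		clear_bdi_congested(&q->backing_dev_info, rw);
	}

	static inline void blk_set_queue_congested(request_queue_t *q, int rw)
	{
		/* delegate to the assumed bdi-level helper */
		set_bdi_congested(&q->backing_dev_info, rw);
	}

With this layering, a caller such as NFS that has a backing_dev_info but no
request queue can use the bdi-level functions directly, and the block-only
wrappers impose no link dependency when CONFIG_BLOCK=n.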
Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  |  71 -
1 file changed, 0 insertions(+), 71 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 132a858ce2c..136066583c6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-	};
-
 /*
  * Controlling structure to kblockd
  */
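The wait-queue pair deleted here presumably moves with the rest of the
congestion machinery out of block/; a sketch, assuming the relocated
declaration keeps the same two-element READ/WRITE indexing (the destination
file is not part of this block/-limited diffstat):

	/* One wait queue per I/O direction, indexed by READ/WRITE. */
	static wait_queue_head_t congestion_wqh[2] = {
			__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
			__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
		};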
@@ -112,37 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exited congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-void blk_clear_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	clear_bit(bit, &q->backing_dev_info.state);
-	smp_mb__after_clear_bit();
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-EXPORT_SYMBOL(blk_clear_queue_congested);
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global counter of congested queues.
- */
-void blk_set_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	set_bit(bit, &q->backing_dev_info.state);
-}
-EXPORT_SYMBOL(blk_set_queue_congested);
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev: device
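A sketch of what the backing-dev-level replacements might look like: the
bodies mirror the deleted queue-level functions but take a bare struct
backing_dev_info, so users without a request queue can participate (the
function names are assumptions inferred from the commit message):

	void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
	{
		enum bdi_state bit;
		wait_queue_head_t *wqh = &congestion_wqh[rw];

		bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
		clear_bit(bit, &bdi->state);
		/* order the bit clear before checking for waiters */
		smp_mb__after_clear_bit();
		if (waitqueue_active(wqh))
			wake_up(wqh);
	}

	void set_bdi_congested(struct backing_dev_info *bdi, int rw)
	{
		enum bdi_state bit;

		bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
		set_bit(bit, &bdi->state);
	}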
@@ -2755,41 +2719,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-	long ret;
-	DEFINE_WAIT(wait);
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-	ret = io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
-	return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
 /*
  * Has to be called with the request spinlock acquired
  */
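Similarly, the waiting side presumably becomes queue-independent; a sketch
mirroring the deleted blk_congestion_wait(), with the name congestion_wait
assumed from the commit message's "core backing-dev congestion functions":

	long congestion_wait(int rw, long timeout)
	{
		long ret;
		DEFINE_WAIT(wait);
		wait_queue_head_t *wqh = &congestion_wqh[rw];

		/* Sleep until some backing device signals that it has
		 * exited congestion, or until the timeout expires. */
		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
		ret = io_schedule_timeout(timeout);
		finish_wait(wqh, &wait);
		return ret;
	}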