Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 108
1 file changed, 27 insertions(+), 81 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17e5caa..9eaee6640535 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-	};
-
 /*
  * Controlling structure to kblockd
  */
@@ -112,35 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exitted congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	clear_bit(bit, &q->backing_dev_info.state);
-	smp_mb__after_clear_bit();
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	set_bit(bit, &q->backing_dev_info.state);
-}
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev: device
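The call sites later in this patch use blk_clear_queue_congested() and blk_set_queue_congested(), which are not defined anywhere in this file. Judging only from the removed bodies in the hunk above, a minimal sketch of what such replacements would have to do is shown below. The function bodies and the _sketch names are assumptions, not taken from this patch; in particular, the congestion_wqh wakeup that used to sit in clear_queue_congested() is presumed to move out of the block layer along with the wait queue itself.

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Sketch only: mirrors the removed clear_queue_congested() flag handling. */
static void blk_clear_queue_congested_sketch(request_queue_t *q, int rw)
{
	enum bdi_state bit;

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &q->backing_dev_info.state);
	smp_mb__after_clear_bit();
	/* waking congestion waiters is assumed to happen elsewhere now */
}

/* Sketch only: mirrors the removed set_queue_congested(). */
static void blk_set_queue_congested_sketch(request_queue_t *q, int rw)
{
	enum bdi_state bit;

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	set_bit(bit, &q->backing_dev_info.state);
}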
@@ -159,7 +125,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 	ret = &q->backing_dev_info;
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +132,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
 	q->activity_fn = fn;
 	q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -2067,7 +2031,7 @@ static void __freed_request(request_queue_t *q, int rw)
 	struct request_list *rl = &q->rq;
 
 	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, rw);
+		blk_clear_queue_congested(q, rw);
 
 	if (rl->count[rw] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2101,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				}
 			}
 		}
-		set_queue_congested(q, rw);
+		blk_set_queue_congested(q, rw);
 	}
 
 	/*
@@ -2755,41 +2719,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-	long ret;
-	DEFINE_WAIT(wait);
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-	ret = io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
-	return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
 /*
  * Has to be called with the request spinlock acquired
  */
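blk_congestion_wait() and blk_congestion_end() are removed here with no in-file replacement; whatever takes over their role lives outside this diff. For context, a hedged sketch of how a writeback-style caller used the removed API to throttle on a congested device is shown below; the helper name, the bdi argument and the HZ/20 interval are illustrative, not taken from this patch.

/* Illustrative only: loop until the write side of this backing device is no
 * longer marked congested, sleeping up to 50ms (HZ/20 jiffies) per pass. */
static void throttle_on_write_congestion(struct backing_dev_info *bdi)
{
	while (bdi_write_congested(bdi))
		blk_congestion_wait(WRITE, HZ / 20);
}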
@@ -3070,6 +2999,7 @@ void generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
+	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
@@ -3098,7 +3028,7 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
-	maxsector = -1;
+	old_sector = -1;
 	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
@@ -3132,15 +3062,31 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
-		if (maxsector != -1)
+		if (old_sector != -1)
 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    maxsector);
+					    old_sector);
 
 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-		maxsector = bio->bi_sector;
+		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
+		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+		if (maxsector) {
+			sector_t sector = bio->bi_sector;
+
+			if (maxsector < nr_sectors ||
+					maxsector - nr_sectors < sector) {
+				/*
+				 * This may well happen - partitions are not
+				 * checked to make sure they are within the size
+				 * of the whole device.
+				 */
+				handle_bad_sector(bio);
+				goto end_io;
+			}
+		}
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
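The bounds test added above is written as maxsector < nr_sectors || maxsector - nr_sectors < sector rather than the more obvious sector + nr_sectors > maxsector, so the sum is never formed and therefore can never wrap around in sector_t. Below is a small self-contained sketch of the same predicate in userspace C, with uint64_t standing in for sector_t; the function name and the sample values are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same two-step test as the hunk above: reject an I/O that would run past
 * the end of the device, without ever computing sector + nr_sectors. */
static bool bio_out_of_range(uint64_t maxsector, uint64_t sector,
			     uint64_t nr_sectors)
{
	return maxsector < nr_sectors || maxsector - nr_sectors < sector;
}

int main(void)
{
	/* a 1 MiB device has 2048 sectors of 512 bytes */
	printf("%d\n", bio_out_of_range(2048, 2040, 8));	/* 0: last 8 sectors, fits exactly */
	printf("%d\n", bio_out_of_range(2048, 2041, 8));	/* 1: one sector past the end */
	printf("%d\n", bio_out_of_range(2048, UINT64_MAX, 8));	/* 1: a naive sum would wrap */
	return 0;
}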
@@ -3765,14 +3711,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
+		blk_set_queue_congested(q, READ);
 	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+		blk_clear_queue_congested(q, READ);
 
 	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, WRITE);
+		blk_set_queue_congested(q, WRITE);
 	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, WRITE);
+		blk_clear_queue_congested(q, WRITE);
 
 	if (rl->count[READ] >= q->nr_requests) {
 		blk_set_queue_full(q, READ);