author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-03 15:43:36 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-03 15:43:36 -0400
commit    b4f555081fdd27d13e6ff39d455d5aefae9d2c0c (patch)
tree      917acaae9556ad2c372d001bf786cfbcf8102684
parent    160acc2e899f26356bde92bc257253b7ca78f0c3 (diff)
parent    51fd77bd9f512ab6cc9df0733ba1caaab89eb957 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  [BLOCK] Don't allow empty barriers to be passed down to queues that don't grok them
  dm: bounce_pfn limit added
  Deadline iosched: Fix batching fairness
  Deadline iosched: Reset batch for ordered requests
  Deadline iosched: Factor out finding latter request
-rw-r--r--  block/deadline-iosched.c       53
-rw-r--r--  block/ll_rw_blk.c               7
-rw-r--r--  drivers/md/dm-table.c           7
-rw-r--r--  include/linux/device-mapper.h   1
4 files changed, 40 insertions(+), 28 deletions(-)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 1a511ffaf8a4..a054eef8dff6 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -55,6 +55,20 @@ static void deadline_move_request(struct deadline_data *, struct request *);
 
 #define RQ_RB_ROOT(dd, rq)      (&(dd)->sort_list[rq_data_dir((rq))])
 
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+        struct rb_node *node = rb_next(&rq->rb_node);
+
+        if (node)
+                return rb_entry_rq(node);
+
+        return NULL;
+}
+
 static void
 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
 {
@@ -74,13 +88,8 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
 {
         const int data_dir = rq_data_dir(rq);
 
-        if (dd->next_rq[data_dir] == rq) {
-                struct rb_node *rbnext = rb_next(&rq->rb_node);
-
-                dd->next_rq[data_dir] = NULL;
-                if (rbnext)
-                        dd->next_rq[data_dir] = rb_entry_rq(rbnext);
-        }
+        if (dd->next_rq[data_dir] == rq)
+                dd->next_rq[data_dir] = deadline_latter_request(rq);
 
         elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
 }
@@ -198,14 +207,11 @@ static void
 deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
         const int data_dir = rq_data_dir(rq);
-        struct rb_node *rbnext = rb_next(&rq->rb_node);
 
         dd->next_rq[READ] = NULL;
         dd->next_rq[WRITE] = NULL;
+        dd->next_rq[data_dir] = deadline_latter_request(rq);
 
-        if (rbnext)
-                dd->next_rq[data_dir] = rb_entry_rq(rbnext);
-
         dd->last_sector = rq->sector + rq->nr_sectors;
 
         /*
@@ -301,30 +307,23 @@ dispatch_find_request:
         /*
          * we are not running a batch, find best request for selected data_dir
          */
-        if (deadline_check_fifo(dd, data_dir)) {
-                /* An expired request exists - satisfy it */
-                dd->batching = 0;
+        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+                /*
+                 * A deadline has expired, the last request was in the other
+                 * direction, or we have run out of higher-sectored requests.
+                 * Start again from the request with the earliest expiry time.
+                 */
                 rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
-
-        } else if (dd->next_rq[data_dir]) {
+        } else {
                 /*
                  * The last req was the same dir and we have a next request in
                  * sort order. No expired requests so continue on from here.
                  */
                 rq = dd->next_rq[data_dir];
-        } else {
-                struct rb_node *node;
-                /*
-                 * The last req was the other direction or we have run out of
-                 * higher-sectored requests. Go back to the lowest sectored
-                 * request (1 way elevator) and start a new batch.
-                 */
-                dd->batching = 0;
-                node = rb_first(&dd->sort_list[data_dir]);
-                if (node)
-                        rq = rb_entry_rq(node);
         }
 
+        dd->batching = 0;
+
 dispatch_request:
         /*
          * rq is the selected appropriate request.
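
Net effect of the deadline-iosched hunks above: the three open-coded rb_next() walks collapse into deadline_latter_request(), and the batch counter is reset in exactly one place, so an expired deadline, a direction switch, or running off the end of the sort tree all restart a fresh batch from the FIFO head. A condensed sketch of the resulting dispatch choice; pick_next_request() is a hypothetical name for illustration, the other identifiers are from the file above:

static struct request *pick_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;

        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
                /* a deadline expired, the direction changed, or the batch
                 * ran off the end of the sort tree: restart from the
                 * request with the earliest expiry time */
                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        } else {
                /* same direction with a known sector-sorted successor:
                 * keep the one-way elevator moving */
                rq = dd->next_rq[data_dir];
        }

        dd->batching = 0;       /* either way, a new batch starts here */
        return rq;
}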
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 54fd38589674..75c98d58f4dd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3221,6 +3221,7 @@ static inline void __generic_make_request(struct bio *bio)
         sector_t old_sector;
         int ret, nr_sectors = bio_sectors(bio);
         dev_t old_dev;
+        int err = -EIO;
 
         might_sleep();
 
@@ -3248,7 +3249,7 @@ static inline void __generic_make_request(struct bio *bio)
                                 bdevname(bio->bi_bdev, b),
                                 (long long) bio->bi_sector);
 end_io:
-                        bio_endio(bio, -EIO);
+                        bio_endio(bio, err);
                         break;
                 }
 
@@ -3283,6 +3284,10 @@ end_io:
 
                 if (bio_check_eod(bio, nr_sectors))
                         goto end_io;
+                if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+                        err = -EOPNOTSUPP;
+                        goto end_io;
+                }
 
                 ret = q->make_request_fn(q, bio);
         } while (ret);
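
The ll_rw_blk.c hunks make __generic_make_request() fail an empty barrier (a barrier bio carrying no data, used purely as a cache-flush point) with -EOPNOTSUPP when the queue has no prepare_flush_fn to implement it, instead of handing it to a driver that cannot honor it. Submitters see the error through their completion callback. A minimal caller-side sketch, assuming the bi_end_io signature of this kernel series; the helper name and flag handling mirror the blkdev_issue_flush() pattern but are illustrative, not the exact source:

static void empty_barrier_end_io(struct bio *bio, int err)
{
        struct completion *done = bio->bi_private;

        if (err == -EOPNOTSUPP)
                /* queue cannot flush: fall back (e.g. stop issuing
                 * barriers on this queue) rather than treat this as
                 * a real I/O error */
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);

        complete(done);
}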
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8939e6105088..5a7eb650181e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -102,6 +102,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
         lhs->seg_boundary_mask =
                 min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+        lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
         lhs->no_cluster |= rhs->no_cluster;
 }
 
@@ -566,6 +568,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
                 min_not_zero(rs->seg_boundary_mask,
                              q->seg_boundary_mask);
 
+        rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
         rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -707,6 +711,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
                 rs->max_segment_size = MAX_SEGMENT_SIZE;
         if (!rs->seg_boundary_mask)
                 rs->seg_boundary_mask = -1;
+        if (!rs->bounce_pfn)
+                rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
@@ -891,6 +897,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
         q->hardsect_size = t->limits.hardsect_size;
         q->max_segment_size = t->limits.max_segment_size;
         q->seg_boundary_mask = t->limits.seg_boundary_mask;
+        q->bounce_pfn = t->limits.bounce_pfn;
         if (t->limits.no_cluster)
                 q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
         else
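
The dm-table.c hunks stack the new bounce_pfn limit the same way as the existing restrictions: min_not_zero() keeps the more restrictive of two values while treating 0 as "not set", and check_for_valid_limits() turns a still-unset 0 into -1 (no restriction) so a zero never reaches the queue. A standalone sketch of the combining semantics, with made-up values and a userspace restatement of the kernel's min_not_zero():

#include <stdio.h>

/* userspace restatement of the kernel macro's semantics: zero means
 * "unset" and loses to any real limit */
#define min_not_zero(x, y) \
        ((x) == 0 ? (y) : ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))

int main(void)
{
        unsigned long dev1 = 0xfffff;   /* must bounce above ~4GB (4KB pages) */
        unsigned long dev2 = 0;         /* no limit recorded for this device */

        /* prints 0xfffff: the unset zero is ignored, the real limit wins */
        printf("combined bounce_pfn: %#lx\n", min_not_zero(dev1, dev2));
        return 0;
}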
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 37c66d1254b5..b8b7c51389fe 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -116,6 +116,7 @@ struct io_restrictions {
         unsigned short hardsect_size;
         unsigned int max_segment_size;
         unsigned long seg_boundary_mask;
+        unsigned long bounce_pfn;
         unsigned char no_cluster; /* inverted so that 0 is default */
 };
 