diff options
author | Jens Axboe <jens.axboe@oracle.com> | 2006-12-20 05:04:12 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2006-12-20 05:04:12 -0500 |
commit | da775265021b61d5eb81df155e36cb0810f6df53 (patch) | |
tree | f7b28991b281a8c581bd3ab0bb470e158ae2206f /block | |
parent | 8e5cfc45e7527eb5c8a9a22d56a7b9227e7c0913 (diff) |
[PATCH] cfq-iosched: don't allow sync merges across queues
Currently we allow any merge, even if the io originates from different
processes. This can cause really bad starvation and unfairness, if those
ios happen to be synchronous (reads or direct writes).
So add an allow_merge hook to the io scheduler ops, so an io scheduler can
help decide whether a bio/process combination may be merged with an
existing request.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 33 | ||||
-rw-r--r-- | block/elevator.c | 26 |
2 files changed, 55 insertions, 4 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 533a2938ffd6..9fc5eafa6c0e 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq, | |||
568 | cfq_remove_request(next); | 568 | cfq_remove_request(next); |
569 | } | 569 | } |
570 | 570 | ||
571 | static int cfq_allow_merge(request_queue_t *q, struct request *rq, | ||
572 | struct bio *bio) | ||
573 | { | ||
574 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
575 | const int rw = bio_data_dir(bio); | ||
576 | struct cfq_queue *cfqq; | ||
577 | pid_t key; | ||
578 | |||
579 | /* | ||
580 | * If bio is async or a write, always allow merge | ||
581 | */ | ||
582 | if (!bio_sync(bio) || rw == WRITE) | ||
583 | return 1; | ||
584 | |||
585 | /* | ||
586 | * bio is sync. if request is not, disallow. | ||
587 | */ | ||
588 | if (!rq_is_sync(rq)) | ||
589 | return 0; | ||
590 | |||
591 | /* | ||
592 | * Ok, both bio and request are sync. Allow merge if they are | ||
593 | * from the same queue. | ||
594 | */ | ||
595 | key = cfq_queue_pid(current, rw, 1); | ||
596 | cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio); | ||
597 | if (cfqq != RQ_CFQQ(rq)) | ||
598 | return 0; | ||
599 | |||
600 | return 1; | ||
601 | } | ||
602 | |||
571 | static inline void | 603 | static inline void |
572 | __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 604 | __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
573 | { | 605 | { |
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = { | |||
2125 | .elevator_merge_fn = cfq_merge, | 2157 | .elevator_merge_fn = cfq_merge, |
2126 | .elevator_merged_fn = cfq_merged_request, | 2158 | .elevator_merged_fn = cfq_merged_request, |
2127 | .elevator_merge_req_fn = cfq_merged_requests, | 2159 | .elevator_merge_req_fn = cfq_merged_requests, |
2160 | .elevator_allow_merge_fn = cfq_allow_merge, | ||
2128 | .elevator_dispatch_fn = cfq_dispatch_requests, | 2161 | .elevator_dispatch_fn = cfq_dispatch_requests, |
2129 | .elevator_add_req_fn = cfq_insert_request, | 2162 | .elevator_add_req_fn = cfq_insert_request, |
2130 | .elevator_activate_req_fn = cfq_activate_request, | 2163 | .elevator_activate_req_fn = cfq_activate_request, |
diff --git a/block/elevator.c b/block/elevator.c index c0063f345c5d..62c7a3069d3a 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6; | |||
51 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) | 51 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Query io scheduler to see if the current process issuing bio may be | ||
55 | * merged with rq. | ||
56 | */ | ||
57 | static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) | ||
58 | { | ||
59 | request_queue_t *q = rq->q; | ||
60 | elevator_t *e = q->elevator; | ||
61 | |||
62 | if (e->ops->elevator_allow_merge_fn) | ||
63 | return e->ops->elevator_allow_merge_fn(q, rq, bio); | ||
64 | |||
65 | return 1; | ||
66 | } | ||
67 | |||
68 | /* | ||
54 | * can we safely merge with this request? | 69 | * can we safely merge with this request? |
55 | */ | 70 | */ |
56 | inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) | 71 | inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) |
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) | |||
65 | return 0; | 80 | return 0; |
66 | 81 | ||
67 | /* | 82 | /* |
68 | * same device and no special stuff set, merge is ok | 83 | * must be same device and not a special request |
69 | */ | 84 | */ |
70 | if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special) | 85 | if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special) |
71 | return 1; | 86 | return 0; |
72 | 87 | ||
73 | return 0; | 88 | if (!elv_iosched_allow_merge(rq, bio)) |
89 | return 0; | ||
90 | |||
91 | return 1; | ||
74 | } | 92 | } |
75 | EXPORT_SYMBOL(elv_rq_merge_ok); | 93 | EXPORT_SYMBOL(elv_rq_merge_ok); |
76 | 94 | ||