author		Jens Axboe <jens.axboe@oracle.com>	2010-02-22 07:48:51 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-02-22 07:48:51 -0500
commit		f11cbd74c5ff3614f6390b4de67a6ffdc614c378 (patch)
tree		6a30920ade9eeaac5bf6d6263b5d09712e882eb0 /block
parent		429c42c9d246f5bda868495c09974312a0177328 (diff)
parent		aea187c46f7d03ce985e55eb1398d0776a15b928 (diff)
Merge branch 'master' into for-2.6.34
Diffstat (limited to 'block')

 -rw-r--r--  block/blk-cgroup.c   |  4
 -rw-r--r--  block/cfq-iosched.c  | 57
 2 files changed, 25 insertions(+), 36 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1fa2654db0a6..e7dbbaf5fb3e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,16 +147,16 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		return -EINVAL;
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
+	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
 	blkcg->weight = (unsigned int)val;
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		spin_lock(&blkio_list_lock);
 		list_for_each_entry(blkiop, &blkio_list, list)
 			blkiop->ops.blkio_update_group_weight_fn(blkg,
 							blkcg->weight);
-		spin_unlock(&blkio_list_lock);
 	}
 	spin_unlock_irq(&blkcg->lock);
+	spin_unlock(&blkio_list_lock);
 	return 0;
 }
 
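Editorial note: the blk-cgroup.c hunk widens the scope of blkio_list_lock. Instead of taking and releasing it around the policy callbacks inside the per-group loop, the writer now takes it once, before blkcg->lock, and drops it after the loop. The stand-alone sketch below illustrates that hoist-the-lock pattern; it uses plain pthreads and invented names (policy_list_lock, update_weight), not the kernel's locking primitives.

/* Sketch: hoisting a secondary lock out of a loop, as the hunk above does
 * with blkio_list_lock.  Illustrative only: userspace pthreads, made-up
 * structures, no kernel semantics. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t policy_list_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ blkio_list_lock */
static pthread_mutex_t group_lock       = PTHREAD_MUTEX_INITIALIZER; /* ~ blkcg->lock */

struct group { int weight; };

static void update_weight(struct group *groups, int ngroups, int val)
{
	/* New pattern: take the outer (list) lock once, then the group lock,
	 * and keep that order everywhere so the two locks cannot deadlock. */
	pthread_mutex_lock(&policy_list_lock);
	pthread_mutex_lock(&group_lock);
	for (int i = 0; i < ngroups; i++)
		groups[i].weight = val;	/* ~ blkio_update_group_weight_fn() */
	pthread_mutex_unlock(&group_lock);
	pthread_mutex_unlock(&policy_list_lock);
}

int main(void)
{
	struct group g[2] = { {100}, {100} };

	update_weight(g, 2, 500);
	printf("%d %d\n", g[0].weight, g[1].weight);
	return 0;
}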
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd9aeb1..023f4e69a337 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT	12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
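Editorial note: CFQQ_SEEK_THR and CFQQ_SEEKY() are not new; they move from the middle of the file (see the later hunk that deletes them next to cfq_dist_from_last()) up to the header block so that __cfq_slice_expired() can use them. As a rough illustration of what the macro encodes, the sketch below keeps a per-queue mean seek distance and flags the queue as seeky once the mean exceeds 8 * 1024 sectors. The averaging is simplified and does not reproduce cfq_update_io_seektime() exactly; names are invented.

/* Illustration of the CFQQ_SEEKY() idea: track a mean seek distance per
 * queue and call the queue "seeky" once the mean crosses a threshold. */
#include <stdint.h>
#include <stdio.h>

#define SEEK_THR	(8 * 1024)	/* sectors, as in CFQQ_SEEK_THR */

struct queue_stats {
	uint64_t seek_total;
	uint64_t seek_samples;
	uint64_t seek_mean;
	uint64_t last_pos;
};

static void update_seek(struct queue_stats *q, uint64_t pos)
{
	uint64_t dist = pos > q->last_pos ? pos - q->last_pos : q->last_pos - pos;

	q->seek_total += dist;
	q->seek_samples++;
	q->seek_mean = q->seek_total / q->seek_samples;	/* simplified average */
	q->last_pos = pos;
}

static int queue_is_seeky(const struct queue_stats *q)
{
	return q->seek_mean > SEEK_THR;			/* ~ CFQQ_SEEKY(cfqq) */
}

int main(void)
{
	struct queue_stats q = { 0 };
	uint64_t pattern[] = { 0, 8, 16, 1000000, 24, 2000000 };

	for (size_t i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++)
		update_seek(&q, pattern[i]);
	printf("mean=%llu seeky=%d\n",
	       (unsigned long long)q.seek_mean, queue_is_seeky(&q));
	return 0;
}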
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
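Editorial note: each CFQ_CFQQ_FNS(name) invocation expands into three helpers, cfq_mark_cfqq_<name>(), cfq_clear_cfqq_<name>() and cfq_cfqq_<name>(), which set, clear and test one bit of the queue's flag word, so adding split_coop here is what makes cfq_mark_cfqq_split_coop() and friends available to the later hunks. The stand-alone sketch below approximates that generator pattern; the kernel's exact macro body may differ slightly.

/* Sketch of a CFQ_CFQQ_FNS()-style flag-helper generator.  Userspace
 * version with the queue reduced to a single flag word. */
#include <stdio.h>

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_coop,
	CFQ_CFQQ_FLAG_split_coop,
};

struct cfq_queue {
	unsigned int flags;
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(coop)
CFQ_CFQQ_FNS(split_coop)
#undef CFQ_CFQQ_FNS

int main(void)
{
	struct cfq_queue q = { 0 };

	cfq_mark_cfqq_split_coop(&q);
	printf("coop=%d split_coop=%d\n",
	       cfq_cfqq_coop(&q), cfq_cfqq_split_coop(&q));
	return 0;
}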
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_busy(cfqq);
 
 	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance. If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
+	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
 	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
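Editorial note: together with the two hunks at the end of this diff, this replaces the old jiffies-based CFQQ_COOP_TOUT window with a two-step handshake: the slice-expiry path marks a shared queue for splitting as soon as it has turned seeky, and the request-setup path performs the split the next time the queue is used. A compressed, stand-alone sketch of that handshake follows; locking, reference counting and the real cfq structures are deliberately left out.

/* Sketch of the mark-then-split handshake introduced above.  Not kernel
 * code: invented types, no concurrency. */
#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool coop;		/* queue is shared by several processes  */
	bool split_coop;	/* shared queue has been marked for split */
	bool seeky;		/* ~ CFQQ_SEEKY(cfqq)                     */
};

/* ~ __cfq_slice_expired(): decide at slice end, record in a flag. */
static void slice_expired(struct queue *q)
{
	if (q->coop && q->seeky)
		q->split_coop = true;
}

/* ~ the request-setup path: act on the flag the next time the queue is
 * looked up, breaking the shared queue apart. */
static void set_request(struct queue *q)
{
	if (q->coop && q->split_coop) {
		printf("breaking apart cfqq\n");
		q->coop = false;
		q->split_coop = false;	/* ~ cfq_clear_cfqq_split_coop() */
	}
}

int main(void)
{
	struct queue q = { .coop = true, .seeky = true };

	slice_expired(&q);	/* marks the queue */
	set_request(&q);	/* splits it       */
	return 0;
}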
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 	return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -1803,7 +1807,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1;
+	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
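Editorial note: the added cfq_cfqq_sync() test means idling is only considered for a synchronous queue, even when it is the last queue in its service tree; idling on an async queue would just stall dispatch for no benefit. A minimal illustration of the tightened condition, with invented types and the earlier checks of cfq_should_idle() elided:

/* Sketch only: the real cfq_should_idle() has several more checks. */
#include <stdbool.h>
#include <stdio.h>

struct service_tree { int count; };
struct queue { bool sync; struct service_tree *st; };

static bool should_idle(const struct queue *q)
{
	/* ...earlier checks omitted... */
	return q->st->count == 1 && q->sync;	/* the new, stricter test */
}

int main(void)
{
	struct service_tree st = { .count = 1 };
	struct queue sync_q  = { .sync = true,  .st = &st };
	struct queue async_q = { .sync = false, .st = &st };

	printf("sync: %d, async: %d\n",
	       should_idle(&sync_q), should_idle(&async_q));
	return 0;
}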
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance. If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3077,6 +3068,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
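Editorial note: the new check sits ahead of the existing sync-over-async rule, so a best-effort request can no longer preempt a running realtime queue merely because it is synchronous. The sketch below mirrors that ordering with invented minimal types; the real cfq_should_preempt() applies several more rules before and after these two.

/* Sketch of the preemption ordering touched by the hunk above. */
#include <stdbool.h>
#include <stdio.h>

enum ioclass { IOCLASS_RT, IOCLASS_BE };

struct queue {
	enum ioclass cls;
	bool sync;
};

static bool should_preempt(const struct queue *running, const struct queue *incoming)
{
	/* new rule: never let best-effort I/O preempt a realtime slice */
	if (running->cls == IOCLASS_RT && incoming->cls != IOCLASS_RT)
		return false;

	/* pre-existing rule: sync beats async */
	if (incoming->sync && !running->sync)
		return true;

	return false;
}

int main(void)
{
	struct queue rt_async = { IOCLASS_RT, false };
	struct queue be_sync  = { IOCLASS_BE, true  };

	printf("BE sync preempts RT async? %d\n",
	       should_preempt(&rt_async, &be_sync));
	return 0;
}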
@@ -3447,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3463,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
 
@@ -3504,7 +3493,7 @@ new_queue:
 	/*
 	 * If the queue was seeky for too long, break it apart.
 	 */
-	if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+	if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 		cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 		cfqq = split_cfqq(cic, cfqq);
 		if (!cfqq)
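Editorial note: with should_split_cfqq() gone, the split decision is just the split_coop flag test above, and split_cfqq() either reclaims the queue for its last remaining process or returns NULL so the caller allocates a private one, per the comment earlier in this diff. The sketch below approximates that flow; the real function also adjusts reference counts and the cic-to-cfqq mapping, which are not shown in these hunks.

/* Sketch of the split flow wired up by the last three hunks.  Invented
 * types; no kernel semantics. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct queue {
	int  process_refs;
	int  pid;
	bool coop, split_coop;
};

static struct queue *split_queue(struct queue *q, int current_pid)
{
	if (q->process_refs == 1) {
		/* last user: keep the queue, but it is no longer shared */
		q->pid = current_pid;
		q->coop = false;
		q->split_coop = false;
		return q;
	}
	/* still shared: tell the caller to allocate a fresh, private queue */
	q->process_refs--;
	return NULL;
}

int main(void)
{
	struct queue q = { .process_refs = 2, .coop = true, .split_coop = true };

	printf("first split  -> %s\n", split_queue(&q, 101) ? "reuse" : "new queue");
	printf("second split -> %s\n", split_queue(&q, 102) ? "reuse" : "new queue");
	return 0;
}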