author     Jeff Moyer <jmoyer@redhat.com>        2009-10-23 17:14:49 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2009-10-26 09:34:46 -0400
commit     b2c18e1e08a5a9663094d57bb4be2f02226ee61c (patch)
tree       55c092706a0d8a9e645f245bf2ecc45c29ba5aa7 /block
parent     c30f33437c3f85ec48353a1ef811e148217a2aaf (diff)
cfq: calculate the seek_mean per cfq_queue not per cfq_io_context
Async cfq_queues are already shared between processes within the same
priority, and forthcoming patches will change the mapping of cic to sync
cfq_queue from 1:1 to 1:N. So, calculate the seekiness of a process
based on the cfq_queue instead of the cfq_io_context.
Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
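
[Editor's illustration, not part of the patch.] The statistics that move into struct cfq_queue are an 8-weight exponentially decaying average of the per-request seek distance, updated by cfq_update_io_seektime(). The standalone userspace sketch below re-derives that arithmetic under a few assumptions of mine: sector_t is approximated by a 64-bit integer, the kernel's do_div() is replaced by plain division, and last_request_pos is updated inside the helper rather than in cfq_rq_enqueued(). The struct and function names here are mine, not the kernel's.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef u64 sector_t;           /* assumption; the kernel type may be 32-bit */

/* Per-queue seek state, mirroring the fields this patch adds to cfq_queue. */
struct seek_state {
        unsigned int seek_samples;
        u64 seek_total;
        sector_t seek_mean;
        sector_t last_request_pos;
};

static u64 min_u64(u64 a, u64 b)
{
        return a < b ? a : b;
}

/*
 * Same weighted-average update as cfq_update_io_seektime(), minus the
 * kernel-specific pieces: blk_rq_pos() becomes a plain position argument
 * and do_div() becomes ordinary 64-bit division.
 */
static void update_seek(struct seek_state *s, sector_t pos)
{
        sector_t sdist;
        u64 total;

        if (!s->last_request_pos)
                sdist = 0;
        else if (s->last_request_pos < pos)
                sdist = pos - s->last_request_pos;
        else
                sdist = s->last_request_pos - pos;

        /* Clamp the odd huge seek so it cannot swamp the mean. */
        if (s->seek_samples <= 60)
                sdist = min_u64(sdist, (s->seek_mean * 4) + 2*1024*1024);
        else
                sdist = min_u64(sdist, (s->seek_mean * 4) + 2*1024*64);

        s->seek_samples = (7*s->seek_samples + 256) / 8;
        s->seek_total = (7*s->seek_total + (u64)256*sdist) / 8;
        total = s->seek_total + (s->seek_samples/2);
        s->seek_mean = (sector_t)(total / s->seek_samples);
        s->last_request_pos = pos;   /* the kernel does this in cfq_rq_enqueued() */
}

int main(void)
{
        struct seek_state s = { 0 };
        sector_t pos;
        int i;

        /* A sequential stream keeps the mean tiny... */
        for (pos = 8; pos <= 8 * 128; pos += 8)
                update_seek(&s, pos);
        printf("sequential: seek_mean=%llu\n", (unsigned long long)s.seek_mean);

        /* ...while scattered positions push it past the 8*1024 CFQQ_SEEK_THR. */
        for (i = 1; i <= 128; i++)
                update_seek(&s, (sector_t)(i * 1000003) % (1u << 24));
        printf("scattered:  seek_mean=%llu seeky=%d\n",
               (unsigned long long)s.seek_mean, s.seek_mean > 8 * 1024);
        return 0;
}

Built with a plain cc, the sequential phase settles near the 8-sector stride while the scattered phase climbs well past the 8*1024 threshold; that is the distinction the patch's CFQQ_SEEKY() draws, now per queue rather than per io_context.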
Diffstat (limited to 'block')
-rw-r--r--   block/cfq-iosched.c   68
1 file changed, 33 insertions(+), 35 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 069a61017c02..78cc8ee5da41 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -112,6 +112,11 @@ struct cfq_queue {
         unsigned short ioprio, org_ioprio;
         unsigned short ioprio_class, org_ioprio_class;
 
+        unsigned int seek_samples;
+        u64 seek_total;
+        sector_t seek_mean;
+        sector_t last_request_pos;
+
         pid_t pid;
 };
 
@@ -962,16 +967,16 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                 return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CIC_SEEK_THR    8 * 1024
-#define CIC_SEEKY(cic)  ((cic)->seek_mean > CIC_SEEK_THR)
+#define CFQQ_SEEK_THR           8 * 1024
+#define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
 
-static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                               struct request *rq)
 {
-        struct cfq_io_context *cic = cfqd->active_cic;
-        sector_t sdist = cic->seek_mean;
+        sector_t sdist = cfqq->seek_mean;
 
-        if (!sample_valid(cic->seek_samples))
-                sdist = CIC_SEEK_THR;
+        if (!sample_valid(cfqq->seek_samples))
+                sdist = CFQQ_SEEK_THR;
 
         return cfq_dist_from_last(cfqd, rq) <= sdist;
 }
@@ -1000,7 +1005,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
          * will contain the closest sector.
          */
         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
-        if (cfq_rq_close(cfqd, __cfqq->next_rq))
+        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                 return __cfqq;
 
         if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1011,7 +1016,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
                 return NULL;
 
         __cfqq = rb_entry(node, struct cfq_queue, p_node);
-        if (cfq_rq_close(cfqd, __cfqq->next_rq))
+        if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                 return __cfqq;
 
         return NULL;
@@ -1034,13 +1039,6 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
         struct cfq_queue *cfqq;
 
         /*
-         * A valid cfq_io_context is necessary to compare requests against
-         * the seek_mean of the current cfqq.
-         */
-        if (!cfqd->active_cic)
-                return NULL;
-
-        /*
          * We should notice if some of the queues are cooperating, eg
          * working closely on the same area of the disk. In that case,
          * we can group them together and don't waste time idling.
@@ -1110,7 +1108,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
          * seeks. so allow a little bit of time for him to submit a new rq
          */
         sl = cfqd->cfq_slice_idle;
-        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+        if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -1947,33 +1945,33 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 }
 
 static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        struct request *rq)
 {
         sector_t sdist;
         u64 total;
 
-        if (!cic->last_request_pos)
+        if (!cfqq->last_request_pos)
                 sdist = 0;
-        else if (cic->last_request_pos < blk_rq_pos(rq))
-                sdist = blk_rq_pos(rq) - cic->last_request_pos;
+        else if (cfqq->last_request_pos < blk_rq_pos(rq))
+                sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
         else
-                sdist = cic->last_request_pos - blk_rq_pos(rq);
+                sdist = cfqq->last_request_pos - blk_rq_pos(rq);
 
         /*
          * Don't allow the seek distance to get too large from the
          * odd fragment, pagein, etc
          */
-        if (cic->seek_samples <= 60) /* second&third seek */
-                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
+        if (cfqq->seek_samples <= 60) /* second&third seek */
+                sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
         else
-                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
+                sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
 
-        cic->seek_samples = (7*cic->seek_samples + 256) / 8;
-        cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-        total = cic->seek_total + (cic->seek_samples/2);
-        do_div(total, cic->seek_samples);
-        cic->seek_mean = (sector_t)total;
+        cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
+        cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
+        total = cfqq->seek_total + (cfqq->seek_samples/2);
+        do_div(total, cfqq->seek_samples);
+        cfqq->seek_mean = (sector_t)total;
 }
 
 /*
@@ -1995,11 +1993,11 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-            (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
+            (!cfqd->cfq_latency && cfqd->hw_tag && CFQQ_SEEKY(cfqq)))
                 enable_idle = 0;
         else if (sample_valid(cic->ttime_samples)) {
                 unsigned int slice_idle = cfqd->cfq_slice_idle;
-                if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+                if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
                         slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
                 if (cic->ttime_mean > slice_idle)
                         enable_idle = 0;
@@ -2066,7 +2064,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
          * if this request is as-good as one we would expect from the
          * current cfqq, let it preempt
          */
-        if (cfq_rq_close(cfqd, rq))
+        if (cfq_rq_close(cfqd, cfqq, rq))
                 return true;
 
         return false;
@@ -2108,10 +2106,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 cfqq->meta_pending++;
 
         cfq_update_io_thinktime(cfqd, cic);
-        cfq_update_io_seektime(cfqd, cic, rq);
+        cfq_update_io_seektime(cfqd, cfqq, rq);
         cfq_update_idle_window(cfqd, cfqq, cic);
 
-        cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (cfqq == cfqd->active_queue) {
                 /*
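
[Editor's illustration, not part of the patch.] Because the seek statistics now live on the queue, cfq_rq_close() no longer has to bail out when cfqd->active_cic is missing (the check deleted from cfq_close_cooperator() above) and any cfq_queue can be tested directly. Roughly, the predicate it evaluates is the following; rq_close() and its parameter names are hypothetical stand-ins, and the sample threshold mirrors the kernel's sample_valid(), which in this era of the code requires more than 80 samples.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;              /* assumption, as in the sketch above */

#define CFQQ_SEEK_THR   (8 * 1024)

/*
 * A request is "close" to a queue when its distance from the last dispatched
 * position falls within that queue's mean seek distance, falling back to
 * CFQQ_SEEK_THR while the mean still rests on too few samples.
 */
static int rq_close(unsigned int seek_samples, sector_t seek_mean,
                    sector_t last_position, sector_t rq_pos)
{
        sector_t sdist = seek_mean;
        sector_t dist = last_position > rq_pos ? last_position - rq_pos
                                               : rq_pos - last_position;

        if (seek_samples <= 80)
                sdist = CFQQ_SEEK_THR;

        return dist <= sdist;
}

int main(void)
{
        /* Too few samples: anything within 8*1024 sectors counts as close. */
        printf("%d\n", rq_close(10, 0, 100000, 104096));   /* prints 1 */
        /* A settled, tight mean: the same request is no longer close. */
        printf("%d\n", rq_close(200, 64, 100000, 104096)); /* prints 0 */
        return 0;
}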