author    Corrado Zoccolo <czoccolo@gmail.com>    2010-02-27 13:45:39 -0500
committer Jens Axboe <jens.axboe@oracle.com>      2010-02-28 13:41:25 -0500
commit    3dde36ddea3e07dd025c4c1ba47edec91606fec0
tree      0b8bbdb3fa9a08a3e4c6b0181a7b3325d578b111
parent    6fc2de06ef1e691d032aa572478c905b7495a274
cfq-iosched: rework seeky detection
Current seeky detection is based on the average seek length. This is
suboptimal, since the average will not distinguish between:
* a process doing medium-sized seeks
* a process doing some sequential requests interleaved with larger seeks
and even a medium seek can take a lot of time, if the requested sector
happens to be behind the disk head in the rotation (50% probability).

Therefore, we change the seeky queue detection to work as follows:
* each request is classified as sequential if it is very close to the
  current head position, i.e. it is likely in the disk cache (disks
  usually read more data than requested and put it in cache for
  subsequent reads); otherwise, the request is classified as seeky.
* a history window of the last 32 requests is kept, storing the
  classification result.
* a queue is marked as seeky if more than 1/8 of the last 32 requests
  were seeky.

This patch fixes a regression reported by Yanmin on mmap 64k random
reads.

Reported-by: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
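The scheme described above is essentially a 32-bit shift register plus a population count. As a rough, stand-alone sketch only (user-space C with hypothetical names such as queue_state and update_seek_history; __builtin_popcount() stands in for the kernel's hweight32(), and the 800-sector threshold mirrors the new CFQQ_SEEK_THR):

    /*
     * Illustrative user-space sketch of the reworked seeky detection.
     * Not kernel code: positions are plain u64 values and
     * __builtin_popcount() replaces hweight32().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SEEK_THR     (8 * 100)         /* "close" if within 800 sectors */
    #define SEEKY(hist)  (__builtin_popcount(hist) > 32 / 8)

    struct queue_state {
            uint32_t seek_history;         /* 1 bit per request, newest in bit 0 */
            uint64_t last_request_pos;
    };

    /* Classify one request and push the result into the 32-request window. */
    static void update_seek_history(struct queue_state *q, uint64_t pos)
    {
            uint64_t sdist = 0;

            if (q->last_request_pos)
                    sdist = pos > q->last_request_pos ?
                            pos - q->last_request_pos :
                            q->last_request_pos - pos;

            q->seek_history <<= 1;
            q->seek_history |= (sdist > SEEK_THR);
            q->last_request_pos = pos;
    }

    int main(void)
    {
            struct queue_state q = { 0, 0 };
            uint64_t pos = 1000;

            /* Mostly sequential stream with a large jump every 16th request. */
            for (int i = 1; i <= 64; i++) {
                    pos += (i % 16 == 0) ? 1000000 : 8;
                    update_seek_history(&q, pos);
            }
            printf("seeky bits set: %d, queue is %s\n",
                   __builtin_popcount(q.seek_history),
                   SEEKY(q.seek_history) ? "seeky" : "not seeky");
            return 0;
    }

With a seek only on every 16th request, at most two of the 32 tracked bits are set at any time, so the queue stays classified as non-seeky; a stream of constant medium seeks would instead set every bit and trip the 32/8 threshold.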
-rw-r--r--  block/cfq-iosched.c  54
1 file changed, 14 insertions(+), 40 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 10eb286f1f49..3fd8afc2174e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -46,8 +46,8 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_HW_QUEUE_MIN        (5)
 #define CFQ_SERVICE_SHIFT       12
 
-#define CFQQ_SEEK_THR           8 * 1024
-#define CFQQ_SEEKY(cfqq)        ((cfqq)->seek_mean > CFQQ_SEEK_THR)
+#define CFQQ_SEEK_THR           (sector_t)(8 * 100)
+#define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)              \
         ((struct cfq_io_context *) (rq)->elevator_private)
@@ -132,9 +132,7 @@ struct cfq_queue {
 
         pid_t pid;
 
-        unsigned int seek_samples;
-        u64 seek_total;
-        sector_t seek_mean;
+        u32 seek_history;
         sector_t last_request_pos;
 
         struct cfq_rb_root *service_tree;
@@ -1668,16 +1666,7 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                struct request *rq, bool for_preempt)
 {
-        sector_t sdist = cfqq->seek_mean;
-
-        if (!sample_valid(cfqq->seek_samples))
-                sdist = CFQQ_SEEK_THR;
-
-        /* if seek_mean is big, using it as close criteria is meaningless */
-        if (sdist > CFQQ_SEEK_THR && !for_preempt)
-                sdist = CFQQ_SEEK_THR;
-
-        return cfq_dist_from_last(cfqd, rq) <= sdist;
+        return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -2975,30 +2964,16 @@ static void
 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        struct request *rq)
 {
-        sector_t sdist;
-        u64 total;
-
-        if (!cfqq->last_request_pos)
-                sdist = 0;
-        else if (cfqq->last_request_pos < blk_rq_pos(rq))
-                sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
-        else
-                sdist = cfqq->last_request_pos - blk_rq_pos(rq);
-
-        /*
-         * Don't allow the seek distance to get too large from the
-         * odd fragment, pagein, etc
-         */
-        if (cfqq->seek_samples <= 60)        /* second&third seek */
-                sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
-        else
-                sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
+        sector_t sdist = 0;
+        if (cfqq->last_request_pos) {
+                if (cfqq->last_request_pos < blk_rq_pos(rq))
+                        sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+                else
+                        sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+        }
 
-        cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
-        cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
-        total = cfqq->seek_total + (cfqq->seek_samples/2);
-        do_div(total, cfqq->seek_samples);
-        cfqq->seek_mean = (sector_t)total;
+        cfqq->seek_history <<= 1;
+        cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 }
 
 /*
@@ -3023,8 +2998,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 cfq_mark_cfqq_deep(cfqq);
 
         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-            (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
-             && CFQQ_SEEKY(cfqq)))
+            (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
                 enable_idle = 0;
         else if (sample_valid(cic->ttime_samples)) {
                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
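To make the motivation in the commit message concrete, here is another hedged user-space sketch (hypothetical classify() helper, illustrative seek distances only): two workloads with nearly identical average seek distance that the per-request history window tells apart.

    /*
     * Illustrative comparison: average seek distance vs. per-request
     * classification.  User-space C; __builtin_popcount() stands in for
     * hweight32(), and 800 sectors mirrors the new CFQQ_SEEK_THR.
     */
    #include <stdint.h>
    #include <stdio.h>

    static void classify(const char *name, const uint64_t *sdist, int n)
    {
            uint64_t sum = 0;
            uint32_t history = 0;

            for (int i = 0; i < n; i++) {
                    sum += sdist[i];
                    history = (history << 1) | (sdist[i] > 800);
            }
            printf("%s: mean=%llu sectors, seeky bits=%d -> %s\n",
                   name, (unsigned long long)(sum / n),
                   __builtin_popcount(history),
                   __builtin_popcount(history) > 32 / 8 ? "seeky" : "not seeky");
    }

    int main(void)
    {
            uint64_t medium[32], mostly_seq[32];

            for (int i = 0; i < 32; i++) {
                    medium[i] = 1000;                      /* constant medium seeks */
                    mostly_seq[i] = (i == 31) ? 32000 : 8; /* sequential + one seek */
            }
            classify("medium seeks     ", medium, 32);
            classify("mostly sequential", mostly_seq, 32);
            return 0;
    }

The constant medium-seek stream sets all 32 history bits and is flagged seeky, while the mostly sequential stream with a single large jump sets one bit and stays under the 32/8 threshold, which is exactly the distinction an averaged seek_mean cannot make.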