author		Jens Axboe <jens.axboe@oracle.com>	2007-07-10 07:43:25 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-07-10 07:43:25 -0400
commit		15c31be4d5bd2402c6f5a288d56a24edc9252b71
tree		0fca6e97186080d83ff3f36bb359bcb4ef06a9e2
parent		72d3a38ee083a96c09032e608a4c7e047ce26760
cfq-iosched: fix async queue behaviour
With the cfq_queue hash removal, we inadvertently got rid of the
async queue sharing. This was not intentional: CFQ purposely shares
the async queue per priority level to get good merging for async
writes.

So put some logic in cfq_get_queue() to track the shared queues.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
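
As a sketch of the behaviour being restored, here is a minimal userspace
model of the per-priority async queue sharing. All names below are
hypothetical stand-ins, not kernel code: plain ints replace atomic_t
refcounts, and calloc()/free() replace the cfq_pool slab cache; the
kernel functions each piece mirrors are named in the comments.

#include <stdio.h>
#include <stdlib.h>

#define IOPRIO_BE_NR	8	/* best-effort priority levels 0..7 */

struct queue {
	int ref;			/* models cfqq->ref */
};

struct sched_data {
	struct queue *async_q[IOPRIO_BE_NR];	/* models cfqd->async_cfqq[] */
};

/* models cfq_find_alloc_queue(): always makes a fresh queue here */
static struct queue *find_alloc_queue(void)
{
	return calloc(1, sizeof(struct queue));
}

/* models cfq_put_queue(): free on last reference */
static void put_queue(struct queue *q)
{
	if (--q->ref == 0)
		free(q);
}

/* models the new cfq_get_queue(): async callers share one queue per prio */
static struct queue *get_queue(struct sched_data *sd, int is_sync, int prio)
{
	struct queue *q = NULL;

	if (!is_sync)
		q = sd->async_q[prio];
	if (!q)
		q = find_alloc_queue();

	/* pin the shared queue; scheduler exit drops this reference */
	if (!is_sync && !sd->async_q[prio]) {
		q->ref++;
		sd->async_q[prio] = q;
	}

	q->ref++;	/* the caller's own reference */
	return q;
}

int main(void)
{
	struct sched_data sd = { { NULL } };
	struct queue *a = get_queue(&sd, 0, 4);	/* first async request */
	struct queue *b = get_queue(&sd, 0, 4);	/* shares the same queue */

	printf("shared=%d refs=%d\n", a == b, a->ref);	/* shared=1 refs=3 */

	put_queue(a);
	put_queue(b);

	/* scheduler exit: drop the pins, as cfq_exit_queue() now does */
	for (int i = 0; i < IOPRIO_BE_NR; i++)
		if (sd.async_q[i])
			put_queue(sd.async_q[i]);
	return 0;
}

Each caller takes its own reference, and the table holds one extra "pin"
reference per priority level; that pin is what the cfq_exit_queue() loop
added below drops, so a shared async queue lives exactly as long as the
scheduler instance.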
-rw-r--r--	block/cfq-iosched.c	| 39
-rw-r--r--	include/linux/ioprio.h	|  6
2 files changed, 40 insertions, 5 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baef5fc7cff8..e0aa4dad6742 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,8 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
+	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
 	struct timer_list idle_class_timer;
 
 	sector_t last_position;
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-	      gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+		     struct task_struct *tsk, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_context *cic;
@@ -1405,12 +1407,35 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-	atomic_inc(&cfqq->ref);
 out:
 	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+	      gfp_t gfp_mask)
+{
+	const int ioprio = task_ioprio(tsk);
+	struct cfq_queue *cfqq = NULL;
+
+	if (!is_sync)
+		cfqq = cfqd->async_cfqq[ioprio];
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it
+	 */
+	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+		atomic_inc(&cfqq->ref);
+		cfqd->async_cfqq[ioprio] = cfqq;
+	}
+
+	atomic_inc(&cfqq->ref);
+	return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
+	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
+	/*
+	 * Put the async queues
+	 */
+	for (i = 0; i < IOPRIO_BE_NR; i++)
+		if (cfqd->async_cfqq[i])
+			cfq_put_queue(cfqd->async_cfqq[i]);
+
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 8e2042b9d471..2eaa142cd061 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,8 +47,10 @@ enum {
 #define IOPRIO_NORM	(4)
 static inline int task_ioprio(struct task_struct *task)
 {
-	WARN_ON(!ioprio_valid(task->ioprio));
-	return IOPRIO_PRIO_DATA(task->ioprio);
+	if (ioprio_valid(task->ioprio))
+		return IOPRIO_PRIO_DATA(task->ioprio);
+
+	return IOPRIO_NORM;
 }
 
 static inline int task_nice_ioprio(struct task_struct *task)
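
The ioprio.h hunk is what makes the array indexing safe: cfq_get_queue()
now uses task_ioprio() as an index into async_cfqq[IOPRIO_BE_NR], so the
helper must return a valid best-effort level even for a task that never
called ioprio_set() (class IOPRIO_CLASS_NONE), where it previously only
warned. A small standalone check of that boundary case follows; the macro
values are recalled from this era's header and should be treated as
assumptions rather than quoted source.

#include <assert.h>
#include <stdio.h>

/* assumed 2.6.22-era values from include/linux/ioprio.h */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_MASK	((1 << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(m)	((m) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(m)	((m) & IOPRIO_PRIO_MASK)
#define IOPRIO_CLASS_NONE	0
#define ioprio_valid(m)		(IOPRIO_PRIO_CLASS(m) != IOPRIO_CLASS_NONE)
#define IOPRIO_NORM		4
#define IOPRIO_BE_NR		8

/* the patched helper, taking the raw ioprio value instead of a task */
static int task_ioprio(int ioprio)
{
	if (ioprio_valid(ioprio))
		return IOPRIO_PRIO_DATA(ioprio);

	return IOPRIO_NORM;
}

int main(void)
{
	int idx = task_ioprio(0);	/* ioprio never set: class NONE */

	assert(idx >= 0 && idx < IOPRIO_BE_NR);	/* safe async_cfqq index */
	printf("unset ioprio maps to level %d\n", idx);	/* prints 4 */
	return 0;
}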