Diffstat (limited to 'block/cfq-iosched.c')
 -rw-r--r--	block/cfq-iosched.c | 39
 1 file changed, 36 insertions(+), 3 deletions(-)
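
The patch in one sentence: CFQ gains a per-priority cache of async queues, so all tasks at the same best-effort priority level share one async cfq_queue instead of each getting its own. The pieces below are: a new async_cfqq[] array in struct cfq_data, a rename of the old allocation path to cfq_find_alloc_queue(), a new cfq_get_queue() wrapper that consults and fills the cache, and teardown code in cfq_exit_queue() that drops the cached references.
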
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baef5fc7cff8..e0aa4dad6742 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,8 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
+	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
 	struct timer_list idle_class_timer;
 
 	sector_t last_position;
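
Two definitions this hunk leans on, stated here as context rather than as part of the patch (both from include/linux/ioprio.h of this vintage): IOPRIO_BE_NR is the number of best-effort priority levels, 8 at the time, and task_ioprio(tsk) returns a task's level within that range. So the new field is eight lazily-filled slots, one shared async queue per level, roughly:

	struct cfq_data {
		...
		/* one shared async queue per BE priority level; slots
		 * start out NULL and are filled by cfq_get_queue() */
		struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
		...
	};
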
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-	      gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+		     struct task_struct *tsk, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_context *cic;
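
The rename itself is mechanical: the old cfq_get_queue() body, with its lookup and retry logic, continues unchanged as cfq_find_alloc_queue(). The cfq_get_queue() name is reintroduced in the next hunk as a thin caching wrapper, which is also where the atomic_inc(&cfqq->ref) removed below ends up.
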
@@ -1405,12 +1407,35 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-	atomic_inc(&cfqq->ref);
 out:
 	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+	      gfp_t gfp_mask)
+{
+	const int ioprio = task_ioprio(tsk);
+	struct cfq_queue *cfqq = NULL;
+
+	if (!is_sync)
+		cfqq = cfqd->async_cfqq[ioprio];
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it
+	 */
+	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+		atomic_inc(&cfqq->ref);
+		cfqd->async_cfqq[ioprio] = cfqq;
+	}
+
+	atomic_inc(&cfqq->ref);
+	return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
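
The reference counting in the new wrapper is the crux of the patch: the first async lookup at a given priority level takes one extra reference to pin the queue in async_cfqq[], and every caller, sync or async, then takes its own reference on top. A toy user-space model of that pin-and-share pattern, with hypothetical names (queue, get_queue, put_queue) standing in for the kernel structures:

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_LEVELS 8			/* stands in for IOPRIO_BE_NR */

	struct queue {
		int ref;
	};

	static struct queue *cache[NR_LEVELS];	/* stands in for cfqd->async_cfqq[] */

	static struct queue *get_queue(int level)
	{
		struct queue *q = cache[level];

		if (!q)
			q = calloc(1, sizeof(*q));	/* the find_alloc step */

		/* pin the queue now that it's allocated, exit will prune it */
		if (!cache[level]) {
			q->ref++;
			cache[level] = q;
		}

		q->ref++;	/* the caller's own reference */
		return q;
	}

	static void put_queue(struct queue *q)
	{
		if (--q->ref == 0)
			free(q);
	}

	int main(void)
	{
		struct queue *a = get_queue(4);
		struct queue *b = get_queue(4);	/* cache hit: same object */
		int i;

		printf("shared=%d refs=%d\n", a == b, a->ref);	/* shared=1 refs=3 */
		put_queue(a);
		put_queue(b);

		/* mirror cfq_exit_queue(): drop the pinned references */
		for (i = 0; i < NR_LEVELS; i++)
			if (cache[i])
				put_queue(cache[i]);
		return 0;
	}

The asymmetry to notice: the pin is taken only when the slot is empty, so repeated lookups at one level keep sharing a single object whose refcount is "number of callers + 1", and that object outlives any individual caller until teardown drops the pin.
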
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
+	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
+	/*
+	 * Put the async queues
+	 */
+	for (i = 0; i < IOPRIO_BE_NR; i++)
+		if (cfqd->async_cfqq[i])
+			cfq_put_queue(cfqd->async_cfqq[i]);
+
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
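
Ordering detail worth noting in this last hunk: the put loop runs inside the q->queue_lock critical section, immediately after the io-context teardown, so the pinned async queues are released before the lock is dropped and the elevator data goes away. Each cfq_put_queue() call balances exactly the atomic_inc() taken when cfq_get_queue() first cached that queue.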