author     Jens Axboe <axboe@suse.de>    2006-03-28 01:59:01 -0500
committer  Jens Axboe <axboe@suse.de>    2006-03-28 01:59:01 -0500
commit     e2d74ac0664c89757bde8fb18c98cd7bf53da61c (patch)
tree       1e858044a9180766eae4ec694d4200c4ae850406 /block/ll_rw_blk.c
parent     329b10bb0feacb7fb9a41389313ff0a51ae56f2a (diff)
[PATCH] [BLOCK] cfq-iosched: change cfq io context linking from list to tree
On setups with many disks, we spend a considerable amount of time looking up
the process-disk mapping on each queue of io. Testing with a NULL based block
driver, this costs a 40-50% reduction in throughput for 1000 disks.

Signed-off-by: Jens Axboe <axboe@suse.de>
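This diff only covers the generic io_context side of the change: the per-process
io_context now carries an rb-tree root (cic_root) instead of a single cic pointer,
and put_io_context()/exit_io_context() reach the CFQ contexts through rb_first().
The keyed lookup and insertion that replace the old list walk live in cfq-iosched.c
and are not shown here. As a rough illustration of the technique, the sketch below
shows an rb-tree lookup/insert keyed by a per-device pointer; the struct name, the
key field, and the helper names are invented for this example and are not the
actual cfq-iosched.c symbols.

#include <linux/rbtree.h>

/* Invented stand-in for the per-device CFQ io context hung off an io_context. */
struct cic_sketch {
        struct rb_node rb_node;         /* linkage into ioc->cic_root */
        void *key;                      /* assumed: identifies the queue/device */
};

/* O(log n) lookup of the context for one device, replacing a list walk. */
static struct cic_sketch *cic_rb_lookup(struct rb_root *root, void *key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct cic_sketch *cic = rb_entry(n, struct cic_sketch, rb_node);

                if (key < cic->key)
                        n = n->rb_left;
                else if (key > cic->key)
                        n = n->rb_right;
                else
                        return cic;     /* found the entry for this device */
        }
        return NULL;
}

/* Insert a new context at the position the same key comparison dictates. */
static void cic_rb_insert(struct rb_root *root, struct cic_sketch *cic)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct cic_sketch *cur = rb_entry(*p, struct cic_sketch, rb_node);

                parent = *p;
                if (cic->key < cur->key)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, root);
}

With the tree keyed per device, finding the right context on each queued io is
O(log n) in the number of disks the process has touched, instead of a linear scan
that grows with every disk; that scan is the lookup cost the commit message
measures with the NULL block driver.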
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--    block/ll_rw_blk.c    19
1 file changed, 13 insertions, 6 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 82469db25100..cb608768ca37 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3539,11 +3539,15 @@ void put_io_context(struct io_context *ioc)
         BUG_ON(atomic_read(&ioc->refcount) == 0);
 
         if (atomic_dec_and_test(&ioc->refcount)) {
+                struct cfq_io_context *cic;
+
                 rcu_read_lock();
                 if (ioc->aic && ioc->aic->dtor)
                         ioc->aic->dtor(ioc->aic);
-                if (ioc->cic && ioc->cic->dtor)
-                        ioc->cic->dtor(ioc->cic);
+                if (ioc->cic_root.rb_node != NULL) {
+                        cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+                        cic->dtor(ioc);
+                }
                 rcu_read_unlock();
 
                 kmem_cache_free(iocontext_cachep, ioc);
@@ -3556,6 +3560,7 @@ void exit_io_context(void)
 {
         unsigned long flags;
         struct io_context *ioc;
+        struct cfq_io_context *cic;
 
         local_irq_save(flags);
         task_lock(current);
@@ -3567,9 +3572,11 @@ void exit_io_context(void)
 
         if (ioc->aic && ioc->aic->exit)
                 ioc->aic->exit(ioc->aic);
-        if (ioc->cic && ioc->cic->exit)
-                ioc->cic->exit(ioc->cic);
-
+        if (ioc->cic_root.rb_node != NULL) {
+                cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+                cic->exit(ioc);
+        }
+
         put_io_context(ioc);
 }
 
@@ -3598,7 +3605,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
                 ret->last_waited = jiffies;     /* doesn't matter... */
                 ret->nr_batch_requests = 0;     /* because this is 0 */
                 ret->aic = NULL;
-                ret->cic = NULL;
+                ret->cic_root.rb_node = NULL;
                 tsk->io_context = ret;
         }
 
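Note that both hunks above only take the first tree entry with rb_first() and call
its dtor/exit hook, passing the whole io_context rather than one cfq_io_context:
with the tree in place, a single callback can drain everything hanging off
ioc->cic_root. The real destructor lives in cfq-iosched.c and is not part of this
diff; the loop below is only a sketch of that kind of drain, with an invented node
type and kfree() standing in for the real slab-cache free.

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Invented stand-in for the per-device context linked into ioc->cic_root. */
struct cic_node_sketch {
        struct rb_node rb_node;
};

/* Repeatedly unlink and free the leftmost node until the tree is empty. */
static void cic_drain_tree_sketch(struct rb_root *root)
{
        struct rb_node *n;

        while ((n = rb_first(root)) != NULL) {
                struct cic_node_sketch *cic =
                        rb_entry(n, struct cic_node_sketch, rb_node);

                rb_erase(n, root);      /* unlink from the rb-tree */
                kfree(cic);             /* release the per-device context */
        }
}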