author      Jens Axboe <jens.axboe@oracle.com>    2008-01-24 02:44:49 -0500
committer   Jens Axboe <jens.axboe@oracle.com>    2008-01-28 04:50:33 -0500
commit      4ac845a2e9a816ed5a7b301f56dcc0a3d0b1ba4d (patch)
tree        602f15808d0f3dcdfcd7cc4491b2cc2ccd266fd2 /block/ll_rw_blk.c
parent      66dac98ed0de7a1125fb0dd7907f238f6b9d2f60 (diff)
block: cfq: make the io context sharing lockless
The io context sharing introduced a per-ioc spinlock that protects
the cfq io context lookup. That is a regression from the original code,
which needed no locking there because the ioc/cic were process private.

The cic lookup is changed from an rbtree construct to a radix tree,
which lets us use RCU to make the reader side lockless. That is the
performance-critical path; modifying the radix tree is only done on
process creation (when that process first does IO, actually) and on
process exit (if that process has done IO).

As it so happens, radix trees are also much faster for this type of
lookup, where the key is a pointer: the tree is very sparse.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
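The reader side described above boils down to an RCU-protected radix tree
lookup keyed on a pointer value. A minimal sketch of that pattern follows;
it is illustrative only — the actual lookup lives in cfq-iosched.c, which
is outside this diff, and cfq_cic_lookup_sketch() and its key argument are
made-up names:

/*
 * Minimal sketch of the lockless reader path the message describes:
 * an RCU read-side critical section around a radix tree lookup, with
 * the tree keyed by a pointer value (hence very sparse). Illustrative
 * only; not a function this commit adds.
 */
static struct cfq_io_context *
cfq_cic_lookup_sketch(struct io_context *ioc, void *key)
{
	struct cfq_io_context *cic;

	rcu_read_lock();
	cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) key);
	/*
	 * A real caller must not touch cic after rcu_read_unlock()
	 * without taking its own reference.
	 */
	rcu_read_unlock();

	return cic;
}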
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--    block/ll_rw_blk.c | 49
1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d4550ecae443..b901db63f6ae 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3853,6 +3853,21 @@ int __init blk_dev_init(void)
 	return 0;
 }
 
+static void cfq_dtor(struct io_context *ioc)
+{
+	struct cfq_io_context *cic[1];
+	int r;
+
+	/*
+	 * We don't have a specific key to lookup with, so use the gang
+	 * lookup to just retrieve the first item stored. The cfq exit
+	 * function will iterate the full tree, so any member will do.
+	 */
+	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
+	if (r > 0)
+		cic[0]->dtor(ioc);
+}
+
 /*
  * IO Context helper functions. put_io_context() returns 1 if there are no
  * more users of this io context, 0 otherwise.
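For readers unfamiliar with the API: radix_tree_gang_lookup(root, results,
first_index, max_items) scans the tree upward from first_index, stores up
to max_items entries into the results array, and returns the number found.
cfq_dtor() exploits this with a one-element array to grab an arbitrary
member of the tree. The same idiom as a standalone helper (hypothetical,
not part of the commit):

/*
 * Hypothetical helper showing the idiom used by cfq_dtor(): a gang
 * lookup for a single item starting at key 0 returns an arbitrary
 * member of the tree, or nothing if the tree is empty.
 */
static struct cfq_io_context *ioc_first_cic(struct io_context *ioc)
{
	struct cfq_io_context *cic[1];

	if (radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1) > 0)
		return cic[0];

	return NULL;
}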
@@ -3865,18 +3880,11 @@ int put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
-		struct cfq_io_context *cic;
-
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		if (ioc->cic_root.rb_node != NULL) {
-			struct rb_node *n = rb_first(&ioc->cic_root);
-
-			cic = rb_entry(n, struct cfq_io_context, rb_node);
-			cic->dtor(ioc);
-		}
 		rcu_read_unlock();
+		cfq_dtor(ioc);
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
@@ -3885,11 +3893,26 @@ int put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
+static void cfq_exit(struct io_context *ioc)
+{
+	struct cfq_io_context *cic[1];
+	int r;
+
+	rcu_read_lock();
+	/*
+	 * See comment for cfq_dtor()
+	 */
+	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
+	rcu_read_unlock();
+
+	if (r > 0)
+		cic[0]->exit(ioc);
+}
+
 /* Called by the exitting task */
 void exit_io_context(void)
 {
 	struct io_context *ioc;
-	struct cfq_io_context *cic;
 
 	task_lock(current);
 	ioc = current->io_context;
@@ -3899,11 +3922,7 @@ void exit_io_context(void)
 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
 		if (ioc->aic && ioc->aic->exit)
 			ioc->aic->exit(ioc->aic);
-		if (ioc->cic_root.rb_node != NULL) {
-			cic = rb_entry(rb_first(&ioc->cic_root),
-				struct cfq_io_context, rb_node);
-			cic->exit(ioc);
-		}
+		cfq_exit(ioc);
 
 		put_io_context(ioc);
 	}
@@ -3923,7 +3942,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret->last_waited = jiffies;	/* doesn't matter... */
 	ret->nr_batch_requests = 0;	/* because this is 0 */
 	ret->aic = NULL;
-	ret->cic_root.rb_node = NULL;
+	INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	ret->ioc_data = NULL;
 }
 
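The update side that INIT_RADIX_TREE() prepares for is in cfq-iosched.c
and not shown in this diff. A plausible sketch follows, assuming the cfqd
pointer serves as the key (per "the key is a pointer" above) and that
writers still serialize on the per-ioc lock, since only the reader side is
made lockless; cic_link_sketch() is an illustrative name:

/*
 * Sketch of the update side (the real code is in cfq-iosched.c).
 * The GFP_ATOMIC | __GFP_HIGH mask given to INIT_RADIX_TREE() above
 * is what radix_tree_insert() uses when it must allocate tree nodes,
 * so insertion works from contexts that cannot sleep. Keying on the
 * cfqd pointer and taking ioc->lock here are assumptions.
 */
static int cic_link_sketch(struct io_context *ioc, void *cfqd,
			   struct cfq_io_context *cic)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ioc->lock, flags);
	ret = radix_tree_insert(&ioc->radix_root, (unsigned long) cfqd, cic);
	spin_unlock_irqrestore(&ioc->lock, flags);

	return ret;
}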