diff options
author | Jens Axboe <jens.axboe@oracle.com> | 2008-01-24 02:53:35 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-01-28 04:50:31 -0500 |
commit | d38ecf935fcb10264a6bc190855d9595165e6eeb (patch) | |
tree | 64e3146ef76678ad3ae8f75c32df9f25ea470953 /block | |
parent | fd0928df98b9578be8a786ac0cb78a47a5e17a20 (diff) |
io context sharing: preliminary support
Detach the task state from the ioc; instead, keep track of how many
processes are accessing the ioc.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/ll_rw_blk.c | 46 |
1 files changed, 30 insertions, 16 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index b9bb02e845cd..d4550ecae443 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -3854,12 +3854,13 @@ int __init blk_dev_init(void) | |||
3854 | } | 3854 | } |
3855 | 3855 | ||
3856 | /* | 3856 | /* |
3857 | * IO Context helper functions | 3857 | * IO Context helper functions. put_io_context() returns 1 if there are no |
3858 | * more users of this io context, 0 otherwise. | ||
3858 | */ | 3859 | */ |
3859 | void put_io_context(struct io_context *ioc) | 3860 | int put_io_context(struct io_context *ioc) |
3860 | { | 3861 | { |
3861 | if (ioc == NULL) | 3862 | if (ioc == NULL) |
3862 | return; | 3863 | return 1; |
3863 | 3864 | ||
3864 | BUG_ON(atomic_read(&ioc->refcount) == 0); | 3865 | BUG_ON(atomic_read(&ioc->refcount) == 0); |
3865 | 3866 | ||
@@ -3878,7 +3879,9 @@ void put_io_context(struct io_context *ioc) | |||
3878 | rcu_read_unlock(); | 3879 | rcu_read_unlock(); |
3879 | 3880 | ||
3880 | kmem_cache_free(iocontext_cachep, ioc); | 3881 | kmem_cache_free(iocontext_cachep, ioc); |
3882 | return 1; | ||
3881 | } | 3883 | } |
3884 | return 0; | ||
3882 | } | 3885 | } |
3883 | EXPORT_SYMBOL(put_io_context); | 3886 | EXPORT_SYMBOL(put_io_context); |
3884 | 3887 | ||
@@ -3893,15 +3896,17 @@ void exit_io_context(void) | |||
3893 | current->io_context = NULL; | 3896 | current->io_context = NULL; |
3894 | task_unlock(current); | 3897 | task_unlock(current); |
3895 | 3898 | ||
3896 | ioc->task = NULL; | 3899 | if (atomic_dec_and_test(&ioc->nr_tasks)) { |
3897 | if (ioc->aic && ioc->aic->exit) | 3900 | if (ioc->aic && ioc->aic->exit) |
3898 | ioc->aic->exit(ioc->aic); | 3901 | ioc->aic->exit(ioc->aic); |
3899 | if (ioc->cic_root.rb_node != NULL) { | 3902 | if (ioc->cic_root.rb_node != NULL) { |
3900 | cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); | 3903 | cic = rb_entry(rb_first(&ioc->cic_root), |
3901 | cic->exit(ioc); | 3904 | struct cfq_io_context, rb_node); |
3902 | } | 3905 | cic->exit(ioc); |
3906 | } | ||
3903 | 3907 | ||
3904 | put_io_context(ioc); | 3908 | put_io_context(ioc); |
3909 | } | ||
3905 | } | 3910 | } |
3906 | 3911 | ||
3907 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | 3912 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) |
@@ -3911,7 +3916,8 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | |||
3911 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | 3916 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); |
3912 | if (ret) { | 3917 | if (ret) { |
3913 | atomic_set(&ret->refcount, 1); | 3918 | atomic_set(&ret->refcount, 1); |
3914 | ret->task = current; | 3919 | atomic_set(&ret->nr_tasks, 1); |
3920 | spin_lock_init(&ret->lock); | ||
3915 | ret->ioprio_changed = 0; | 3921 | ret->ioprio_changed = 0; |
3916 | ret->ioprio = 0; | 3922 | ret->ioprio = 0; |
3917 | ret->last_waited = jiffies; /* doesn't matter... */ | 3923 | ret->last_waited = jiffies; /* doesn't matter... */ |
@@ -3959,10 +3965,18 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node) | |||
3959 | */ | 3965 | */ |
3960 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | 3966 | struct io_context *get_io_context(gfp_t gfp_flags, int node) |
3961 | { | 3967 | { |
3962 | struct io_context *ret; | 3968 | struct io_context *ret = NULL; |
3963 | ret = current_io_context(gfp_flags, node); | 3969 | |
3964 | if (likely(ret)) | 3970 | /* |
3965 | atomic_inc(&ret->refcount); | 3971 | * Check for unlikely race with exiting task. ioc ref count is |
3972 | * zero when ioc is being detached. | ||
3973 | */ | ||
3974 | do { | ||
3975 | ret = current_io_context(gfp_flags, node); | ||
3976 | if (unlikely(!ret)) | ||
3977 | break; | ||
3978 | } while (!atomic_inc_not_zero(&ret->refcount)); | ||
3979 | |||
3966 | return ret; | 3980 | return ret; |
3967 | } | 3981 | } |
3968 | EXPORT_SYMBOL(get_io_context); | 3982 | EXPORT_SYMBOL(get_io_context); |