author    Jens Axboe <jens.axboe@oracle.com>  2008-01-24 02:53:35 -0500
committer Jens Axboe <jens.axboe@oracle.com>  2008-01-28 04:50:31 -0500
commit    d38ecf935fcb10264a6bc190855d9595165e6eeb (patch)
tree      64e3146ef76678ad3ae8f75c32df9f25ea470953
parent    fd0928df98b9578be8a786ac0cb78a47a5e17a20 (diff)
io context sharing: preliminary support
Detach task state from ioc; instead, keep track of how many processes are accessing the ioc.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
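For illustration only, a minimal sketch of how a caller is expected to use the reworked interface after this patch: get_io_context() no longer resurrects an io_context whose last reference is already gone, and put_io_context() now reports whether the final reference was dropped. The function example_ioc_user() is hypothetical and not part of this series.

/*
 * Hypothetical caller (not part of this patch), sketching the intended use
 * of the reworked reference counting.
 */
static void example_ioc_user(void)
{
	struct io_context *ioc;

	/*
	 * Takes a reference on the current task's io_context, allocating one
	 * if needed.  Internally retries if it races with an exiting task
	 * whose refcount already dropped to zero; returns NULL only when no
	 * io_context can be allocated.
	 */
	ioc = get_io_context(GFP_KERNEL, -1);
	if (!ioc)
		return;

	/* ... use the per-process IO scheduler state ... */

	/* Returns 1 when this put freed the last reference, 0 otherwise. */
	if (put_io_context(ioc))
		ioc = NULL;	/* memory is gone, do not touch it again */
}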
-rw-r--r--  block/ll_rw_blk.c           46
-rw-r--r--  fs/ioprio.c                  1
-rw-r--r--  include/linux/blkdev.h       2
-rw-r--r--  include/linux/iocontext.h   22
-rw-r--r--  kernel/fork.c                1
5 files changed, 49 insertions, 23 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b9bb02e845cd..d4550ecae443 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3854,12 +3854,13 @@ int __init blk_dev_init(void)
 }
 
 /*
- * IO Context helper functions
+ * IO Context helper functions. put_io_context() returns 1 if there are no
+ * more users of this io context, 0 otherwise.
  */
-void put_io_context(struct io_context *ioc)
+int put_io_context(struct io_context *ioc)
 {
 	if (ioc == NULL)
-		return;
+		return 1;
 
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
@@ -3878,7 +3879,9 @@ void put_io_context(struct io_context *ioc)
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
+		return 1;
 	}
+	return 0;
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -3893,15 +3896,17 @@ void exit_io_context(void)
 	current->io_context = NULL;
 	task_unlock(current);
 
-	ioc->task = NULL;
-	if (ioc->aic && ioc->aic->exit)
-		ioc->aic->exit(ioc->aic);
-	if (ioc->cic_root.rb_node != NULL) {
-		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
-		cic->exit(ioc);
-	}
+	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+		if (ioc->aic && ioc->aic->exit)
+			ioc->aic->exit(ioc->aic);
+		if (ioc->cic_root.rb_node != NULL) {
+			cic = rb_entry(rb_first(&ioc->cic_root),
+				struct cfq_io_context, rb_node);
+			cic->exit(ioc);
+		}
 
-	put_io_context(ioc);
+		put_io_context(ioc);
+	}
 }
 
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
@@ -3911,7 +3916,8 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
-		ret->task = current;
+		atomic_set(&ret->nr_tasks, 1);
+		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
 		ret->ioprio = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
@@ -3959,10 +3965,18 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret;
-	ret = current_io_context(gfp_flags, node);
-	if (likely(ret))
-		atomic_inc(&ret->refcount);
+	struct io_context *ret = NULL;
+
+	/*
+	 * Check for unlikely race with exiting task. ioc ref count is
+	 * zero when ioc is being detached.
+	 */
+	do {
+		ret = current_io_context(gfp_flags, node);
+		if (unlikely(!ret))
+			break;
+	} while (!atomic_inc_not_zero(&ret->refcount));
+
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);
diff --git a/fs/ioprio.c b/fs/ioprio.c
index a7600401ecf7..06b5d97c5fdd 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -54,7 +54,6 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
 			break;
 		}
 		task->io_context = ioc;
-		ioc->task = task;
 	} while (1);
 
 	if (!err) {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 510a18ba1ec5..2483a05231c7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,7 +34,7 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
 void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 186807ea62e2..cd44d458124a 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -54,13 +54,15 @@ struct cfq_io_context {
 };
 
 /*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
+ * I/O subsystem state of the associated processes. It is refcounted
+ * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
 	atomic_t refcount;
-	struct task_struct *task;
+	atomic_t nr_tasks;
+
+	/* all the fields below are protected by this lock */
+	spinlock_t lock;
 
 	unsigned short ioprio;
 	unsigned short ioprio_changed;
@@ -76,4 +78,16 @@ struct io_context {
 	void *ioc_data;
 };
 
+static inline struct io_context *ioc_task_link(struct io_context *ioc)
+{
+	/*
+	 * if ref count is zero, don't allow sharing (ioc is going away, it's
+	 * a race).
+	 */
+	if (ioc && atomic_inc_not_zero(&ioc->refcount))
+		return ioc;
+
+	return NULL;
+}
+
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 2a86c9dff744..1987c57abb08 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -805,7 +805,6 @@ static int copy_io(struct task_struct *tsk)
 		if (unlikely(!tsk->io_context))
 			return -ENOMEM;
 
-		tsk->io_context->task = tsk;
 		tsk->io_context->ioprio = ioc->ioprio;
 	}
 #endif
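As a closing note, a hypothetical sketch of where this is headed: with nr_tasks and ioc_task_link() in place, a later change could let one task attach to another's io_context instead of allocating its own. The helper example_share_io_context() and the caller-side atomic_inc() of nr_tasks are assumptions for illustration, not code from this patch.

/*
 * Hypothetical: share an existing io_context with another task.  In this
 * patch ioc_task_link() only takes a refcount reference, so the caller is
 * assumed to account for the additional task itself.
 */
static int example_share_io_context(struct task_struct *tsk,
				    struct io_context *ioc)
{
	tsk->io_context = ioc_task_link(ioc);
	if (unlikely(!tsk->io_context))
		return -ENOMEM;	/* lost the race with an exiting owner */

	/*
	 * One more task now uses this ioc; exit_io_context() tears down the
	 * per-ioc scheduler state only when the last task drops out.
	 */
	atomic_inc(&tsk->io_context->nr_tasks);
	return 0;
}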