Diffstat (limited to 'block/blk-ioc.c'):
 block/blk-ioc.c | 99
 1 file changed, 66 insertions(+), 33 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 8bebf06bac76..b13ed96776c2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -16,6 +16,19 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
 static void cfq_dtor(struct io_context *ioc)
 {
 	if (!hlist_empty(&ioc->cic_list)) {
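
The new get_io_context() only bumps an already-live reference (hence the BUG_ON) and pairs with put_io_context(), which this file already provides. A minimal, hypothetical helper sketch of the pattern the kernel-doc later in this patch recommends for %current; the helper name and gfp flags are illustrative, not part of the patch:

/*
 * Hypothetical helper (not part of the patch): look the io_context up
 * cheaply, then pin it with get_io_context(); the caller releases it
 * with put_io_context().
 */
static struct io_context *pin_current_ioc_sketch(void)
{
	struct io_context *ioc = current_io_context(GFP_NOIO, NUMA_NO_NODE);

	if (ioc)
		get_io_context(ioc);	/* refcount is known to be > 0 here */
	return ioc;			/* caller drops it via put_io_context() */
}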
@@ -71,6 +84,9 @@ void exit_io_context(struct task_struct *task)
71{ 84{
72 struct io_context *ioc; 85 struct io_context *ioc;
73 86
87 /* PF_EXITING prevents new io_context from being attached to @task */
88 WARN_ON_ONCE(!(current->flags & PF_EXITING));
89
74 task_lock(task); 90 task_lock(task);
75 ioc = task->io_context; 91 ioc = task->io_context;
76 task->io_context = NULL; 92 task->io_context = NULL;
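
The WARN_ON_ONCE() encodes an ordering assumption: by the time exit_io_context() runs, the exiting task has already set PF_EXITING, so create_task_io_context() (added below) can refuse to attach a new io_context. A condensed, illustrative sequence, not the actual kernel/exit.c code:

/* Condensed, illustrative exit sequence; not actual kernel/exit.c code. */
static void exit_sequence_sketch(struct task_struct *tsk)
{
	tsk->flags |= PF_EXITING;	/* create_task_io_context() now backs off */
	exit_io_context(tsk);		/* detach tsk->io_context and put it */
}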
@@ -82,7 +98,9 @@ void exit_io_context(struct task_struct *task)
82 put_io_context(ioc); 98 put_io_context(ioc);
83} 99}
84 100
85struct io_context *alloc_io_context(gfp_t gfp_flags, int node) 101static struct io_context *create_task_io_context(struct task_struct *task,
102 gfp_t gfp_flags, int node,
103 bool take_ref)
86{ 104{
87 struct io_context *ioc; 105 struct io_context *ioc;
88 106
@@ -98,6 +116,20 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
98 INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH); 116 INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
99 INIT_HLIST_HEAD(&ioc->cic_list); 117 INIT_HLIST_HEAD(&ioc->cic_list);
100 118
119 /* try to install, somebody might already have beaten us to it */
120 task_lock(task);
121
122 if (!task->io_context && !(task->flags & PF_EXITING)) {
123 task->io_context = ioc;
124 } else {
125 kmem_cache_free(iocontext_cachep, ioc);
126 ioc = task->io_context;
127 }
128
129 if (ioc && take_ref)
130 get_io_context(ioc);
131
132 task_unlock(task);
101 return ioc; 133 return ioc;
102} 134}
103 135
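create_task_io_context() resolves the allocation race with a publish-or-free idiom: allocate optimistically, then either install under task_lock() or free the loser's copy and adopt the winner's. A small userspace C analogue of the same idiom; the names, lock, and take_ref plumbing here are illustrative, not kernel API:

/*
 * Userspace analogue (illustrative only) of the publish-or-free idiom
 * used by create_task_io_context(): allocate first, install under the
 * lock if nobody beat us to it, otherwise free ours and adopt theirs.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx { long refs; };

static struct ctx *shared_ctx;
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ctx *create_ctx(int take_ref)
{
	struct ctx *new = calloc(1, sizeof(*new));
	struct ctx *ret;

	if (!new)
		return NULL;
	new->refs = 1;			/* the installed reference */

	pthread_mutex_lock(&ctx_lock);
	if (!shared_ctx)
		shared_ctx = new;	/* we won: publish our copy */
	else
		free(new);		/* we lost: adopt the winner's copy */
	ret = shared_ctx;
	if (ret && take_ref)
		ret->refs++;		/* extra ref for the caller, like take_ref */
	pthread_mutex_unlock(&ctx_lock);
	return ret;
}

Whichever thread loses the race still returns the single installed context, so all callers converge on one object, exactly as both racing tasks converge on one task->io_context above.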
@@ -114,46 +146,47 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
-	struct task_struct *tsk = current;
-	struct io_context *ret;
-
-	ret = tsk->io_context;
-	if (likely(ret))
-		return ret;
-
-	ret = alloc_io_context(gfp_flags, node);
-	if (ret) {
-		/* make sure set_task_ioprio() sees the settings above */
-		smp_wmb();
-		tsk->io_context = ret;
-	}
+	might_sleep_if(gfp_flags & __GFP_WAIT);
 
-	return ret;
+	if (current->io_context)
+		return current->io_context;
+
+	return create_task_io_context(current, gfp_flags, node, false);
 }
+EXPORT_SYMBOL(current_io_context);
 
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
+ *
+ * Return io_context of @task. If it doesn't exist, it is created with
+ * @gfp_flags and @node. The returned io_context has its reference count
+ * incremented.
  *
- * This is always called in the context of the task which submitted the I/O.
+ * This function always goes through task_lock() and it's better to use
+ * current_io_context() + get_io_context() for %current.
  */
-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node)
 {
-	struct io_context *ioc = NULL;
-
-	/*
-	 * Check for unlikely race with exiting task. ioc ref count is
-	 * zero when ioc is being detached.
-	 */
-	do {
-		ioc = current_io_context(gfp_flags, node);
-		if (unlikely(!ioc))
-			break;
-	} while (!atomic_long_inc_not_zero(&ioc->refcount));
+	struct io_context *ioc;
 
-	return ioc;
+	might_sleep_if(gfp_flags & __GFP_WAIT);
+
+	task_lock(task);
+	ioc = task->io_context;
+	if (likely(ioc)) {
+		get_io_context(ioc);
+		task_unlock(task);
+		return ioc;
+	}
+	task_unlock(task);
+
+	return create_task_io_context(task, gfp_flags, node, true);
 }
+EXPORT_SYMBOL(get_task_io_context);
 
 static int __init blk_ioc_init(void)
 {
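
With the reworked interface, code that needs another task's io_context takes a counted reference through get_task_io_context() and drops it with put_io_context(). A hedged caller sketch, modelled on (but not copied from) an ioprio-style update; the ioprio and ioprio_changed fields are assumptions about struct io_context here:

/*
 * Hypothetical caller sketch (not part of the patch): update another
 * task's io_context through the new counted-reference interface.
 */
static int set_task_ioprio_sketch(struct task_struct *task, int ioprio)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (!ioc)
		return -ENOMEM;

	ioc->ioprio = ioprio;		/* assumed field */
	ioc->ioprio_changed = 1;	/* assumed field */
	put_io_context(ioc);		/* drop the reference we were handed */
	return 0;
}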