path: root/block/blk-ioc.c
author     Tejun Heo <tj@kernel.org>        2011-12-13 18:33:40 -0500
committer  Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:40 -0500
commit     f2dbd76a0a994bc1d5a3d0e7c844cc373832e86c (patch)
tree       cae6a4333ee6e5eb76ef133dfdee95e1943c0ab1 /block/blk-ioc.c
parent     1238033c79e92e5c315af12e45396f1a78c73dec (diff)
block, cfq: replace current_io_context() with create_io_context()
When called under queue_lock, current_io_context() triggers a lockdep warning
if it hits the allocation path.  This is because io_context installation is
protected by task_lock, which is not IRQ safe, so it triggers an
irq-unsafe-lock -> irq -> irq-safe-lock -> irq-unsafe-lock deadlock warning.

Given that restriction, an accessor and creator rolled into one doesn't work
too well.  Drop current_io_context() and let users access task->io_context
directly inside queue_lock, combined with explicit creation using
create_io_context().

Future ioc updates will further consolidate ioc access, and the create
interface will be unexported.

While at it, relocate the ioc internal interface declarations in blk.h and
add section comments before and after.

This patch does not introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
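To make the new calling convention concrete, here is a hedged sketch of how a queue_lock-holding path is expected to look after this change. The function example_prepare_request() and its surrounding logic are illustrative only and are not part of this patch; the point is that create_io_context() runs before queue_lock is taken, and the io_context is then read straight off the task struct under the lock.

/* Illustrative sketch only -- not taken from this patch's diff. */
static void example_prepare_request(struct request_queue *q, gfp_t gfp_mask)
{
        struct io_context *ioc;

        /*
         * Allocate %current's io_context up front, with no locks held.
         * The creation path takes task_lock(), which is not IRQ safe
         * and therefore must not nest inside the IRQ-safe queue_lock.
         */
        create_io_context(current, gfp_mask, q->node);

        spin_lock_irq(q->queue_lock);

        /*
         * Under queue_lock only a plain load is needed.  %current's
         * io_context is torn down only at task exit, so it stays valid
         * for as long as this task is executing.
         */
        ioc = current->io_context;
        if (ioc) {
                /* ... look up or create the cfq cic against ioc ... */
        }

        spin_unlock_irq(q->queue_lock);
}

Allocation failure simply leaves current->io_context NULL, which callers of the old interface already had to tolerate.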
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--   block/blk-ioc.c   62
1 file changed, 17 insertions(+), 45 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb23965595da..e23c797b4685 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -205,16 +205,15 @@ void exit_io_context(struct task_struct *task)
         put_io_context(ioc, NULL);
 }
 
-static struct io_context *create_task_io_context(struct task_struct *task,
-                                                 gfp_t gfp_flags, int node,
-                                                 bool take_ref)
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
+                                int node)
 {
         struct io_context *ioc;
 
         ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                     node);
         if (unlikely(!ioc))
-                return NULL;
+                return;
 
         /* initialize */
         atomic_long_set(&ioc->refcount, 1);
@@ -226,42 +225,13 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 
         /* try to install, somebody might already have beaten us to it */
         task_lock(task);
-
-        if (!task->io_context && !(task->flags & PF_EXITING)) {
+        if (!task->io_context && !(task->flags & PF_EXITING))
                 task->io_context = ioc;
-        } else {
+        else
                 kmem_cache_free(iocontext_cachep, ioc);
-                ioc = task->io_context;
-        }
-
-        if (ioc && take_ref)
-                get_io_context(ioc);
-
         task_unlock(task);
-        return ioc;
 }
-
-/**
- * current_io_context - get io_context of %current
- * @gfp_flags: allocation flags, used if allocation is necessary
- * @node: allocation node, used if allocation is necessary
- *
- * Return io_context of %current.  If it doesn't exist, it is created with
- * @gfp_flags and @node.  The returned io_context does NOT have its
- * reference count incremented.  Because io_context is exited only on task
- * exit, %current can be sure that the returned io_context is valid and
- * alive as long as it is executing.
- */
-struct io_context *current_io_context(gfp_t gfp_flags, int node)
-{
-        might_sleep_if(gfp_flags & __GFP_WAIT);
-
-        if (current->io_context)
-                return current->io_context;
-
-        return create_task_io_context(current, gfp_flags, node, false);
-}
-EXPORT_SYMBOL(current_io_context);
+EXPORT_SYMBOL(create_io_context_slowpath);
 
 /**
  * get_task_io_context - get io_context of a task
@@ -274,7 +244,7 @@ EXPORT_SYMBOL(current_io_context);
  * incremented.
  *
  * This function always goes through task_lock() and it's better to use
- * current_io_context() + get_io_context() for %current.
+ * %current->io_context + get_io_context() for %current.
  */
 struct io_context *get_task_io_context(struct task_struct *task,
                                        gfp_t gfp_flags, int node)
@@ -283,16 +253,18 @@ struct io_context *get_task_io_context(struct task_struct *task,
 
         might_sleep_if(gfp_flags & __GFP_WAIT);
 
-        task_lock(task);
-        ioc = task->io_context;
-        if (likely(ioc)) {
-                get_io_context(ioc);
-                task_unlock(task);
-                return ioc;
-        }
-        task_unlock(task);
+        do {
+                task_lock(task);
+                ioc = task->io_context;
+                if (likely(ioc)) {
+                        get_io_context(ioc);
+                        task_unlock(task);
+                        return ioc;
+                }
+                task_unlock(task);
+        } while (create_io_context(task, gfp_flags, node));
 
-        return create_task_io_context(task, gfp_flags, node, true);
+        return NULL;
 }
 EXPORT_SYMBOL(get_task_io_context);
 
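The diffstat above is limited to block/blk-ioc.c, so the create_io_context() used as the loop condition in get_task_io_context() is not visible here. Per the commit message it is one of the ioc declarations relocated into blk.h; the wrapper below is a sketch of that interface as this file appears to assume it, not the verbatim blk.h hunk.

/*
 * Sketch of the blk.h side this file pairs with; treat the exact body
 * as an assumption rather than patch text.
 */
void get_io_context(struct io_context *ioc);
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
                                int node);

static inline struct io_context *create_io_context(struct task_struct *task,
                                                   gfp_t gfp_flags, int node)
{
        /* must not be called with IRQs off, i.e. not under queue_lock */
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!task->io_context))
                create_io_context_slowpath(task, gfp_flags, node);
        return task->io_context;
}

Read against the do/while loop above: the retry continues only while an io_context exists to take a reference on; if allocation fails, or the task is exiting so the slowpath refuses to install one, create_io_context() yields NULL and get_task_io_context() returns NULL.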