diff options
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r-- | block/blk-ioc.c | 60 |
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 0910a5584d38..c04d16b02225 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c | |||
@@ -289,7 +289,6 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags, | |||
289 | kmem_cache_free(iocontext_cachep, ioc); | 289 | kmem_cache_free(iocontext_cachep, ioc); |
290 | task_unlock(task); | 290 | task_unlock(task); |
291 | } | 291 | } |
292 | EXPORT_SYMBOL(create_io_context_slowpath); | ||
293 | 292 | ||
294 | /** | 293 | /** |
295 | * get_task_io_context - get io_context of a task | 294 | * get_task_io_context - get io_context of a task |
@@ -362,6 +361,65 @@ out: | |||
362 | } | 361 | } |
363 | EXPORT_SYMBOL(ioc_lookup_icq); | 362 | EXPORT_SYMBOL(ioc_lookup_icq); |
364 | 363 | ||
364 | /** | ||
365 | * ioc_create_icq - create and link io_cq | ||
366 | * @q: request_queue of interest | ||
367 | * @gfp_mask: allocation mask | ||
368 | * | ||
369 | * Make sure io_cq linking %current->io_context and @q exists. If either | ||
370 | * io_context and/or icq don't exist, they will be created using @gfp_mask. | ||
371 | * | ||
372 | * The caller is responsible for ensuring @ioc won't go away and @q is | ||
373 | * alive and will stay alive until this function returns. | ||
374 | */ | ||
375 | struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask) | ||
376 | { | ||
377 | struct elevator_type *et = q->elevator->type; | ||
378 | struct io_context *ioc; | ||
379 | struct io_cq *icq; | ||
380 | |||
381 | /* allocate stuff */ | ||
382 | ioc = create_io_context(current, gfp_mask, q->node); | ||
383 | if (!ioc) | ||
384 | return NULL; | ||
385 | |||
386 | icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO, | ||
387 | q->node); | ||
388 | if (!icq) | ||
389 | return NULL; | ||
390 | |||
391 | if (radix_tree_preload(gfp_mask) < 0) { | ||
392 | kmem_cache_free(et->icq_cache, icq); | ||
393 | return NULL; | ||
394 | } | ||
395 | |||
396 | icq->ioc = ioc; | ||
397 | icq->q = q; | ||
398 | INIT_LIST_HEAD(&icq->q_node); | ||
399 | INIT_HLIST_NODE(&icq->ioc_node); | ||
400 | |||
401 | /* lock both q and ioc and try to link @icq */ | ||
402 | spin_lock_irq(q->queue_lock); | ||
403 | spin_lock(&ioc->lock); | ||
404 | |||
405 | if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) { | ||
406 | hlist_add_head(&icq->ioc_node, &ioc->icq_list); | ||
407 | list_add(&icq->q_node, &q->icq_list); | ||
408 | if (et->ops.elevator_init_icq_fn) | ||
409 | et->ops.elevator_init_icq_fn(icq); | ||
410 | } else { | ||
411 | kmem_cache_free(et->icq_cache, icq); | ||
412 | icq = ioc_lookup_icq(ioc, q); | ||
413 | if (!icq) | ||
414 | printk(KERN_ERR "cfq: icq link failed!\n"); | ||
415 | } | ||
416 | |||
417 | spin_unlock(&ioc->lock); | ||
418 | spin_unlock_irq(q->queue_lock); | ||
419 | radix_tree_preload_end(); | ||
420 | return icq; | ||
421 | } | ||
422 | |||
365 | void ioc_set_changed(struct io_context *ioc, int which) | 423 | void ioc_set_changed(struct io_context *ioc, int which) |
366 | { | 424 | { |
367 | struct io_cq *icq; | 425 | struct io_cq *icq; |