about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2011-12-13 18:33:37 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:37 -0500
commit	42ec57a8f68311bbbf4ff96a5d33c8a2e90b9d05 (patch)
tree	3aead2658435f53bd637967c5722358039acc390 /block
parent	a73f730d013ff2788389fd0c46ad3e5510f124e6 (diff)
block: misc ioc cleanups
* int return from put_io_context() wasn't used by anybody.  Make it
  return void like other put functions and docbook-fy the function
  comment.

* Reorder dummy declarations for !CONFIG_BLOCK case a bit.

* Make alloc_ioc_context() use __GFP_ZERO allocation, take init out of
  if block and drop 0'ing.

* Docbook-fy current_io_context() comment.

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-ioc.c	72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6f9bbd978653..8bebf06bac76 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -27,26 +27,28 @@ static void cfq_dtor(struct io_context *ioc)
27 } 27 }
28} 28}
29 29
30/* 30/**
31 * IO Context helper functions. put_io_context() returns 1 if there are no 31 * put_io_context - put a reference of io_context
32 * more users of this io context, 0 otherwise. 32 * @ioc: io_context to put
33 *
34 * Decrement reference count of @ioc and release it if the count reaches
35 * zero.
33 */ 36 */
34int put_io_context(struct io_context *ioc) 37void put_io_context(struct io_context *ioc)
35{ 38{
36 if (ioc == NULL) 39 if (ioc == NULL)
37 return 1; 40 return;
38 41
39 BUG_ON(atomic_long_read(&ioc->refcount) == 0); 42 BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
40 43
41 if (atomic_long_dec_and_test(&ioc->refcount)) { 44 if (!atomic_long_dec_and_test(&ioc->refcount))
42 rcu_read_lock(); 45 return;
43 cfq_dtor(ioc);
44 rcu_read_unlock();
45 46
46 kmem_cache_free(iocontext_cachep, ioc); 47 rcu_read_lock();
47 return 1; 48 cfq_dtor(ioc);
48 } 49 rcu_read_unlock();
49 return 0; 50
51 kmem_cache_free(iocontext_cachep, ioc);
50} 52}
51EXPORT_SYMBOL(put_io_context); 53EXPORT_SYMBOL(put_io_context);
52 54
@@ -84,33 +86,31 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
84{ 86{
85 struct io_context *ioc; 87 struct io_context *ioc;
86 88
87 ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); 89 ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
88 if (ioc) { 90 node);
89 atomic_long_set(&ioc->refcount, 1); 91 if (unlikely(!ioc))
90 atomic_set(&ioc->nr_tasks, 1); 92 return NULL;
91 spin_lock_init(&ioc->lock); 93
92 ioc->ioprio_changed = 0; 94 /* initialize */
93 ioc->ioprio = 0; 95 atomic_long_set(&ioc->refcount, 1);
94 ioc->last_waited = 0; /* doesn't matter... */ 96 atomic_set(&ioc->nr_tasks, 1);
95 ioc->nr_batch_requests = 0; /* because this is 0 */ 97 spin_lock_init(&ioc->lock);
96 INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH); 98 INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
97 INIT_HLIST_HEAD(&ioc->cic_list); 99 INIT_HLIST_HEAD(&ioc->cic_list);
98 ioc->ioc_data = NULL;
99#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
100 ioc->cgroup_changed = 0;
101#endif
102 }
103 100
104 return ioc; 101 return ioc;
105} 102}
106 103
107/* 104/**
108 * If the current task has no IO context then create one and initialise it. 105 * current_io_context - get io_context of %current
109 * Otherwise, return its existing IO context. 106 * @gfp_flags: allocation flags, used if allocation is necessary
107 * @node: allocation node, used if allocation is necessary
110 * 108 *
111 * This returned IO context doesn't have a specifically elevated refcount, 109 * Return io_context of %current. If it doesn't exist, it is created with
112 * but since the current task itself holds a reference, the context can be 110 * @gfp_flags and @node. The returned io_context does NOT have its
113 * used in general code, so long as it stays within `current` context. 111 * reference count incremented. Because io_context is exited only on task
112 * exit, %current can be sure that the returned io_context is valid and
113 * alive as long as it is executing.
114 */ 114 */
115struct io_context *current_io_context(gfp_t gfp_flags, int node) 115struct io_context *current_io_context(gfp_t gfp_flags, int node)
116{ 116{