Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c    9
-rw-r--r--  block/blk-ioc.c      99
-rw-r--r--  block/blk.h           1
-rw-r--r--  block/cfq-iosched.c  18
4 files changed, 81 insertions, 46 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906e..4b001dcd85b0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1645,11 +1645,12 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 	struct io_context *ioc;
 
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc)
+	/* we don't lose anything even if ioc allocation fails */
+	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
+	if (ioc) {
 		ioc->cgroup_changed = 1;
-	task_unlock(tsk);
+		put_io_context(ioc);
+	}
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)
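
Note: the attach path above now goes through the counted-reference interface instead of poking tsk->io_context directly under task_lock(). Since get_task_io_context() creates the io_context when the task has none, the cgroup_changed flag is recorded even for tasks that have not done I/O yet; per the added comment, a failed GFP_ATOMIC allocation loses nothing, because such a task had no io_context to migrate. A minimal caller-side sketch of the same discipline (mark_task_ioc_changed() is a hypothetical wrapper, not part of the patch; its body mirrors blkiocg_attach_task() above):

static void mark_task_ioc_changed(struct task_struct *tsk)
{
	struct io_context *ioc;

	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {			/* NULL only if allocation failed */
		ioc->cgroup_changed = 1;
		put_io_context(ioc);	/* drop the reference taken above */
	}
}
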
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 8bebf06bac76..b13ed96776c2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -16,6 +16,19 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
 static void cfq_dtor(struct io_context *ioc)
 {
 	if (!hlist_empty(&ioc->cic_list)) {
@@ -71,6 +84,9 @@ void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
 
+	/* PF_EXITING prevents new io_context from being attached to @task */
+	WARN_ON_ONCE(!(current->flags & PF_EXITING));
+
 	task_lock(task);
 	ioc = task->io_context;
 	task->io_context = NULL;
@@ -82,7 +98,9 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc);
 }
 
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+static struct io_context *create_task_io_context(struct task_struct *task,
+						 gfp_t gfp_flags, int node,
+						 bool take_ref)
 {
 	struct io_context *ioc;
 
@@ -98,6 +116,20 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->cic_list);
 
+	/* try to install, somebody might already have beaten us to it */
+	task_lock(task);
+
+	if (!task->io_context && !(task->flags & PF_EXITING)) {
+		task->io_context = ioc;
+	} else {
+		kmem_cache_free(iocontext_cachep, ioc);
+		ioc = task->io_context;
+	}
+
+	if (ioc && take_ref)
+		get_io_context(ioc);
+
+	task_unlock(task);
 	return ioc;
 }
 
@@ -114,46 +146,47 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
-	struct task_struct *tsk = current;
-	struct io_context *ret;
+	might_sleep_if(gfp_flags & __GFP_WAIT);
 
-	ret = tsk->io_context;
-	if (likely(ret))
-		return ret;
+	if (current->io_context)
+		return current->io_context;
 
-	ret = alloc_io_context(gfp_flags, node);
-	if (ret) {
-		/* make sure set_task_ioprio() sees the settings above */
-		smp_wmb();
-		tsk->io_context = ret;
-	}
-
-	return ret;
+	return create_task_io_context(current, gfp_flags, node, false);
 }
+EXPORT_SYMBOL(current_io_context);
 
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
+ *
+ * Return io_context of @task. If it doesn't exist, it is created with
+ * @gfp_flags and @node. The returned io_context has its reference count
+ * incremented.
  *
- * This is always called in the context of the task which submitted the I/O.
+ * This function always goes through task_lock() and it's better to use
+ * current_io_context() + get_io_context() for %current.
  */
-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node)
 {
-	struct io_context *ioc = NULL;
+	struct io_context *ioc;
 
-	/*
-	 * Check for unlikely race with exiting task. ioc ref count is
-	 * zero when ioc is being detached.
-	 */
-	do {
-		ioc = current_io_context(gfp_flags, node);
-		if (unlikely(!ioc))
-			break;
-	} while (!atomic_long_inc_not_zero(&ioc->refcount));
+	might_sleep_if(gfp_flags & __GFP_WAIT);
 
-	return ioc;
+	task_lock(task);
+	ioc = task->io_context;
+	if (likely(ioc)) {
+		get_io_context(ioc);
+		task_unlock(task);
+		return ioc;
+	}
+	task_unlock(task);
+
+	return create_task_io_context(task, gfp_flags, node, true);
 }
-EXPORT_SYMBOL(get_io_context);
+EXPORT_SYMBOL(get_task_io_context);
 
 static int __init blk_ioc_init(void)
 {
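
Note: the core of the race fix is create_task_io_context()'s allocate-then-locked-install shape: allocate without the lock, then under task_lock() install only if no io_context appeared meanwhile and the task is not exiting, otherwise free the local copy and use whatever won. A rough, compilable userspace analogue of that install-or-discard pattern, with a pthread mutex standing in for task_lock() (types and names are stand-ins, not kernel code):

#include <pthread.h>
#include <stdlib.h>

struct ioc { int refcount; };

struct task {
	pthread_mutex_t lock;
	struct ioc *ioc;
	int exiting;
};

static struct ioc *create_task_ioc(struct task *t, int take_ref)
{
	struct ioc *ioc = calloc(1, sizeof(*ioc));

	if (!ioc)
		return NULL;
	ioc->refcount = 1;		/* the task's own reference */

	pthread_mutex_lock(&t->lock);
	if (!t->ioc && !t->exiting) {
		t->ioc = ioc;		/* won the race: install our copy */
	} else {
		free(ioc);		/* lost the race, or task is exiting */
		ioc = t->ioc;		/* may be NULL if the task is exiting */
	}
	if (ioc && take_ref)
		ioc->refcount++;	/* caller's reference, cf. take_ref */
	pthread_mutex_unlock(&t->lock);

	return ioc;
}
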
diff --git a/block/blk.h b/block/blk.h
index aae4d88fc523..fc3c41b2fd24 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -122,6 +122,7 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
+void get_io_context(struct io_context *ioc);
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
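
Note: with get_io_context() now declared here, block-layer code can pin an io_context it already sees. The kernel-doc added in blk-ioc.c points out that for %current it is cheaper to pair current_io_context() with get_io_context() than to call get_task_io_context(), since the %current path never needs task_lock(). A minimal sketch of that pairing (hypothetical caller; gfp_mask and q are assumed to be in scope):

	/* cheap referenced lookup for %current, no task_lock() needed */
	struct io_context *ioc = current_io_context(gfp_mask, q->node);
	if (ioc)
		get_io_context(ioc);	/* safe: %current keeps ioc alive meanwhile */
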
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ec3f5e8ba564..d42d89ccce1b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
+#include "blk.h"
 #include "cfq.h"
 
 /*
@@ -3194,13 +3195,13 @@ static struct cfq_io_context *
 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct io_context *ioc = NULL;
-	struct cfq_io_context *cic;
+	struct cfq_io_context *cic = NULL;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	ioc = get_io_context(gfp_mask, cfqd->queue->node);
+	ioc = current_io_context(gfp_mask, cfqd->queue->node);
 	if (!ioc)
-		return NULL;
+		goto err;
 
 	cic = cfq_cic_lookup(cfqd, ioc);
 	if (cic)
@@ -3211,10 +3212,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		goto err;
 
 	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
-		goto err_free;
-
+		goto err;
 out:
-	smp_read_barrier_depends();
+	get_io_context(ioc);
+
 	if (unlikely(ioc->ioprio_changed))
 		cfq_ioc_set_ioprio(ioc);
 
@@ -3223,10 +3224,9 @@ out:
 	cfq_ioc_set_cgroup(ioc);
 #endif
 	return cic;
-err_free:
-	cfq_cic_free(cic);
 err:
-	put_io_context(ioc);
+	if (cic)
+		cfq_cic_free(cic);
 	return NULL;
 }
 
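Note: cfq_get_io_context() now takes its io_context reference only once the success path is certain (at out:), so the error path no longer needs put_io_context(), and the two labels collapse into one: cic starts out NULL and is freed only if it was ever allocated. The shape of that single-label unwind in isolation (a generic sketch with hypothetical alloc_foo()/free_foo()/combine() helpers, not the cfq code):

static struct pair *make_pair(void)
{
	struct foo *a = NULL, *b = NULL;

	a = alloc_foo();
	if (!a)
		goto err;
	b = alloc_foo();
	if (!b)
		goto err;
	return combine(a, b);		/* references taken only on success */
err:
	if (b)				/* free only what was allocated */
		free_foo(b);
	if (a)
		free_foo(a);
	return NULL;
}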