path: root/block/blk-ioc.c
author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /block/blk-ioc.c
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--  block/blk-ioc.c  410
1 file changed, 83 insertions(+), 327 deletions(-)
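What the diff below shows: block/blk-ioc.c is moved back from the icq-based io_context code (get_io_context(ioc)/put_io_context(ioc), ioc_lookup_icq(), ioc_create_icq()) to the older cfq_io_context-based implementation (alloc_io_context(), current_io_context(), a get_io_context(gfp_flags, node) that returns the current task's context with an elevated refcount, and a put_io_context() that returns 1 once the last reference is dropped). As rough orientation, here is a minimal caller sketch against the restored API; it is not part of the patch, and the function name blk_example_use_ioc() plus the GFP_NOIO/q->node arguments are illustrative assumptions only.

/*
 * Hypothetical sketch (not from this patch): how a block-layer caller could
 * use the pre-icq io_context API that this diff restores.
 */
static void blk_example_use_ioc(struct request_queue *q)
{
	struct io_context *ioc;

	/* Look up or allocate %current's io_context; takes a reference. */
	ioc = get_io_context(GFP_NOIO, q->node);  /* GFP_NOIO and q->node are assumed here */
	if (!ioc)
		return;

	/* ... associate the request being built with @ioc ... */

	/* Drop the reference; returns 1 and frees the ioc on the last put. */
	put_io_context(ioc);
}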
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7b..6f9bbd97865 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -16,185 +16,52 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
-/**
- * get_io_context - increment reference count to io_context
- * @ioc: io_context to get
- *
- * Increment reference count to @ioc.
- */
-void get_io_context(struct io_context *ioc)
-{
-	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	atomic_long_inc(&ioc->refcount);
-}
-EXPORT_SYMBOL(get_io_context);
-
-static void icq_free_icq_rcu(struct rcu_head *head)
-{
-	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
-
-	kmem_cache_free(icq->__rcu_icq_cache, icq);
-}
-
-/* Exit an icq. Called with both ioc and q locked. */
-static void ioc_exit_icq(struct io_cq *icq)
-{
-	struct elevator_type *et = icq->q->elevator->type;
-
-	if (icq->flags & ICQ_EXITED)
-		return;
-
-	if (et->ops.elevator_exit_icq_fn)
-		et->ops.elevator_exit_icq_fn(icq);
-
-	icq->flags |= ICQ_EXITED;
-}
-
-/* Release an icq. Called with both ioc and q locked. */
-static void ioc_destroy_icq(struct io_cq *icq)
-{
-	struct io_context *ioc = icq->ioc;
-	struct request_queue *q = icq->q;
-	struct elevator_type *et = q->elevator->type;
-
-	lockdep_assert_held(&ioc->lock);
-	lockdep_assert_held(q->queue_lock);
-
-	radix_tree_delete(&ioc->icq_tree, icq->q->id);
-	hlist_del_init(&icq->ioc_node);
-	list_del_init(&icq->q_node);
-
-	/*
-	 * Both setting lookup hint to and clearing it from @icq are done
-	 * under queue_lock. If it's not pointing to @icq now, it never
-	 * will. Hint assignment itself can race safely.
-	 */
-	if (rcu_dereference_raw(ioc->icq_hint) == icq)
-		rcu_assign_pointer(ioc->icq_hint, NULL);
-
-	ioc_exit_icq(icq);
-
-	/*
-	 * @icq->q might have gone away by the time RCU callback runs
-	 * making it impossible to determine icq_cache. Record it in @icq.
-	 */
-	icq->__rcu_icq_cache = et->icq_cache;
-	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
-}
-
-/*
- * Slow path for ioc release in put_io_context(). Performs double-lock
- * dancing to unlink all icq's and then frees ioc.
- */
-static void ioc_release_fn(struct work_struct *work)
+static void cfq_dtor(struct io_context *ioc)
 {
-	struct io_context *ioc = container_of(work, struct io_context,
-					      release_work);
-	unsigned long flags;
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
 
-	/*
-	 * Exiting icq may call into put_io_context() through elevator
-	 * which will trigger lockdep warning. The ioc's are guaranteed to
-	 * be different, use a different locking subclass here. Use
-	 * irqsave variant as there's no spin_lock_irq_nested().
-	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *q = icq->q;
-
-		if (spin_trylock(q->queue_lock)) {
-			ioc_destroy_icq(icq);
-			spin_unlock(q->queue_lock);
-		} else {
-			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
-			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-		}
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->dtor(ioc);
 	}
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	kmem_cache_free(iocontext_cachep, ioc);
 }
 
-/**
- * put_io_context - put a reference of io_context
- * @ioc: io_context to put
- *
- * Decrement reference count of @ioc and release it if the count reaches
- * zero.
+/*
+ * IO Context helper functions. put_io_context() returns 1 if there are no
+ * more users of this io context, 0 otherwise.
  */
-void put_io_context(struct io_context *ioc)
+int put_io_context(struct io_context *ioc)
 {
-	unsigned long flags;
-	bool free_ioc = false;
-
 	if (ioc == NULL)
-		return;
+		return 1;
 
-	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 
-	/*
-	 * Releasing ioc requires reverse order double locking and we may
-	 * already be holding a queue_lock. Do it asynchronously from wq.
-	 */
 	if (atomic_long_dec_and_test(&ioc->refcount)) {
-		spin_lock_irqsave(&ioc->lock, flags);
-		if (!hlist_empty(&ioc->icq_list))
-			schedule_work(&ioc->release_work);
-		else
-			free_ioc = true;
-		spin_unlock_irqrestore(&ioc->lock, flags);
-	}
+		rcu_read_lock();
+		cfq_dtor(ioc);
+		rcu_read_unlock();
 
-	if (free_ioc)
 		kmem_cache_free(iocontext_cachep, ioc);
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL(put_io_context);
 
-/**
- * put_io_context_active - put active reference on ioc
- * @ioc: ioc of interest
- *
- * Undo get_io_context_active(). If active reference reaches zero after
- * put, @ioc can never issue further IOs and ioscheds are notified.
- */
-void put_io_context_active(struct io_context *ioc)
+static void cfq_exit(struct io_context *ioc)
 {
-	struct hlist_node *n;
-	unsigned long flags;
-	struct io_cq *icq;
+	rcu_read_lock();
 
-	if (!atomic_dec_and_test(&ioc->active_ref)) {
-		put_io_context(ioc);
-		return;
-	}
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
 
-	/*
-	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
-	 * reverse double locking. Read comment in ioc_release_fn() for
-	 * explanation on the nested locking annotation.
-	 */
-retry:
-	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
-		if (icq->flags & ICQ_EXITED)
-			continue;
-		if (spin_trylock(icq->q->queue_lock)) {
-			ioc_exit_icq(icq);
-			spin_unlock(icq->q->queue_lock);
-		} else {
-			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
-			goto retry;
-		}
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->exit(ioc);
 	}
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	put_io_context(ioc);
+	rcu_read_unlock();
 }
 
 /* Called by the exiting task */
@@ -207,197 +74,86 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	atomic_dec(&ioc->nr_tasks);
-	put_io_context_active(ioc);
-}
+	if (atomic_dec_and_test(&ioc->nr_tasks))
+		cfq_exit(ioc);
 
-/**
- * ioc_clear_queue - break any ioc association with the specified queue
- * @q: request_queue being cleared
- *
- * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
- */
-void ioc_clear_queue(struct request_queue *q)
-{
-	lockdep_assert_held(q->queue_lock);
-
-	while (!list_empty(&q->icq_list)) {
-		struct io_cq *icq = list_entry(q->icq_list.next,
-					       struct io_cq, q_node);
-		struct io_context *ioc = icq->ioc;
-
-		spin_lock(&ioc->lock);
-		ioc_destroy_icq(icq);
-		spin_unlock(&ioc->lock);
-	}
+	put_io_context(ioc);
 }
 
-int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
-	int ret;
-
-	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
-				    node);
-	if (unlikely(!ioc))
-		return -ENOMEM;
-
-	/* initialize */
-	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
-	atomic_set(&ioc->active_ref, 1);
-	spin_lock_init(&ioc->lock);
-	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
-	INIT_HLIST_HEAD(&ioc->icq_list);
-	INIT_WORK(&ioc->release_work, ioc_release_fn);
 
-	/*
-	 * Try to install. ioc shouldn't be installed if someone else
-	 * already did or @task, which isn't %current, is exiting. Note
-	 * that we need to allow ioc creation on exiting %current as exit
-	 * path may issue IOs from e.g. exit_files(). The exit path is
-	 * responsible for not issuing IO after exit_io_context().
-	 */
-	task_lock(task);
-	if (!task->io_context &&
-	    (task == current || !(task->flags & PF_EXITING)))
-		task->io_context = ioc;
-	else
-		kmem_cache_free(iocontext_cachep, ioc);
-
-	ret = task->io_context ? 0 : -EBUSY;
-
-	task_unlock(task);
+	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
+	if (ioc) {
+		atomic_long_set(&ioc->refcount, 1);
+		atomic_set(&ioc->nr_tasks, 1);
+		spin_lock_init(&ioc->lock);
+		ioc->ioprio_changed = 0;
+		ioc->ioprio = 0;
+		ioc->last_waited = 0; /* doesn't matter... */
+		ioc->nr_batch_requests = 0; /* because this is 0 */
+		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ioc->cic_list);
+		ioc->ioc_data = NULL;
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+		ioc->cgroup_changed = 0;
+#endif
+	}
 
-	return ret;
+	return ioc;
 }
 
-/**
- * get_task_io_context - get io_context of a task
- * @task: task of interest
- * @gfp_flags: allocation flags, used if allocation is necessary
- * @node: allocation node, used if allocation is necessary
- *
- * Return io_context of @task. If it doesn't exist, it is created with
- * @gfp_flags and @node. The returned io_context has its reference count
- * incremented.
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * Otherwise, return its existing IO context.
  *
- * This function always goes through task_lock() and it's better to use
- * %current->io_context + get_io_context() for %current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_task_io_context(struct task_struct *task,
-				       gfp_t gfp_flags, int node)
+struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ioc;
-
-	might_sleep_if(gfp_flags & __GFP_WAIT);
-
-	do {
-		task_lock(task);
-		ioc = task->io_context;
-		if (likely(ioc)) {
-			get_io_context(ioc);
-			task_unlock(task);
-			return ioc;
-		}
-		task_unlock(task);
-	} while (!create_task_io_context(task, gfp_flags, node));
+	struct task_struct *tsk = current;
+	struct io_context *ret;
+
+	ret = tsk->io_context;
+	if (likely(ret))
+		return ret;
+
+	ret = alloc_io_context(gfp_flags, node);
+	if (ret) {
+		/* make sure set_task_ioprio() sees the settings above */
+		smp_wmb();
+		tsk->io_context = ret;
+	}
 
-	return NULL;
+	return ret;
 }
-EXPORT_SYMBOL(get_task_io_context);
 
-/**
- * ioc_lookup_icq - lookup io_cq from ioc
- * @ioc: the associated io_context
- * @q: the associated request_queue
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
  *
- * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
- * with @q->queue_lock held.
+ * This is always called in the context of the task which submitted the I/O.
  */
-struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_cq *icq;
-
-	lockdep_assert_held(q->queue_lock);
+	struct io_context *ioc = NULL;
 
 	/*
-	 * icq's are indexed from @ioc using radix tree and hint pointer,
-	 * both of which are protected with RCU. All removals are done
-	 * holding both q and ioc locks, and we're holding q lock - if we
-	 * find a icq which points to us, it's guaranteed to be valid.
+	 * Check for unlikely race with exiting task. ioc ref count is
+	 * zero when ioc is being detached.
 	 */
-	rcu_read_lock();
-	icq = rcu_dereference(ioc->icq_hint);
-	if (icq && icq->q == q)
-		goto out;
-
-	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
-	if (icq && icq->q == q)
-		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
-	else
-		icq = NULL;
-out:
-	rcu_read_unlock();
-	return icq;
-}
-EXPORT_SYMBOL(ioc_lookup_icq);
-
-/**
- * ioc_create_icq - create and link io_cq
- * @ioc: io_context of interest
- * @q: request_queue of interest
- * @gfp_mask: allocation mask
- *
- * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
- * will be created using @gfp_mask.
- *
- * The caller is responsible for ensuring @ioc won't go away and @q is
- * alive and will stay alive until this function returns.
- */
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
-			     gfp_t gfp_mask)
-{
-	struct elevator_type *et = q->elevator->type;
-	struct io_cq *icq;
-
-	/* allocate stuff */
-	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
-				    q->node);
-	if (!icq)
-		return NULL;
-
-	if (radix_tree_preload(gfp_mask) < 0) {
-		kmem_cache_free(et->icq_cache, icq);
-		return NULL;
-	}
-
-	icq->ioc = ioc;
-	icq->q = q;
-	INIT_LIST_HEAD(&icq->q_node);
-	INIT_HLIST_NODE(&icq->ioc_node);
-
-	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
-	spin_lock(&ioc->lock);
-
-	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
-		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
-		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.elevator_init_icq_fn)
-			et->ops.elevator_init_icq_fn(icq);
-	} else {
-		kmem_cache_free(et->icq_cache, icq);
-		icq = ioc_lookup_icq(ioc, q);
-		if (!icq)
-			printk(KERN_ERR "cfq: icq link failed!\n");
-	}
+	do {
+		ioc = current_io_context(gfp_flags, node);
+		if (unlikely(!ioc))
+			break;
+	} while (!atomic_long_inc_not_zero(&ioc->refcount));
 
-	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
-	radix_tree_preload_end();
-	return icq;
+	return ioc;
 }
+EXPORT_SYMBOL(get_io_context);
 
 static int __init blk_ioc_init(void)
 {