Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r-- | block/blk-ioc.c | 485
1 file changed, 400 insertions, 85 deletions
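
The diff below reworks the io_context lifetime API: per-queue scheduler state moves into io_cq objects, get_task_io_context() replaces the old gfp/node flavour of get_io_context(), ioc_lookup_icq() finds the io_cq for an (ioc, q) pair under q->queue_lock, and put_io_context() now takes an optional locked-queue hint. A minimal caller sketch against those signatures (illustrative only, not part of the patch; it assumes the declarations are visible to the caller, e.g. via linux/iocontext.h and block/blk.h):

/*
 * Sketch, not part of the patch: probe whether %current already has an
 * io_cq on @q using the interface introduced below.
 */
static bool current_has_icq_on(struct request_queue *q)
{
	struct io_context *ioc;
	struct io_cq *icq;

	/* may allocate an io_context; returns it with a reference held */
	ioc = get_task_io_context(current, GFP_KERNEL, q->node);
	if (!ioc)
		return false;

	/* ioc_lookup_icq() must be called with q->queue_lock held */
	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	/* no queue_lock held here, so pass NULL as the locked-queue hint */
	put_io_context(ioc, NULL);
	return icq != NULL;
}
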
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6f9bbd978653..27a06e00eaec 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -16,53 +16,214 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
-static void cfq_dtor(struct io_context *ioc)
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
+/*
+ * Releasing ioc may nest into another put_io_context() leading to nested
+ * fast path release. As the ioc's can't be the same, this is okay but
+ * makes lockdep whine. Keep track of nesting and use it as subclass.
+ */
+#ifdef CONFIG_LOCKDEP
+#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
+#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
+#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
+#else
+#define ioc_release_depth(q)		0
+#define ioc_release_depth_inc(q)	do { } while (0)
+#define ioc_release_depth_dec(q)	do { } while (0)
+#endif
+
+static void icq_free_icq_rcu(struct rcu_head *head)
+{
+	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
+
+	kmem_cache_free(icq->__rcu_icq_cache, icq);
+}
+
+/*
+ * Exit and free an icq. Called with both ioc and q locked.
+ */
+static void ioc_exit_icq(struct io_cq *icq)
 {
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	struct io_context *ioc = icq->ioc;
+	struct request_queue *q = icq->q;
+	struct elevator_type *et = q->elevator->type;
+
+	lockdep_assert_held(&ioc->lock);
+	lockdep_assert_held(q->queue_lock);
+
+	radix_tree_delete(&ioc->icq_tree, icq->q->id);
+	hlist_del_init(&icq->ioc_node);
+	list_del_init(&icq->q_node);
+
+	/*
+	 * Both setting lookup hint to and clearing it from @icq are done
+	 * under queue_lock. If it's not pointing to @icq now, it never
+	 * will. Hint assignment itself can race safely.
+	 */
+	if (rcu_dereference_raw(ioc->icq_hint) == icq)
+		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->dtor(ioc);
+	if (et->ops.elevator_exit_icq_fn) {
+		ioc_release_depth_inc(q);
+		et->ops.elevator_exit_icq_fn(icq);
+		ioc_release_depth_dec(q);
 	}
+
+	/*
+	 * @icq->q might have gone away by the time RCU callback runs
+	 * making it impossible to determine icq_cache. Record it in @icq.
+	 */
+	icq->__rcu_icq_cache = et->icq_cache;
+	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
 }
 
 /*
- * IO Context helper functions. put_io_context() returns 1 if there are no
- * more users of this io context, 0 otherwise.
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all icq's and then frees ioc.
  */
-int put_io_context(struct io_context *ioc)
+static void ioc_release_fn(struct work_struct *work)
 {
-	if (ioc == NULL)
-		return 1;
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	struct request_queue *last_q = NULL;
 
-	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
+	spin_lock_irq(&ioc->lock);
 
-	if (atomic_long_dec_and_test(&ioc->refcount)) {
-		rcu_read_lock();
-		cfq_dtor(ioc);
-		rcu_read_unlock();
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *this_q = icq->q;
 
-		kmem_cache_free(iocontext_cachep, ioc);
-		return 1;
+		if (this_q != last_q) {
+			/*
+			 * Need to switch to @this_q. Once we release
+			 * @ioc->lock, it can go away along with @cic.
+			 * Hold on to it.
+			 */
+			__blk_get_queue(this_q);
+
+			/*
+			 * blk_put_queue() might sleep thanks to kobject
+			 * idiocy. Always release both locks, put and
+			 * restart.
+			 */
+			if (last_q) {
+				spin_unlock(last_q->queue_lock);
+				spin_unlock_irq(&ioc->lock);
+				blk_put_queue(last_q);
+			} else {
+				spin_unlock_irq(&ioc->lock);
+			}
+
+			last_q = this_q;
+			spin_lock_irq(this_q->queue_lock);
+			spin_lock(&ioc->lock);
+			continue;
+		}
+		ioc_exit_icq(icq);
 	}
-	return 0;
+
+	if (last_q) {
+		spin_unlock(last_q->queue_lock);
+		spin_unlock_irq(&ioc->lock);
+		blk_put_queue(last_q);
+	} else {
+		spin_unlock_irq(&ioc->lock);
+	}
+
+	kmem_cache_free(iocontext_cachep, ioc);
 }
-EXPORT_SYMBOL(put_io_context);
 
-static void cfq_exit(struct io_context *ioc)
+/**
+ * put_io_context - put a reference of io_context
+ * @ioc: io_context to put
+ * @locked_q: request_queue the caller is holding queue_lock of (hint)
+ *
+ * Decrement reference count of @ioc and release it if the count reaches
+ * zero. If the caller is holding queue_lock of a queue, it can indicate
+ * that with @locked_q. This is an optimization hint and the caller is
+ * allowed to pass in %NULL even when it's holding a queue_lock.
+ */
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 {
-	rcu_read_lock();
+	struct request_queue *last_q = locked_q;
+	unsigned long flags;
+
+	if (ioc == NULL)
+		return;
+
+	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	if (locked_q)
+		lockdep_assert_held(locked_q->queue_lock);
 
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	if (!atomic_long_dec_and_test(&ioc->refcount))
+		return;
+
+	/*
+	 * Destroy @ioc. This is a bit messy because icq's are chained
+	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
+	 * The inner ioc->lock should be held to walk our icq_list and then
+	 * for each icq the outer matching queue_lock should be grabbed.
+	 * ie. We need to do reverse-order double lock dancing.
+	 *
+	 * Another twist is that we are often called with one of the
+	 * matching queue_locks held as indicated by @locked_q, which
+	 * prevents performing double-lock dance for other queues.
+	 *
+	 * So, we do it in two stages. The fast path uses the queue_lock
+	 * the caller is holding and, if other queues need to be accessed,
+	 * uses trylock to avoid introducing locking dependency. This can
+	 * handle most cases, especially if @ioc was performing IO on only
+	 * single device.
+	 *
+	 * If trylock doesn't cut it, we defer to @ioc->release_work which
+	 * can do all the double-locking dancing.
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags,
+				 ioc_release_depth(locked_q));
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->exit(ioc);
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *this_q = icq->q;
+
+		if (this_q != last_q) {
+			if (last_q && last_q != locked_q)
+				spin_unlock(last_q->queue_lock);
+			last_q = NULL;
+
+			if (!spin_trylock(this_q->queue_lock))
+				break;
+			last_q = this_q;
+			continue;
+		}
+		ioc_exit_icq(icq);
 	}
-	rcu_read_unlock();
+
+	if (last_q && last_q != locked_q)
+		spin_unlock(last_q->queue_lock);
+
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	/* if no icq is left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->icq_list))
+		kmem_cache_free(iocontext_cachep, ioc);
+	else
+		schedule_work(&ioc->release_work);
 }
+EXPORT_SYMBOL(put_io_context);
 
 /* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
@@ -74,86 +235,240 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks))
-		cfq_exit(ioc);
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context(ioc, NULL);
+}
+
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	while (!list_empty(&q->icq_list)) {
+		struct io_cq *icq = list_entry(q->icq_list.next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
 
-	put_io_context(ioc);
+		spin_lock(&ioc->lock);
+		ioc_exit_icq(icq);
+		spin_unlock(&ioc->lock);
+	}
 }
 
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
+				int node)
 {
 	struct io_context *ioc;
 
-	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-	if (ioc) {
-		atomic_long_set(&ioc->refcount, 1);
-		atomic_set(&ioc->nr_tasks, 1);
-		spin_lock_init(&ioc->lock);
-		ioc->ioprio_changed = 0;
-		ioc->ioprio = 0;
-		ioc->last_waited = 0; /* doesn't matter... */
-		ioc->nr_batch_requests = 0; /* because this is 0 */
-		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
-		INIT_HLIST_HEAD(&ioc->cic_list);
-		ioc->ioc_data = NULL;
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-		ioc->cgroup_changed = 0;
-#endif
-	}
+	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
+				    node);
+	if (unlikely(!ioc))
+		return;
 
-	return ioc;
+	/* initialize */
+	atomic_long_set(&ioc->refcount, 1);
+	atomic_set(&ioc->nr_tasks, 1);
+	spin_lock_init(&ioc->lock);
+	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+	INIT_HLIST_HEAD(&ioc->icq_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);
+
+	/*
+	 * Try to install. ioc shouldn't be installed if someone else
+	 * already did or @task, which isn't %current, is exiting. Note
+	 * that we need to allow ioc creation on exiting %current as exit
+	 * path may issue IOs from e.g. exit_files(). The exit path is
+	 * responsible for not issuing IO after exit_io_context().
+	 */
+	task_lock(task);
+	if (!task->io_context &&
+	    (task == current || !(task->flags & PF_EXITING)))
+		task->io_context = ioc;
+	else
+		kmem_cache_free(iocontext_cachep, ioc);
+	task_unlock(task);
 }
 
-/*
- * If the current task has no IO context then create one and initialise it.
- * Otherwise, return its existing IO context.
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
  *
- * This returned IO context doesn't have a specifically elevated refcount,
- * but since the current task itself holds a reference, the context can be
- * used in general code, so long as it stays within `current` context.
+ * Return io_context of @task. If it doesn't exist, it is created with
+ * @gfp_flags and @node. The returned io_context has its reference count
+ * incremented.
+ *
+ * This function always goes through task_lock() and it's better to use
+ * %current->io_context + get_io_context() for %current.
  */
-struct io_context *current_io_context(gfp_t gfp_flags, int node)
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node)
 {
-	struct task_struct *tsk = current;
-	struct io_context *ret;
-
-	ret = tsk->io_context;
-	if (likely(ret))
-		return ret;
-
-	ret = alloc_io_context(gfp_flags, node);
-	if (ret) {
-		/* make sure set_task_ioprio() sees the settings above */
-		smp_wmb();
-		tsk->io_context = ret;
-	}
+	struct io_context *ioc;
 
-	return ret;
+	might_sleep_if(gfp_flags & __GFP_WAIT);
+
+	do {
+		task_lock(task);
+		ioc = task->io_context;
+		if (likely(ioc)) {
+			get_io_context(ioc);
+			task_unlock(task);
+			return ioc;
+		}
+		task_unlock(task);
+	} while (create_io_context(task, gfp_flags, node));
+
+	return NULL;
 }
+EXPORT_SYMBOL(get_task_io_context);
 
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+/**
+ * ioc_lookup_icq - lookup io_cq from ioc
+ * @ioc: the associated io_context
+ * @q: the associated request_queue
  *
- * This is always called in the context of the task which submitted the I/O.
+ * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
+ * with @q->queue_lock held.
  */
-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
-	struct io_context *ioc = NULL;
+	struct io_cq *icq;
+
+	lockdep_assert_held(q->queue_lock);
 
 	/*
-	 * Check for unlikely race with exiting task. ioc ref count is
-	 * zero when ioc is being detached.
+	 * icq's are indexed from @ioc using radix tree and hint pointer,
+	 * both of which are protected with RCU. All removals are done
+	 * holding both q and ioc locks, and we're holding q lock - if we
+	 * find a icq which points to us, it's guaranteed to be valid.
 	 */
-	do {
-		ioc = current_io_context(gfp_flags, node);
-		if (unlikely(!ioc))
-			break;
-	} while (!atomic_long_inc_not_zero(&ioc->refcount));
+	rcu_read_lock();
+	icq = rcu_dereference(ioc->icq_hint);
+	if (icq && icq->q == q)
+		goto out;
 
-	return ioc;
+	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
+	if (icq && icq->q == q)
+		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
+	else
+		icq = NULL;
+out:
+	rcu_read_unlock();
+	return icq;
 }
-EXPORT_SYMBOL(get_io_context);
+EXPORT_SYMBOL(ioc_lookup_icq);
+
+/**
+ * ioc_create_icq - create and link io_cq
+ * @q: request_queue of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure io_cq linking %current->io_context and @q exists. If either
+ * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ *
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
+ */
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+{
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc;
+	struct io_cq *icq;
+
+	/* allocate stuff */
+	ioc = create_io_context(current, gfp_mask, q->node);
+	if (!ioc)
+		return NULL;
+
+	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+				    q->node);
+	if (!icq)
+		return NULL;
+
+	if (radix_tree_preload(gfp_mask) < 0) {
+		kmem_cache_free(et->icq_cache, icq);
+		return NULL;
+	}
+
+	icq->ioc = ioc;
+	icq->q = q;
+	INIT_LIST_HEAD(&icq->q_node);
+	INIT_HLIST_NODE(&icq->ioc_node);
+
+	/* lock both q and ioc and try to link @icq */
+	spin_lock_irq(q->queue_lock);
+	spin_lock(&ioc->lock);
+
+	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
+		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
+		list_add(&icq->q_node, &q->icq_list);
+		if (et->ops.elevator_init_icq_fn)
+			et->ops.elevator_init_icq_fn(icq);
+	} else {
+		kmem_cache_free(et->icq_cache, icq);
+		icq = ioc_lookup_icq(ioc, q);
+		if (!icq)
+			printk(KERN_ERR "cfq: icq link failed!\n");
+	}
+
+	spin_unlock(&ioc->lock);
+	spin_unlock_irq(q->queue_lock);
+	radix_tree_preload_end();
+	return icq;
+}
+
+void ioc_set_changed(struct io_context *ioc, int which)
+{
+	struct io_cq *icq;
+	struct hlist_node *n;
+
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
+		set_bit(which, &icq->changed);
+}
+
+/**
+ * ioc_ioprio_changed - notify ioprio change
+ * @ioc: io_context of interest
+ * @ioprio: new ioprio
+ *
+ * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
+ * icq's. iosched is responsible for checking the bit and applying it on
+ * request issue path.
+ */
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->lock, flags);
+	ioc->ioprio = ioprio;
+	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
+	spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
+/**
+ * ioc_cgroup_changed - notify cgroup change
+ * @ioc: io_context of interest
+ *
+ * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
+ * iosched is responsible for checking the bit and applying it on request
+ * issue path.
+ */
+void ioc_cgroup_changed(struct io_context *ioc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->lock, flags);
+	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
+	spin_unlock_irqrestore(&ioc->lock, flags);
+}
+EXPORT_SYMBOL(ioc_cgroup_changed);
 
 static int __init blk_ioc_init(void)
 {