author     Tejun Heo <tj@kernel.org>        2012-03-05 16:15:24 -0500
committer  Jens Axboe <axboe@kernel.dk>    2012-03-06 15:27:24 -0500
commit     24acfc34fba0b4f62ef9d5c2616eb0faa802b606
tree       42d07b0e4ad922b24853fe542cb9ab543aa8174c
parent     b679281a6410676a41b175c5a185150a1ae42f9d
block: interface update for ioc/icq creation functions
Make the following interface updates to prepare for future ioc related
changes.

* create_io_context() returning ioc only works for %current because it
  doesn't increment ref on the ioc.  Drop @task parameter from it and
  always assume %current.

* Make create_io_context_slowpath() return 0 or -errno and rename it
  to create_task_io_context().

* Make ioc_create_icq() take @ioc as parameter instead of assuming
  that of %current.  The caller, get_request(), is updated to create
  ioc explicitly and then pass it into ioc_create_icq().

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
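To make the new calling convention concrete, here is a condensed sketch
assembled from the diff below: the three updated declarations plus the new
get_request() call site. This is kernel-internal code shown out of context,
not a standalone example; the diff itself is authoritative.

    /* blk.h: the fast path is now hardwired to %current.  It returns the
     * io_context, which may still be NULL if allocation failed, and must
     * not be called with IRQs disabled because task_lock() is IRQ-unsafe. */
    static inline struct io_context *create_io_context(gfp_t gfp_mask, int node);

    /* blk-ioc.c: the renamed slowpath now returns 0 or -errno (-ENOMEM on
     * allocation failure) instead of void. */
    int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node);

    /* blk-ioc.c: takes the target io_context as an explicit @ioc parameter;
     * the caller must keep @ioc and @q alive across the call. */
    struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                                 gfp_t gfp_mask);

    /* Call-site pattern in get_request(): create the ioc explicitly, then
     * pass it into ioc_create_icq(). */
    ioc = create_io_context(gfp_mask, q->node);
    if (ioc)
            icq = ioc_create_icq(ioc, q, gfp_mask);
    if (!icq)
            goto fail_alloc;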
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c    8
-rw-r--r--   block/blk-ioc.c    22
-rw-r--r--   block/blk.h        24
3 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 792a384a8e35..b2d0fcd8f87f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -855,7 +855,7 @@ retry:
 		 */
 		if (!ioc && !retried) {
 			spin_unlock_irq(q->queue_lock);
-			create_io_context(current, gfp_mask, q->node);
+			create_io_context(gfp_mask, q->node);
 			spin_lock_irq(q->queue_lock);
 			retried = true;
 			goto retry;
@@ -919,7 +919,9 @@ retry:
 
 	/* create icq if missing */
 	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-		icq = ioc_create_icq(q, gfp_mask);
+		ioc = create_io_context(gfp_mask, q->node);
+		if (ioc)
+			icq = ioc_create_icq(ioc, q, gfp_mask);
 		if (!icq)
 			goto fail_alloc;
 	}
@@ -1005,7 +1007,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		create_io_context(current, GFP_NOIO, q->node);
+		create_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 92bf55540d87..10928740b5da 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -212,15 +212,14 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
@@ -244,6 +243,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }
 
 /**
@@ -275,7 +276,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
@@ -319,26 +320,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
+ * will be created using @gfp_mask.
  *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
diff --git a/block/blk.h b/block/blk.h
index de15f920b38f..aa81afde8220 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -200,32 +200,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						   gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }
 
 /*