path: root/drivers
author     Nick Piggin <nickpiggin@yahoo.com.au>  2005-06-28 23:45:15 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-29 00:20:35 -0400
commit     fb3cc4320e1fd87143683b540e459a2e20fdc9bb (patch)
tree       bc684070d75cd741820410d743cffb4bd0721504 /drivers
parent     d6344532a26a318c128102507f6328aaafe02d4d (diff)
[PATCH] blk: light iocontext ops
get_io_context needlessly turned off interrupts and checked for racing io context creations. Neither is needed, because an io context can only be created while in process context of the current process.

Also, split the function in two: the light version, current_io_context, does not elevate the reference count, but can be used while in process context, because the process itself holds a reference.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
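In caller terms, the split gives two patterns: code that only touches the io context from its own process context uses the light helper with no get/put pair (as get_request() now does with GFP_ATOMIC), while code that wants to hold the pointer independently still goes through get_io_context() and balances it with put_io_context(). The following caller-side sketch is illustrative only, not part of the patch, and assumes the helper declarations are reachable through <linux/blkdev.h>.

/*
 * Illustrative sketch, not from the patch: the two usage patterns after
 * the split. Assumes current_io_context(), get_io_context() and
 * put_io_context() are visible via <linux/blkdev.h>.
 */
#include <linux/blkdev.h>

/* Light pattern: only valid while we remain in this process context. */
static void touch_own_io_context(void)
{
	struct io_context *ioc = current_io_context(GFP_ATOMIC);

	if (ioc) {
		/* read or update fields of ioc here; "current" pins it */
	}
	/* no put_io_context(): no extra reference was taken */
}

/*
 * Heavy pattern: the refcount is bumped, so the pointer stays valid until
 * we drop our reference, even outside the current process context.
 */
static void hold_io_context_reference(void)
{
	struct io_context *ioc = get_io_context(GFP_NOIO);

	if (!ioc)
		return;

	/* ... use or store ioc; it is pinned by our reference ... */

	put_io_context(ioc);	/* balance the reference taken above */
}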
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/ll_rw_blk.c  56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 5caebe2cf0a1..1197462bb6ba 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1876,7 +1876,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = current_io_context(GFP_ATOMIC);
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
@@ -1959,7 +1959,6 @@ rq_starved:
 	rq_init(q, rq);
 	rq->rl = rl;
 out:
-	put_io_context(ioc);
 	return rq;
 }
 
@@ -1997,9 +1996,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 			 * up to a big batch of them for a small period time.
 			 * See ioc_batching, ioc_set_batching
 			 */
-			ioc = get_io_context(GFP_NOIO);
+			ioc = current_io_context(GFP_NOIO);
 			ioc_set_batching(q, ioc);
-			put_io_context(ioc);
 
 			spin_lock_irq(q->queue_lock);
 		}
@@ -3282,24 +3280,20 @@ void exit_io_context(void)
 
 /*
  * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
  *
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
 {
 	struct task_struct *tsk = current;
-	unsigned long flags;
 	struct io_context *ret;
 
-	local_irq_save(flags);
 	ret = tsk->io_context;
-	if (ret)
-		goto out;
-
-	local_irq_restore(flags);
+	if (likely(ret))
+		return ret;
 
 	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
 	if (ret) {
@@ -3310,25 +3304,25 @@ struct io_context *get_io_context(int gfp_flags)
 		ret->nr_batch_requests = 0;	/* because this is 0 */
 		ret->aic = NULL;
 		ret->cic = NULL;
+		tsk->io_context = ret;
+	}
 
-		local_irq_save(flags);
-
-		/*
-		 * very unlikely, someone raced with us in setting up the task
-		 * io context. free new context and just grab a reference.
-		 */
-		if (!tsk->io_context)
-			tsk->io_context = ret;
-		else {
-			kmem_cache_free(iocontext_cachep, ret);
-			ret = tsk->io_context;
-		}
+	return ret;
+}
+EXPORT_SYMBOL(current_io_context);
 
-out:
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+	struct io_context *ret;
+	ret = current_io_context(gfp_flags);
+	if (likely(ret))
 		atomic_inc(&ret->refcount);
-		local_irq_restore(flags);
-	}
-
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);