author	Vivek Goyal <vgoyal@redhat.com>	2011-03-02 19:04:42 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-02 19:06:49 -0500
commit	c94a96ac93b4f5b8d1ff8430b1afa1a25610cf53 (patch)
tree	3cc138b16df4c0802ec42868a6d1f2eed42ba695 /block
parent	53f22956effe1c9e7961b8c6e4362ecca5e460b7 (diff)
block: Initialize ->queue_lock to internal lock at queue allocation time
There does not seem to be a clear convention for whether q->queue_lock is initialized when blk_cleanup_queue() is called. In the past this did not matter, but blk_throtl_exit() now takes the queue lock by default and needs it to be available. The elevator_exit() code has a similar requirement, though it is less stringent in that elevator_exit() is called only if an elevator was initialized.

Two problems have been noticed because of this ambiguity about the spin lock's status:

- If a driver calls blk_alloc_queue() and then calls blk_cleanup_queue() almost immediately (because some other driver structure allocation failed, or some other error happened), blk_throtl_exit() runs into trouble because the queue lock was never initialized (sketched below). The loop driver ran into this issue recently, and I noticed error paths in the md driver too; similar error paths likely exist in other drivers as well.

- If a driver provided an external spin lock and zapped that lock before blk_cleanup_queue(), it can lead to issues.

So this patch initializes the default queue lock at queue allocation time. The block throttling code is one user of the queue lock and is set up at queue allocation time, so it makes sense to initialize ->queue_lock to the internal lock there as well. A driver can override that lock later. With this change, a driver no longer has to worry about restoring the queue lock to the default before calling blk_cleanup_queue().

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
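To make the failing sequence concrete, here is a minimal sketch of the error path described in the first bullet above. It is illustrative only: example_probe() and the kmalloc() failure standing in for "some other driver structure allocation failed" are hypothetical, not code from the loop or md drivers.

#include <linux/blkdev.h>
#include <linux/slab.h>

/*
 * Hypothetical probe function. Before this patch, q->queue_lock was
 * still NULL at the blk_cleanup_queue() call below, so taking the
 * queue lock in blk_throtl_exit() during cleanup dereferenced a NULL
 * pointer. With the patch, q->queue_lock already points at
 * q->__queue_lock, so the early cleanup is safe.
 */
static int example_probe(void)
{
	struct request_queue *q;
	void *ctx;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	ctx = kmalloc(1024, GFP_KERNEL);	/* some unrelated allocation */
	if (!ctx) {
		blk_cleanup_queue(q);		/* previously crashed here */
		return -ENOMEM;
	}

	kfree(ctx);
	return 0;
}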
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	16
-rw-r--r--	block/blk-settings.c	7
2 files changed, 15 insertions, 8 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d6..bc2b7c5004e1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -446,6 +446,11 @@ void blk_put_queue(struct request_queue *q)
 	kobject_put(&q->kobj);
 }
 
+/*
+ * Note: If a driver supplied the queue lock, it should not zap that lock
+ * unexpectedly as some queue cleanup components like elevator_exit() and
+ * blk_throtl_exit() need queue lock.
+ */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	/*
@@ -540,6 +545,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
+	/*
+	 * By default initialize queue_lock to internal lock and driver can
+	 * override it later if need be.
+	 */
+	q->queue_lock = &q->__queue_lock;
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -624,7 +635,10 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->unprep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
-	q->queue_lock		= lock;
+
+	/* Override internal queue lock with supplied lock pointer */
+	if (lock)
+		q->queue_lock		= lock;
 
 	/*
 	 * This also sets hw/phys segments, boundary and size
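The hunk above gives blk_init_allocated_queue_node() two initialization paths, which can be sketched as follows; the mydrv_* identifiers are hypothetical. Note the pairing with the new comment above blk_cleanup_queue(): a driver-supplied lock must stay valid until cleanup has finished, since elevator_exit() and blk_throtl_exit() take it.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void mydrv_request_fn(struct request_queue *q);	/* hypothetical */

/* Driver-owned lock; must outlive blk_cleanup_queue(). */
static DEFINE_SPINLOCK(mydrv_lock);

static struct request_queue *mydrv_setup(bool use_own_lock)
{
	/*
	 * With a non-NULL lock, blk_init_queue() overrides the internal
	 * default; after this patch, passing NULL leaves q->queue_lock
	 * pointing at the q->__queue_lock set up at allocation time.
	 */
	return blk_init_queue(mydrv_request_fn,
			      use_own_lock ? &mydrv_lock : NULL);
}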
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 36c8c1f2af18..df649fa59ded 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -176,13 +176,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
-	 * If the caller didn't supply a lock, fall back to our embedded
-	 * per-queue locks
-	 */
-	if (!q->queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
-	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
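Removing this fallback is safe because ->queue_lock is now set at allocation: a bio-based driver that never supplies a lock still ends up with a valid lock before blk_queue_make_request() runs. A minimal sketch (identifiers hypothetical):

#include <linux/blkdev.h>

static int mydrv_make_request(struct request_queue *q, struct bio *bio);	/* hypothetical */

static struct request_queue *mydrv_alloc(void)
{
	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);	/* ->queue_lock already = &q->__queue_lock */
	if (!q)
		return NULL;

	/* No lock fixup happens (or is needed) here any more. */
	blk_queue_make_request(q, mydrv_make_request);
	return q;
}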