aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 block/blk-core.c     | 16 +++++++++++++++-
 block/blk-settings.c |  7 -------
 2 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d6..bc2b7c5004e1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -446,6 +446,11 @@ void blk_put_queue(struct request_queue *q)
 	kobject_put(&q->kobj);
 }
 
+/*
+ * Note: If a driver supplied the queue lock, it should not zap that lock
+ * unexpectedly as some queue cleanup components like elevator_exit() and
+ * blk_throtl_exit() need queue lock.
+ */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	/*
@@ -540,6 +545,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
+	/*
+	 * By default initialize queue_lock to internal lock and driver can
+	 * override it later if need be.
+	 */
+	q->queue_lock = &q->__queue_lock;
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -624,7 +635,10 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->unprep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
-	q->queue_lock		= lock;
+
+	/* Override internal queue lock with supplied lock pointer */
+	if (lock)
+		q->queue_lock		= lock;
 
 	/*
 	 * This also sets hw/phys segments, boundary and size
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 36c8c1f2af18..df649fa59ded 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -176,13 +176,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
-	 * If the caller didn't supply a lock, fall back to our embedded
-	 * per-queue locks
-	 */
-	if (!q->queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
-	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);