path: root/block/blk-core.c
author    Neil Brown <neilb@suse.de>                        2008-05-14 19:05:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-05-14 22:11:15 -0400
commit    e7e72bf641b1fc7b9df6f40bd2c36dfccd8d647c (patch)
tree      81b1db5434c9635bf23fb40415056e10390cd692 /block/blk-core.c
parent    4920916f728fe3c51f54c25ab7b3d271254aab5a (diff)
Remove blkdev warning triggered by using md
As setting and clearing queue flags now requires that we hold a spinlock on the queue, and as blk_queue_stack_limits is called without that lock, get the lock inside blk_queue_stack_limits.

For blk_queue_stack_limits to be able to find the right lock, each md personality needs to set q->queue_lock to point to the appropriate lock. Those personalities which didn't previously use a spin_lock use q->__queue_lock, so always initialise that lock when the queue is allocated.

With this in place, setting/clearing of the QUEUE_FLAG_PLUGGED bit will no longer cause warnings, as it will be clear that the proper lock is held.

Thanks to Dan Williams for review and fixing the silly bugs.

Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Alistair John Strachan <alistair@devzero.co.uk>
Cc: Nick Piggin <npiggin@suse.de>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Jacek Luczak <difrost.kernel@gmail.com>
Cc: Prakash Punnoor <prakash@punnoor.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
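The blk-settings.c side of this change lies outside the diffstat below, but the description above implies blk_queue_stack_limits now takes the target queue's lock around the flag update itself. A minimal sketch of that idea (not the exact upstream hunk, which is not shown on this page), assuming the queue_flag_clear() helper that expects the queue lock to be held:

void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* ... stacking of the sector/segment limits as before ... */

	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		/*
		 * queue_flag_clear() expects t->queue_lock to be held,
		 * so take it here rather than requiring every caller
		 * (e.g. the md personalities) to hold it.
		 */
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}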
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--    block/blk-core.c    5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2987fe47b5ee..6a9cc0d22a61 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -482,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
 
 	return q;
 }
@@ -544,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
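For the md side described in the commit message, a hedged sketch of what a personality's setup might do so that blk_queue_stack_limits can find the right lock; the function name here is illustrative and the drivers/md hunks are not part of this page:

static int example_personality_run(mddev_t *mddev)
{
	/*
	 * A personality with its own spinlock would point the queue at
	 * that lock instead, e.g. mddev->queue->queue_lock = &conf->device_lock;
	 * personalities without one use the queue's embedded lock, which
	 * blk_alloc_queue_node() now always initialises (first hunk above).
	 */
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	/* ... remainder of personality initialisation ... */
	return 0;
}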