| author | Neil Brown <neilb@suse.de> | 2008-05-14 19:05:54 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-05-14 22:11:15 -0400 |
| commit | e7e72bf641b1fc7b9df6f40bd2c36dfccd8d647c (patch) | |
| tree | 81b1db5434c9635bf23fb40415056e10390cd692 /block/blk-settings.c | |
| parent | 4920916f728fe3c51f54c25ab7b3d271254aab5a (diff) | |
Remove blkdev warning triggered by using md
As setting and clearing queue flags now requires that we hold a spinlock
on the queue, and as blk_queue_stack_limits is called without that lock,
get the lock inside blk_queue_stack_limits.
For blk_queue_stack_limits to be able to find the right lock, each md
personality needs to set q->queue_lock to point to the appropriate lock.
Those personalities which didn't previously use a spin_lock use
q->__queue_lock. So always initialise that lock when the queue is allocated.
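
For illustration, a personality's setup might then look roughly like the
sketch below. The names example_conf, example_run, and the conf test are
invented for this sketch and do not appear in the patch; mddev_t,
mddev->private, mddev->queue, q->queue_lock, and q->__queue_lock are the
real names from this kernel era.

```c
/*
 * Illustrative sketch only -- example_conf and example_run are
 * hypothetical stand-ins for an md personality's private state and
 * its ->run() method; they are not part of this patch.
 */
struct example_conf {
	spinlock_t device_lock;	/* the personality's own lock, if it has one */
};

static int example_run(mddev_t *mddev)
{
	struct example_conf *conf = mddev->private;
	struct request_queue *q = mddev->queue;

	if (conf)
		/* raid5-style: reuse the lock the personality already takes */
		q->queue_lock = &conf->device_lock;
	else
		/* no private lock: fall back to the queue's embedded lock,
		 * which is now always initialised at allocation time */
		q->queue_lock = &q->__queue_lock;

	return 0;
}
```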
With this in place, setting/clearing of the QUEUE_FLAG_PLUGGED bit will no
longer cause warnings as it will be clear that the proper lock is held.
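
For reference, the queue-flag helpers from this kernel series check the
lock approximately as follows (paraphrased from include/linux/blkdev.h of
the same series; not part of this patch):

```c
static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	/* this is the warning that fired when q->queue_lock was unset */
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
```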
Thanks to Dan Williams for review and fixing the silly bugs.
Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Alistair John Strachan <alistair@devzero.co.uk>
Cc: Nick Piggin <npiggin@suse.de>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Jacek Luczak <difrost.kernel@gmail.com>
Cc: Prakash Punnoor <prakash@punnoor.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 8 |
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bb93d4c32775..8dd86418f35d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -286,8 +286,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+	if (!t->queue_lock)
+		WARN_ON_ONCE(1);
+	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+		unsigned long flags;
+		spin_lock_irqsave(t->queue_lock, flags);
 		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+		spin_unlock_irqrestore(t->queue_lock, flags);
+	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);