author     Martin K. Petersen <martin.petersen@oracle.com>    2010-12-01 13:41:49 -0500
committer  Jens Axboe <jaxboe@fusionio.com>                    2010-12-17 02:35:53 -0500
commit     e692cb668fdd5a712c6ed2a2d6f2a36ee83997b4 (patch)
tree       accf682fe5e1388f305b5fc364a931dfda5f3fb9 /block/blk-settings.c
parent     04a6b516cdc6efc2500b52a540cf65be8c5aaf9e (diff)
block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
When stacking devices, a request_queue is not always available. This
forced us to have a no_cluster flag in the queue_limits that could be
used as a carrier until the request_queue had been set up for a
metadevice.
There were several problems with that approach. First of all, it was up
to the stacking device to remember to set the queue flag after stacking had
completed. Also, the queue flag and the queue limits had to be kept in
sync at all times. We got that wrong, which could lead to us issuing
commands that went beyond the max scatterlist limit set by the driver.
The proper fix is to avoid having two flags for tracking the same thing.
We deprecate QUEUE_FLAG_CLUSTER and use the queue limit directly in the
block layer merging functions. The queue_limit 'no_cluster' is turned
into 'cluster' to avoid double negatives and to ease stacking.
Clustering defaults to being enabled as before. The queue flag logic is
removed from the stacking function, and explicitly setting the cluster
flag is no longer necessary in DM and MD.
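As a rough sketch of the consumer side (illustrative only; the helper below lives outside blk-settings.c and its exact name in the tree may differ), the merging code can now read the limit directly instead of testing QUEUE_FLAG_CLUSTER:

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * Simplified, made-up check in the spirit of the segment-merging path
 * (not the verbatim blk-merge.c code): clustering decides whether two
 * adjacent bio segments may share one scatterlist entry.
 */
static bool example_can_merge_segments(struct request_queue *q,
				       unsigned int seg_size,
				       unsigned int next_len)
{
	if (!blk_queue_cluster(q))	/* was: test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags) */
		return false;

	return seg_size + next_len <= queue_max_segment_size(q);
}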
Reported-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--   block/blk-settings.c   25
1 file changed, 2 insertions(+), 23 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859fb9647..e55f5fc4ca22 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -464,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
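Turning the negated no_cluster into a positive cluster limit is what lets the stacking rule above stay a plain AND. A minimal sketch of the effect, using only the two functions already shown in this diff (the wrapper function and its arguments are made up for illustration):

static void cluster_stacking_example(struct queue_limits *bottom_a,
				     struct queue_limits *bottom_b)
{
	struct queue_limits top;

	blk_set_default_limits(&top);		/* top.cluster starts out as 1 */

	blk_stack_limits(&top, bottom_a, 0);	/* top.cluster &= bottom_a->cluster */
	blk_stack_limits(&top, bottom_b, 0);	/* top.cluster &= bottom_b->cluster */

	/*
	 * If either bottom device cannot cluster, top.cluster is now 0,
	 * with no queue flag to mirror back and no queue_lock to take.
	 */
}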
@@ -641,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
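With the flag logic gone from both stacking helpers, a DM/MD-style driver's limit setup reduces to the calls below. This is a hypothetical sketch (the function and its component array are invented for illustration, not code from either subsystem):

static void example_stacked_disk_setup(struct gendisk *disk,
				       struct block_device **parts, int nr)
{
	int i;

	blk_set_default_limits(&disk->queue->limits);	/* cluster defaults to 1 */

	for (i = 0; i < nr; i++)
		disk_stack_limits(disk, parts[i], 0);	/* cluster propagates via blk_stack_limits() */

	/* no test_bit()/queue_flag_clear() of QUEUE_FLAG_CLUSTER needed any more */
}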