path: root/block/blk-merge.c
author		Martin K. Petersen <martin.petersen@oracle.com>	2010-12-01 13:41:49 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2010-12-17 02:35:53 -0500
commit		e692cb668fdd5a712c6ed2a2d6f2a36ee83997b4 (patch)
tree		accf682fe5e1388f305b5fc364a931dfda5f3fb9 /block/blk-merge.c
parent		04a6b516cdc6efc2500b52a540cf65be8c5aaf9e (diff)
block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
When stacking devices, a request_queue is not always available. This forced us to have a no_cluster flag in the queue_limits that could be used as a carrier until the request_queue had been set up for a metadevice.

There were several problems with that approach. First of all, it was up to the stacking device to remember to set the queue flag after stacking had completed. Also, the queue flag and the queue limits had to be kept in sync at all times. We got that wrong, which could lead to us issuing commands that went beyond the max scatterlist limit set by the driver.

The proper fix is to avoid having two flags for tracking the same thing. We deprecate QUEUE_FLAG_CLUSTER and use the queue limit directly in the block layer merging functions. The queue_limit 'no_cluster' is turned into 'cluster' to avoid double negatives and to ease stacking. Clustering defaults to being enabled as before. The queue flag logic is removed from the stacking function, and explicitly setting the cluster flag is no longer necessary in DM and MD.

Reported-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
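For context when reading the hunks below: the diff replaces the QUEUE_FLAG_CLUSTER test with a call to blk_queue_cluster(q). The header side of the patch is not part of this diffstat, but the helper is assumed to be a trivial accessor for the new 'cluster' queue limit, along these lines:

/*
 * Assumed shape of the accessor used in the hunks below (the blkdev.h
 * side of the patch is outside this file's diffstat): it simply reads
 * the 'cluster' field that replaces the old no_cluster limit.
 */
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

Because the value lives in queue_limits rather than in queue_flags, it is available even before a stacking driver has a fully set-up request_queue, which is what the commit message is getting at.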
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26df6b5..74bc4a768f32 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
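The commit message also says the queue flag logic was removed from the stacking function; that change lives outside block/blk-merge.c, so the following is only an illustrative, self-contained sketch (simplified struct and names, not the kernel's) of why keeping 'cluster' as a limit eases stacking: the stacked value can simply be the AND of the component devices' limits, with no flag to copy back after the request_queue exists.

/*
 * Illustrative sketch only (not from this diff): with 'cluster' kept in
 * the limits structure, a stacking driver derives its own setting by
 * ANDing the limits of its component devices.  The struct and function
 * names here are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>

struct limits {
	unsigned int cluster;	/* 1 = adjacent segments may be merged */
};

static void stack_limits(struct limits *top, const struct limits *bottom)
{
	/* Clustering is only allowed if every underlying device allows it. */
	top->cluster &= bottom->cluster;
}

int main(void)
{
	struct limits md     = { .cluster = 1 };	/* defaults to enabled */
	struct limits disk_a = { .cluster = 1 };
	struct limits disk_b = { .cluster = 0 };	/* driver disabled clustering */

	stack_limits(&md, &disk_a);
	stack_limits(&md, &disk_b);
	printf("stacked cluster = %u\n", md.cluster);	/* prints 0 */
	return 0;
}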