author     Martin K. Petersen <martin.petersen@oracle.com>   2009-05-22 17:17:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                2009-05-22 17:22:54 -0400
commit     ae03bf639a5027d27270123f5f6e3ee6a412781d (patch)
tree       d705f41a188ad656b1f47f7952626a9f992e3b8f /drivers/md
parent     e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (diff)
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
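For readers unfamiliar with the accessor style this commit moves to: instead of
dereferencing request_queue fields such as q->max_sectors, callers now read the
limits through small wrapper functions and set them through the existing
blk_queue_* helpers. The sketch below illustrates the assumed shape of the
read-side wrappers and the caller pattern; the authoritative definitions live in
include/linux/blkdev.h, and clamp_queue_to_one_page() is a hypothetical helper
invented here to mirror the one-page clamp repeated in the md drivers below.

/*
 * Illustrative sketch only -- assumed shape of the read-side accessors
 * used throughout this patch.  The real definitions are provided by
 * include/linux/blkdev.h; at this point in the series they are thin
 * wrappers over the request_queue limit fields that the removed lines
 * below used to read directly.
 */
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->max_sectors;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
	return q->max_phys_segments;
}

/*
 * Hypothetical caller, mirroring the md drivers' one-page clamp:
 * read through the accessor, write through the blk_queue_* setter.
 */
static void clamp_queue_to_one_page(struct request_queue *q)
{
	/* old style: if (q->max_sectors > (PAGE_SIZE>>9)) q->max_sectors = PAGE_SIZE>>9; */
	if (queue_max_sectors(q) > (PAGE_SIZE >> 9))
		blk_queue_max_sectors(q, PAGE_SIZE >> 9);
}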
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c  | 28
-rw-r--r--  drivers/md/linear.c    |  2
-rw-r--r--  drivers/md/multipath.c |  4
-rw-r--r--  drivers/md/raid0.c     |  2
-rw-r--r--  drivers/md/raid1.c     |  4
-rw-r--r--  drivers/md/raid10.c    |  8
-rw-r--r--  drivers/md/raid5.c     |  4
7 files changed, 26 insertions, 26 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 65e2d9759857..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a1..64f1f3e046e0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe823..4ee31aa13c40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	 * merge_bvec_fn will be involved in multipath.)
 	 */
 	if (q->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(q) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 	 * violating it, not that we ever expect a device with
 	 * a merge_bvec_fn to be involved in multipath */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be55..925507e7d673 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 	 */
 
 	if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde1..e23758b4a34e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44b..750550c1166f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-		mddev->queue->max_sectors = (PAGE_SIZE>>9);
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	p->head_position = 0;
 	rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-		mddev->queue->max_sectors = (PAGE_SIZE>>9);
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	disk->head_position = 0;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e71..7970dc8c522e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)