author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 12:00:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 12:00:29 -0500
commit		b1bf9368407ae7e89d8a005bb40beb70a41df539
tree		3815c8aab19c6c186736673c624fef5f3faab716 /block/blk-settings.c
parent		524df55725217b13d5a232fb5badb5846418ea0e
parent		4671a1322052425afa38fcb7980d2fd2bb0fc99b
Merge branch 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block: (38 commits)
block: don't access jiffies when initialising io_context
cfq: remove 8 bytes of padding from cfq_rb_root on 64 bit builds
block: fix for "Consolidate phys_segment and hw_segment limits"
cfq-iosched: quantum check tweak
blktrace: perform cleanup after setup error
blkdev: fix merge_bvec_fn return value checks
cfq-iosched: requests "in flight" vs "in driver" clarification
cciss: Fix problem with scatter gather elements in the scsi half of the driver
cciss: eliminate unnecessary pointer use in cciss scsi code
cciss: do not use void pointer for scsi hba data
cciss: factor out scatter gather chain block mapping code
cciss: fix scatter gather chain block dma direction kludge
cciss: simplify scatter gather code
cciss: factor out scatter gather chain block allocation and freeing
cciss: detect bad alignment of scsi commands at build time
cciss: clarify command list padding calculation
cfq-iosched: rethink seeky detection for SSDs
cfq-iosched: rework seeky detection
block: remove padding from io_context on 64bit builds
block: Consolidate phys_segment and hw_segment limits
...
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--	block/blk-settings.c	131
1 file changed, 42 insertions(+), 89 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5eeb9e0d256e..31e7a9375c13 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
@@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
- * @max_sectors:  max sectors in the usual 512b unit
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
+ *    Enables a low level driver to set a hard upper limit,
+ *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+ *    the device driver based upon the combined capabilities of I/O
+ *    controller and storage device.
+ *
+ *    max_sectors is a soft limit imposed by the block layer for
+ *    filesystem type requests.  This value can be overridden on a
+ *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_sectors);
+		       __func__, max_hw_sectors);
 	}
 
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
-	else {
-		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
-		q->limits.max_hw_sectors = max_sectors;
-	}
-}
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
-{
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
-	else
-		q->limits.max_hw_sectors = max_sectors;
+	q->limits.max_hw_sectors = max_hw_sectors;
+	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+				      BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
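The updated kernel-doc above distinguishes the hard limit (max_hw_sectors, stated by the driver) from the soft limit (max_sectors, derived by the block layer and user-tunable). A minimal sketch of how a low-level driver might use the consolidated helper after this change; the driver name and the 1024-sector ceiling are illustrative, not taken from the patch:

#include <linux/blkdev.h>

/* Hypothetical driver setup path; the queue is assumed to exist already. */
static void mydrv_set_transfer_limits(struct request_queue *q)
{
	/* Hardware ceiling: 1024 sectors of 512 B, i.e. 512 KB per request. */
	blk_queue_max_hw_sectors(q, 1024);

	/*
	 * The helper records the hard limit and derives the soft one:
	 *   q->limits.max_hw_sectors == 1024
	 *   q->limits.max_sectors    == min(1024, BLK_DEF_MAX_SECTORS)
	 * max_sectors stays tunable via /sys/block/<device>/queue/max_sectors_kb,
	 * capped by max_hw_sectors.
	 */
}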
@@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
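With blk_queue_max_hw_segments() gone, a driver that used to program both segment limits now makes a single call. A hedged sketch of such a conversion; the driver name and the 128-segment value are illustrative:

#include <linux/blkdev.h>

static void mydrv_set_segment_limit(struct request_queue *q)
{
	/*
	 * Pre-2.6.34 drivers kept two limits in sync by hand:
	 *   blk_queue_max_phys_segments(q, 128);
	 *   blk_queue_max_hw_segments(q, 128);
	 * After this series a single limit describes the largest
	 * scatterlist the device and its controller will accept.
	 */
	blk_queue_max_segments(q, 128);
}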
@@ -507,7 +475,7 @@ static unsigned int lcm(unsigned int a, unsigned int b)
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:  the stacking driver limits (top device)
  * @b:  the underlying queue limits (bottom, component device)
- * @offset:  offset to beginning of data within component device
+ * @start:  first data sector within component device
  *
  * Description:
  *    This function is used by stacking drivers like MD and DM to ensure
@@ -525,10 +493,9 @@ static unsigned int lcm(unsigned int a, unsigned int b)
  *    the alignment_offset is undefined.
  */
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
-		     sector_t offset)
+		     sector_t start)
 {
-	sector_t alignment;
-	unsigned int top, bottom, ret = 0;
+	unsigned int top, bottom, alignment, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -537,18 +504,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					    b->max_segment_size);
 
 	t->misaligned |= b->misaligned;
 
-	alignment = queue_limit_alignment_offset(b, offset);
+	alignment = queue_limit_alignment_offset(b, start);
 
 	/* Bottom device has different alignment.  Check that it is
 	 * compatible with the current top alignment.
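Each limit above is merged with min_not_zero(), so a zero ("unset") value in either queue never clobbers a real restriction from the other side. A small sketch of that rule in isolation, assuming only that both operands are plain unsigned integers; the helper name is made up for illustration:

/* min_not_zero() semantics as used above: 0 means "no limit", so prefer
 * the non-zero operand, otherwise take the smaller of the two. */
static unsigned int stack_one_limit(unsigned int top, unsigned int bottom)
{
	if (!top)
		return bottom;
	if (!bottom)
		return top;
	return top < bottom ? top : bottom;
}

/* e.g. stack_one_limit(0, 128) == 128, stack_one_limit(64, 128) == 64 */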
@@ -611,11 +574,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
-		unsigned int granularity = b->discard_granularity;
-		offset &= granularity - 1;
-
-		alignment = (granularity + b->discard_alignment - offset)
-			& (granularity - 1);
+		alignment = queue_limit_discard_alignment(b, start);
 
 		if (t->discard_granularity != 0 &&
 		    t->discard_alignment != alignment) {
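The arithmetic removed in this hunk moves behind queue_limit_discard_alignment(). For reference, the open-coded computation being factored out amounts to the following sketch, written directly from the removed lines (a restatement for illustration, not the helper's actual body):

/* Equivalent of the removed open-coded math: distance to the next
 * discard-granularity boundary relative to the component's data start. */
static unsigned int old_discard_alignment(struct queue_limits *b,
					  unsigned int offset)
{
	unsigned int granularity = b->discard_granularity;

	offset &= granularity - 1;
	return (granularity + b->discard_alignment - offset) &
	       (granularity - 1);
}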
@@ -657,7 +616,7 @@ int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
 
 	start += get_start_sect(bdev);
 
-	return blk_stack_limits(t, &bq->limits, start << 9);
+	return blk_stack_limits(t, &bq->limits, start);
 }
 EXPORT_SYMBOL(bdev_stack_limits);
 
@@ -668,9 +627,8 @@ EXPORT_SYMBOL(bdev_stack_limits);
  * @offset:  offset to beginning of data within component device
  *
  * Description:
- *    Merges the limits for two queues.  Returns 0 if alignment
- *    didn't change.  Returns -1 if adding the bottom device caused
- *    misalignment.
+ *    Merges the limits for a top level gendisk and a bottom level
+ *    block_device.
  */
 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
@@ -678,9 +636,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 	struct request_queue *t = disk->queue;
 	struct request_queue *b = bdev_get_queue(bdev);
 
-	offset += get_start_sect(bdev) << 9;
-
-	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
 
 		disk_name(disk, 0, top);
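The net effect of the stacking hunks is a consistent unit convention: disk_stack_limits() still takes a byte offset, but everything below it now works in 512-byte sectors (bdev_stack_limits() adds the partition start before calling blk_stack_limits()). A sketch of how a DM/MD-style stacking driver would end up on this path; the function and parameter names are illustrative:

#include <linux/blkdev.h>

/* Stacking driver pulling one component device's limits into the top disk. */
static void mystack_add_component(struct gendisk *top_disk,
				  struct block_device *component,
				  sector_t data_offset_bytes)
{
	/*
	 * disk_stack_limits() shifts the byte offset down to sectors, lets
	 * bdev_stack_limits() add get_start_sect(), and blk_stack_limits()
	 * merges the queue_limits.  A warning is printed if the component's
	 * alignment is incompatible with what is already stacked.
	 */
	disk_stack_limits(top_disk, component, data_offset_bytes);
}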
@@ -752,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
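Callers of blk_queue_dma_drain() only have the single segment limit to worry about now. A hedged sketch of a driver reserving a drain buffer in the style the updated note describes; the callback policy, buffer and 512-byte size are illustrative:

#include <linux/blkdev.h>

/* Decide per-request whether the drain buffer must be appended;
 * here: only for SCSI-style packet commands (illustrative policy). */
static int mydrv_drain_needed(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_BLOCK_PC;
}

static int mydrv_setup_drain(struct request_queue *q, void *drain_buf)
{
	int ret;

	/* Reserves one segment internally; fails if fewer than two exist. */
	ret = blk_queue_dma_drain(q, mydrv_drain_needed, drain_buf, 512);
	if (ret)
		return ret;

	/*
	 * Any later blk_queue_max_segments() call must pass one fewer than
	 * the hardware supports, exactly as the note above says.
	 */
	return 0;
}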