author		Martin K. Petersen <martin.petersen@oracle.com>	2010-02-26 00:20:39 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-02-26 07:58:08 -0500
commit		8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (patch)
tree		c095d95af1aec0f9cee5975b1dcdc6bc1d17d401 /block
parent		086fa5ff0854c676ec333760f4c0154b3b242616 (diff)
block: Consolidate phys_segment and hw_segment limits
Except for SCSI, no device drivers distinguish between physical and
hardware segment limits. Consolidate the two into a single segment
limit.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
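For illustration, a sketch of what the consolidation means on the driver side; the queue pointer and segment count are hypothetical, not taken from this patch:

	/* Before: physical and hardware segment limits had to be set separately. */
	blk_queue_max_phys_segments(q, 128);	/* longest scatterlist the driver builds */
	blk_queue_max_hw_segments(q, 128);	/* address/length pairs the adapter accepts */

	/* After: one consolidated limit covers both. */
	blk_queue_max_segments(q, 128);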
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	3
-rw-r--r--	block/blk-merge.c	8
-rw-r--r--	block/blk-settings.c	60
3 files changed, 16 insertions, 55 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 36c0deebc2dc..9fe174dc74d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1614,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
+	if (rq->nr_phys_segments > queue_max_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..5e7dc9973458 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > queue_max_phys_segments(q))
-		return 0;
-
-	if (total_phys_segments > queue_max_hw_segments(q))
+	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
 	/* Merge is OK... */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 61afae9dbc6d..31e7a9375c13 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -252,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -270,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
@@ -531,11 +504,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					    b->max_segment_size);
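blk_stack_limits() combines limits with min_not_zero(), so a stacking driver (e.g. DM or MD) inherits the most restrictive segment count of its component devices. A minimal sketch, assuming hypothetical component queues q_a and q_b and a top-level queue_limits structure, with an offset of 0 standing in for an unshifted component:

	struct queue_limits top_limits;

	blk_queue_max_segments(q_a, 128);
	blk_queue_max_segments(q_b, 64);

	blk_set_default_limits(&top_limits);
	blk_stack_limits(&top_limits, &q_a->limits, 0);
	blk_stack_limits(&top_limits, &q_b->limits, 0);
	/* top_limits.max_segments is now 64, the strictest of the two. */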
@@ -739,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
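As the note above says, a driver that reserves a drain buffer and then sets its segment limit must leave one segment spare. A minimal sketch, assuming a hypothetical HW_MAX_SEGS hardware limit and a driver-supplied callback (names here are made up, not part of this patch):

	#include <linux/blkdev.h>

	/* Hypothetical policy: only BLOCK_PC (SCSI passthrough) requests need the drain. */
	static int my_drain_needed(struct request *rq)
	{
		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
	}

	static int my_init_queue(struct request_queue *q, void *drain_buf)
	{
		int ret;

		/* Reserves one segment by lowering the queue's limit internally. */
		ret = blk_queue_dma_drain(q, my_drain_needed, drain_buf, PAGE_SIZE);
		if (ret)
			return ret;

		/* Hardware handles HW_MAX_SEGS entries; leave one for the drain. */
		blk_queue_max_segments(q, HW_MAX_SEGS - 1);
		return 0;
	}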