Diffstat (limited to 'block/blk-settings.c')
 -rw-r--r--  block/blk-settings.c | 105 ++++++++++----------------------
 1 file changed, 33 insertions(+), 72 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 78549c723783..31e7a9375c13 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
@@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
- * @max_sectors:  max sectors in the usual 512b unit
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
+ *    Enables a low level driver to set a hard upper limit,
+ *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+ *    the device driver based upon the combined capabilities of I/O
+ *    controller and storage device.
+ *
+ *    max_sectors is a soft limit imposed by the block layer for
+ *    filesystem type requests.  This value can be overridden on a
+ *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_sectors);
+		       __func__, max_hw_sectors);
 	}
 
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
-	else {
-		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
-		q->limits.max_hw_sectors = max_sectors;
-	}
-}
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
-{
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
-	else
-		q->limits.max_hw_sectors = max_sectors;
+	q->limits.max_hw_sectors = max_hw_sectors;
+	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+				      BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
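With the old soft/hard pair folded into a single setter, a driver now declares only the hardware ceiling and the block layer derives the filesystem soft limit itself. A minimal sketch of the resulting call pattern, assuming a hypothetical driver and an illustrative 8192-sector controller limit (neither is from this patch):

```c
#include <linux/blkdev.h>

/* Hypothetical driver setup, sketch only. */
static void example_set_request_size(struct request_queue *q)
{
	/* Hardware can move up to 8192 x 512b sectors per request. */
	blk_queue_max_hw_sectors(q, 8192);

	/*
	 * After the call above:
	 *   q->limits.max_hw_sectors == 8192
	 *   q->limits.max_sectors    == min(8192, BLK_DEF_MAX_SECTORS)
	 * and the soft limit can later be raised, up to max_hw_sectors,
	 * through /sys/block/<device>/queue/max_sectors_kb.
	 */
}
```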
@@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
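For callers, the consolidation collapses what used to be two identically-valued calls into one. A hedged before/after sketch of a driver's queue setup (the function name and the value 128 are illustrative assumptions, not taken from the patch):

```c
/* Sketch only: segment limit setup before and after this change. */
static void example_set_segment_limit(struct request_queue *q)
{
	/*
	 * Before: both limits had to be set, typically to the same value:
	 *   blk_queue_max_phys_segments(q, 128);
	 *   blk_queue_max_hw_segments(q, 128);
	 *
	 * After: a single limit describes the largest scatter-gather list
	 * the device and controller can take.
	 */
	blk_queue_max_segments(q, 128);
}
```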
@@ -536,11 +504,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
@@ -744,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
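Since blk_queue_dma_drain() now reserves its extra scatterlist entry from the single segment limit, ordering still matters: set the segment limit first, or account for the reserved slot if setting it later. A sketch under assumed names (HW_SG_ENTRIES, the drain buffer, and the callback are placeholders):

```c
/* Sketch only: drain-buffer setup under the unified segment limit. */
#define HW_SG_ENTRIES	64	/* assumed controller scatter-gather depth */

static int example_init_drain(struct request_queue *q,
			      dma_drain_needed_fn *needed,
			      void *drain_buf, unsigned int drain_len)
{
	int ret;

	blk_queue_max_segments(q, HW_SG_ENTRIES);

	/* Reserves one entry: max_segments drops to HW_SG_ENTRIES - 1. */
	ret = blk_queue_dma_drain(q, needed, drain_buf, drain_len);
	if (ret)
		return ret;

	/*
	 * Any later blk_queue_max_segments() call must pass one fewer than
	 * the hardware supports, as the updated comment above warns.
	 */
	return 0;
}
```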