about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Martin K. Petersen <martin.petersen@oracle.com> 2009-05-22 17:17:51 -0400
committer: Jens Axboe <jens.axboe@oracle.com> 2009-05-22 17:22:55 -0400
commit025146e13b63483add912706c101fb0fb6f015cc (patch)
tree42d2d42e2222f0a2d6373a0ddf0cbf733f75dcba
parentae03bf639a5027d27270123f5f6e3ee6a412781d (diff)
block: Move queue limits to an embedded struct
To accommodate stacking drivers that do not have an associated request queue we're moving the limits to a separate, embedded structure. Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--block/blk-settings.c55
-rw-r--r--include/linux/blkdev.h44
2 files changed, 60 insertions, 39 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 0b32f984eed2..b0f547cecfb8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
179 */ 179 */
180 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 180 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
181 dma = 1; 181 dma = 1;
182 q->bounce_pfn = max_low_pfn; 182 q->limits.bounce_pfn = max_low_pfn;
183#else 183#else
184 if (b_pfn < blk_max_low_pfn) 184 if (b_pfn < blk_max_low_pfn)
185 dma = 1; 185 dma = 1;
186 q->bounce_pfn = b_pfn; 186 q->limits.bounce_pfn = b_pfn;
187#endif 187#endif
188 if (dma) { 188 if (dma) {
189 init_emergency_isa_pool(); 189 init_emergency_isa_pool();
190 q->bounce_gfp = GFP_NOIO | GFP_DMA; 190 q->bounce_gfp = GFP_NOIO | GFP_DMA;
191 q->bounce_pfn = b_pfn; 191 q->limits.bounce_pfn = b_pfn;
192 } 192 }
193} 193}
194EXPORT_SYMBOL(blk_queue_bounce_limit); 194EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -211,10 +211,10 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
211 } 211 }
212 212
213 if (BLK_DEF_MAX_SECTORS > max_sectors) 213 if (BLK_DEF_MAX_SECTORS > max_sectors)
214 q->max_hw_sectors = q->max_sectors = max_sectors; 214 q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
215 else { 215 else {
216 q->max_sectors = BLK_DEF_MAX_SECTORS; 216 q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
217 q->max_hw_sectors = max_sectors; 217 q->limits.max_hw_sectors = max_sectors;
218 } 218 }
219} 219}
220EXPORT_SYMBOL(blk_queue_max_sectors); 220EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -222,9 +222,9 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
222void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) 222void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
223{ 223{
224 if (BLK_DEF_MAX_SECTORS > max_sectors) 224 if (BLK_DEF_MAX_SECTORS > max_sectors)
225 q->max_hw_sectors = BLK_DEF_MAX_SECTORS; 225 q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
226 else 226 else
227 q->max_hw_sectors = max_sectors; 227 q->limits.max_hw_sectors = max_sectors;
228} 228}
229EXPORT_SYMBOL(blk_queue_max_hw_sectors); 229EXPORT_SYMBOL(blk_queue_max_hw_sectors);
230 230
@@ -247,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
247 __func__, max_segments); 247 __func__, max_segments);
248 } 248 }
249 249
250 q->max_phys_segments = max_segments; 250 q->limits.max_phys_segments = max_segments;
251} 251}
252EXPORT_SYMBOL(blk_queue_max_phys_segments); 252EXPORT_SYMBOL(blk_queue_max_phys_segments);
253 253
@@ -271,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
271 __func__, max_segments); 271 __func__, max_segments);
272 } 272 }
273 273
274 q->max_hw_segments = max_segments; 274 q->limits.max_hw_segments = max_segments;
275} 275}
276EXPORT_SYMBOL(blk_queue_max_hw_segments); 276EXPORT_SYMBOL(blk_queue_max_hw_segments);
277 277
@@ -292,7 +292,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
292 __func__, max_size); 292 __func__, max_size);
293 } 293 }
294 294
295 q->max_segment_size = max_size; 295 q->limits.max_segment_size = max_size;
296} 296}
297EXPORT_SYMBOL(blk_queue_max_segment_size); 297EXPORT_SYMBOL(blk_queue_max_segment_size);
298 298
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
308 **/ 308 **/
309void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) 309void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
310{ 310{
311 q->logical_block_size = size; 311 q->limits.logical_block_size = size;
312} 312}
313EXPORT_SYMBOL(blk_queue_logical_block_size); 313EXPORT_SYMBOL(blk_queue_logical_block_size);
314 314
@@ -325,14 +325,27 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
325void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 325void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
326{ 326{
327 /* zero is "infinity" */ 327 /* zero is "infinity" */
328 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 328 t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
329 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 329 queue_max_sectors(b));
330 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); 330
331 331 t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
332 t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); 332 queue_max_hw_sectors(b));
333 t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); 333
334 t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); 334 t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
335 t->logical_block_size = max(t->logical_block_size, b->logical_block_size); 335 queue_segment_boundary(b));
336
337 t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
338 queue_max_phys_segments(b));
339
340 t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
341 queue_max_hw_segments(b));
342
343 t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
344 queue_max_segment_size(b));
345
346 t->limits.logical_block_size = max(queue_logical_block_size(t),
347 queue_logical_block_size(b));
348
336 if (!t->queue_lock) 349 if (!t->queue_lock)
337 WARN_ON_ONCE(1); 350 WARN_ON_ONCE(1);
338 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 351 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@@ -430,7 +443,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
430 __func__, mask); 443 __func__, mask);
431 } 444 }
432 445
433 q->seg_boundary_mask = mask; 446 q->limits.seg_boundary_mask = mask;
434} 447}
435EXPORT_SYMBOL(blk_queue_segment_boundary); 448EXPORT_SYMBOL(blk_queue_segment_boundary);
436 449
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 29b48f7b4ba8..b7bb6fdba12c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -307,6 +307,21 @@ struct blk_cmd_filter {
307 struct kobject kobj; 307 struct kobject kobj;
308}; 308};
309 309
310struct queue_limits {
311 unsigned long bounce_pfn;
312 unsigned long seg_boundary_mask;
313
314 unsigned int max_hw_sectors;
315 unsigned int max_sectors;
316 unsigned int max_segment_size;
317
318 unsigned short logical_block_size;
319 unsigned short max_hw_segments;
320 unsigned short max_phys_segments;
321
322 unsigned char no_cluster;
323};
324
310struct request_queue 325struct request_queue
311{ 326{
312 /* 327 /*
@@ -358,7 +373,6 @@ struct request_queue
358 /* 373 /*
359 * queue needs bounce pages for pages above this limit 374 * queue needs bounce pages for pages above this limit
360 */ 375 */
361 unsigned long bounce_pfn;
362 gfp_t bounce_gfp; 376 gfp_t bounce_gfp;
363 377
364 /* 378 /*
@@ -387,14 +401,6 @@ struct request_queue
387 unsigned int nr_congestion_off; 401 unsigned int nr_congestion_off;
388 unsigned int nr_batching; 402 unsigned int nr_batching;
389 403
390 unsigned int max_sectors;
391 unsigned int max_hw_sectors;
392 unsigned short max_phys_segments;
393 unsigned short max_hw_segments;
394 unsigned short logical_block_size;
395 unsigned int max_segment_size;
396
397 unsigned long seg_boundary_mask;
398 void *dma_drain_buffer; 404 void *dma_drain_buffer;
399 unsigned int dma_drain_size; 405 unsigned int dma_drain_size;
400 unsigned int dma_pad_mask; 406 unsigned int dma_pad_mask;
@@ -410,6 +416,8 @@ struct request_queue
410 struct timer_list timeout; 416 struct timer_list timeout;
411 struct list_head timeout_list; 417 struct list_head timeout_list;
412 418
419 struct queue_limits limits;
420
413 /* 421 /*
414 * sg stuff 422 * sg stuff
415 */ 423 */
@@ -991,45 +999,45 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
991 999
992static inline unsigned long queue_bounce_pfn(struct request_queue *q) 1000static inline unsigned long queue_bounce_pfn(struct request_queue *q)
993{ 1001{
994 return q->bounce_pfn; 1002 return q->limits.bounce_pfn;
995} 1003}
996 1004
997static inline unsigned long queue_segment_boundary(struct request_queue *q) 1005static inline unsigned long queue_segment_boundary(struct request_queue *q)
998{ 1006{
999 return q->seg_boundary_mask; 1007 return q->limits.seg_boundary_mask;
1000} 1008}
1001 1009
1002static inline unsigned int queue_max_sectors(struct request_queue *q) 1010static inline unsigned int queue_max_sectors(struct request_queue *q)
1003{ 1011{
1004 return q->max_sectors; 1012 return q->limits.max_sectors;
1005} 1013}
1006 1014
1007static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 1015static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1008{ 1016{
1009 return q->max_hw_sectors; 1017 return q->limits.max_hw_sectors;
1010} 1018}
1011 1019
1012static inline unsigned short queue_max_hw_segments(struct request_queue *q) 1020static inline unsigned short queue_max_hw_segments(struct request_queue *q)
1013{ 1021{
1014 return q->max_hw_segments; 1022 return q->limits.max_hw_segments;
1015} 1023}
1016 1024
1017static inline unsigned short queue_max_phys_segments(struct request_queue *q) 1025static inline unsigned short queue_max_phys_segments(struct request_queue *q)
1018{ 1026{
1019 return q->max_phys_segments; 1027 return q->limits.max_phys_segments;
1020} 1028}
1021 1029
1022static inline unsigned int queue_max_segment_size(struct request_queue *q) 1030static inline unsigned int queue_max_segment_size(struct request_queue *q)
1023{ 1031{
1024 return q->max_segment_size; 1032 return q->limits.max_segment_size;
1025} 1033}
1026 1034
1027static inline unsigned short queue_logical_block_size(struct request_queue *q) 1035static inline unsigned short queue_logical_block_size(struct request_queue *q)
1028{ 1036{
1029 int retval = 512; 1037 int retval = 512;
1030 1038
1031 if (q && q->logical_block_size) 1039 if (q && q->limits.logical_block_size)
1032 retval = q->logical_block_size; 1040 retval = q->limits.logical_block_size;
1033 1041
1034 return retval; 1042 return retval;
1035} 1043}