Diffstat (limited to 'block/blk-settings.c')

 block/blk-settings.c | 107 +++++++++++++++++++++++++++-----------------------
 1 file changed, 55 insertions(+), 52 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a234f4bf1d6f..fa1eb0449a05 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -111,6 +111,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 void blk_set_default_limits(struct queue_limits *lim)
 {
         lim->max_segments = BLK_MAX_SEGMENTS;
+        lim->max_integrity_segments = 0;
         lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
         lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
         lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -119,13 +120,13 @@ void blk_set_default_limits(struct queue_limits *lim)
         lim->discard_granularity = 0;
         lim->discard_alignment = 0;
         lim->discard_misaligned = 0;
-        lim->discard_zeroes_data = -1;
+        lim->discard_zeroes_data = 1;
         lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
         lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
         lim->alignment_offset = 0;
         lim->io_opt = 0;
         lim->misaligned = 0;
-        lim->no_cluster = 0;
+        lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
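Note on the two flag flips above: discard_zeroes_data and cluster are now seeded to 1 (capable) so that a stacking driver can start from blk_set_default_limits() and AND in each bottom device via blk_stack_limits(); any component lacking the capability clears the bit (blk_queue_make_request() below re-clears discard_zeroes_data for freshly allocated, non-stacked queues). A minimal sketch of that pattern, where my_bdevs and nr_bdevs are hypothetical names:

    struct queue_limits lim;
    int i;

    blk_set_default_limits(&lim);   /* cluster = 1, discard_zeroes_data = 1 */
    for (i = 0; i < nr_bdevs; i++)
        blk_stack_limits(&lim, &bdev_get_queue(my_bdevs[i])->limits, 0);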
@@ -163,23 +164,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         blk_queue_congestion_threshold(q);
         q->nr_batching = BLK_BATCH_REQ;
 
-        q->unplug_thresh = 4;           /* hmm */
-        q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
-        if (q->unplug_delay == 0)
-                q->unplug_delay = 1;
-
-        q->unplug_timer.function = blk_unplug_timeout;
-        q->unplug_timer.data = (unsigned long)q;
-
         blk_set_default_limits(&q->limits);
         blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
-
-        /*
-         * If the caller didn't supply a lock, fall back to our embedded
-         * per-queue locks
-         */
-        if (!q->queue_lock)
-                q->queue_lock = &q->__queue_lock;
+        q->limits.discard_zeroes_data = 0;
 
         /*
          * by default assume old behaviour and bounce for any highmem page
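The two removed blocks are related cleanups: the per-queue unplug threshold/timer went away when mainline moved to on-stack plugging (the submitter batches requests explicitly), and the queue_lock fallback appears to have moved to queue-allocation time. A hedged sketch of the caller-side plugging pattern that replaced the timer, assuming the blk_start_plug()/blk_finish_plug() API from that era:

    struct blk_plug plug;

    blk_start_plug(&plug);   /* bios submitted now are held on the plug */
    submit_bio(WRITE, bio);  /* ...submit as many as needed... */
    blk_finish_plug(&plug);  /* hand the whole batch to the driver */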
@@ -213,7 +200,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
          */
         if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                 dma = 1;
-        q->limits.bounce_pfn = max_low_pfn;
+        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
 #else
         if (b_pfn < blk_max_low_pfn)
                 dma = 1;
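The fix above stops bounce_pfn from being pulled down to max_low_pfn on highmem configs even when the device can reach higher; the larger of the two now wins. Driver-side usage is unchanged; a minimal sketch:

    /* Device can only DMA below 4 GiB: bounce anything above it. */
    blk_queue_bounce_limit(q, DMA_BIT_MASK(32));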
@@ -228,8 +215,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -243,7 +230,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
         if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -251,9 +238,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
                        __func__, max_hw_sectors);
         }
 
-        q->limits.max_hw_sectors = max_hw_sectors;
-        q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-                                      BLK_DEF_MAX_SECTORS);
+        limits->max_hw_sectors = max_hw_sectors;
+        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+                                    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
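Splitting the helper out lets code that manipulates a bare struct queue_limits (no request_queue allocated yet) apply the same PAGE_CACHE_SIZE clamping. A minimal sketch, with the 1024-sector value chosen for illustration:

    struct queue_limits lim;

    blk_set_default_limits(&lim);
    blk_limits_max_hw_sectors(&lim, 1024);  /* cap requests at 512 KiB */

    /* Drivers that do hold a queue keep the old entry point: */
    blk_queue_max_hw_sectors(q, 1024);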
@@ -343,7 +344,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
  *   hardware can operate on without reverting to read-modify-write
  *   operations.
  */
-void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
 {
         q->limits.physical_block_size = size;
 
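Widening the parameter from unsigned short to unsigned int matches the type of physical_block_size in struct queue_limits and removes the 64 KiB ceiling. Typical usage, e.g. for a 512-byte-logical / 4 KiB-physical ("512e") drive:

    blk_queue_logical_block_size(q, 512);
    blk_queue_physical_block_size(q, 4096);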
@@ -455,11 +456,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
 
-/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
 /**
  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
  * @t:  the stacking driver (top)
@@ -468,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
         blk_stack_limits(&t->limits, &b->limits, 0);
-
-        if (!t->queue_lock)
-                WARN_ON_ONCE(1);
-        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-                unsigned long flags;
-                spin_lock_irqsave(t->queue_lock, flags);
-                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-                spin_unlock_irqrestore(t->queue_lock, flags);
-        }
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
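The locking dance removed here existed only to propagate QUEUE_FLAG_CLUSTER from bottom to top under the queue lock. With clustering now a plain field in queue_limits (see the cluster changes elsewhere in this diff), blk_stack_limits() carries it as ordinary data and no lock is needed. A hedged sketch of the reader-side helper that blkdev.h gained along these lines when the flag moved:

    static inline unsigned int blk_queue_cluster(struct request_queue *q)
    {
        return q->limits.cluster;
    }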
@@ -514,6 +501,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                 b->seg_boundary_mask);
 
         t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
+                                                 b->max_integrity_segments);
 
         t->max_segment_size = min_not_zero(t->max_segment_size,
                                            b->max_segment_size);
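min_not_zero() treats zero as "unlimited", so a real constraint always wins when stacking; the local copy of the macro was dropped earlier in this diff, presumably in favor of a shared definition. Worked examples of the calls above:

    min_not_zero(0, 128)   /* == 128: only one side constrains */
    min_not_zero(64, 128)  /* == 64:  smaller real limit wins  */
    min_not_zero(0, 0)     /* == 0:   both sides unlimited     */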
@@ -547,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
         t->io_min = max(t->io_min, b->io_min);
         t->io_opt = lcm(t->io_opt, b->io_opt);
 
-        t->no_cluster |= b->no_cluster;
+        t->cluster &= b->cluster;
         t->discard_zeroes_data &= b->discard_zeroes_data;
 
         /* Physical block size a multiple of the logical block size? */
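Inverting no_cluster into cluster turns the OR of a negative flag into an AND of a positive one, matching discard_zeroes_data on the next line: the top device keeps a capability only if every bottom device has it. Worked example of the stacking arithmetic above for one top/bottom pairing (byte values chosen for illustration):

    io_min:  max(4096, 512)    -> 4096   (largest minimum granularity)
    io_opt:  lcm(65536, 49152) -> 196608 (optimal size for both devices)
    cluster: 1 & 0             -> 0      (one component cannot cluster)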
@@ -643,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                        sector_t offset)
 {
         struct request_queue *t = disk->queue;
-        struct request_queue *b = bdev_get_queue(bdev);
 
         if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -654,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                        top, bottom);
         }
-
-        if (!t->queue_lock)
-                WARN_ON_ONCE(1);
-        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-                unsigned long flags;
-
-                spin_lock_irqsave(t->queue_lock, flags);
-                if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-                        queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-                spin_unlock_irqrestore(t->queue_lock, flags);
-        }
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
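This is the same QUEUE_FLAG_CLUSTER cleanup as in blk_queue_stack_limits(), which also makes the local b queue pointer unused. A stacking driver calls disk_stack_limits() once per component device, passing the byte offset at which that component starts within the top device (hence the >> 9 above). Hedged sketch with hypothetical names:

    /* "md_disk" and "member_bdev" are hypothetical */
    disk_stack_limits(md_disk, member_bdev, (sector_t)start_sector << 9);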
@@ -794,6 +771,32 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+/**
+ * blk_queue_flush - configure queue's cache flush capability
+ * @q:          the request queue for the device
+ * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ *
+ * Tell block layer cache flush capability of @q.  If it supports
+ * flushing, REQ_FLUSH should be set.  If it supports bypassing
+ * write cache for individual writes, REQ_FUA should be set.
+ */
+void blk_queue_flush(struct request_queue *q, unsigned int flush)
+{
+        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+
+        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+                flush &= ~REQ_FUA;
+
+        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush);
+
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+        q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 static int __init blk_settings_init(void)
 {
         blk_max_low_pfn = max_low_pfn - 1;
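Driver-side usage of the two new hooks, as a minimal sketch: a device with a volatile write cache that also honors FUA on individual writes advertises both flags, and a device that cannot tolerate a flush being queued alongside other requests turns queueability off.

    blk_queue_flush(q, REQ_FLUSH | REQ_FUA);  /* cache flush + FUA writes */
    blk_queue_flush_queueable(q, false);      /* flushes must run alone */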