author     Linus Torvalds <torvalds@linux-foundation.org>  2010-12-20 12:19:46 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-12-20 12:19:46 -0500
commit     7f8635cc9e66a26d7280ba680b044fa2f65104af (patch)
tree       0d8506e86d07e15c473aca1a09af7ad6ff7d8b49 /block
parent     3cb50ddf97a0a1ca4c68bc12fa1e727a6b45fbf2 (diff)
parent     0fc13c8995cd96f4123de400c71c223d80400ed9 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
cciss: fix cciss_revalidate panic
block: max hardware sectors limit wrapper
block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
blk-throttle: Correct the placement of smp_rmb()
blk-throttle: Trim/adjust slice_end once a bio has been dispatched
block: check for proper length of iov entries earlier in blk_rq_map_user_iov()
drbd: fix for spin_lock_irqsave in endio callback
drbd: don't recvmsg with zero length
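Most of the churn in the block/ diffs below comes from the QUEUE_FLAG_CLUSTER deprecation: the test_bit() calls on q->queue_flags are replaced by a blk_queue_cluster() accessor backed by a new queue_limits.cluster field, which defaults to 1 and is AND-ed when limits are stacked. The accessor itself is added outside the 'block' diffstat shown here; a minimal sketch, assuming it sits with the other queue helpers in include/linux/blkdev.h:

```c
/* Sketch only -- the real helper is added outside this diffstat
 * (presumably in include/linux/blkdev.h) by the QUEUE_FLAG_CLUSTER patch. */
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;	/* new queue_limits field, replaces no_cluster */
}
```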
Diffstat (limited to 'block')

-rw-r--r--  block/blk-map.c        5
-rw-r--r--  block/blk-merge.c      6
-rw-r--r--  block/blk-settings.c  51
-rw-r--r--  block/blk-sysfs.c      2
-rw-r--r--  block/blk-throttle.c  39

5 files changed, 54 insertions, 49 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index 5d5dbe47c228..e663ac2d8e68 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+		if (!iov[i].iov_len)
+			return -EINVAL;
+
 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
 		}
-		if (!iov[i].iov_len)
-			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)
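The reordering above matters because the alignment test can break out of the loop: with the old ordering, one unaligned entry ended the scan early and a zero-length iovec further down the array was never rejected before mapping. A user-space analogue of the fixed ordering; validate_iov() and align_mask are made-up names standing in for the kernel loop and queue_dma_alignment(q):

```c
#include <errno.h>
#include <sys/uio.h>

/* Illustrative only: mirrors the reordered kernel loop -- reject empty
 * segments before the alignment check, so the early "break" can no longer
 * skip length validation of later entries. */
static int validate_iov(const struct iovec *iov, int iov_count,
			unsigned long align_mask, int *unaligned)
{
	int i;

	*unaligned = 0;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)		/* now checked first */
			return -EINVAL;

		if (uaddr & align_mask) {	/* stand-in for queue_dma_alignment(q) */
			*unaligned = 1;
			break;
		}
	}
	return 0;
}
```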
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26df6b5..74bc4a768f32 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859fb9647..36c8c1f2af18 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-				      BLK_DEF_MAX_SECTORS);
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -464,15 +478,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
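With the flag now positive (cluster defaults to 1) the stacking rule becomes an AND: any bottom device that cannot merge segments clears clustering for the whole stack, the same behaviour the old `t->no_cluster |= b->no_cluster` expressed with inverted polarity, and the reason the locked queue-flag propagation above can go away. A toy check of that equivalence with made-up per-device values:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char bottom_cluster[] = { 1, 0, 1 };	/* hypothetical stack; middle device cannot cluster */
	unsigned char cluster = 1;			/* new default from blk_set_default_limits() */
	unsigned char no_cluster = 0;			/* old default */
	int i;

	for (i = 0; i < 3; i++) {
		cluster &= bottom_cluster[i];		/* new rule */
		no_cluster |= !bottom_cluster[i];	/* old rule, inverted flag */
	}

	assert(cluster == !no_cluster);			/* same outcome, opposite sense */
	printf("cluster=%u no_cluster=%u\n", cluster, no_cluster);
	return 0;
}
```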
@@ -641,7 +646,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +656,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 013457f47fdc..41fb69150b4d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 004be80fd894..381b09bb562b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 			tg->slice_end[rw], jiffies);
 }
 
+static inline void throtl_set_slice_end(struct throtl_data *td,
+		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
 static inline void throtl_extend_slice(struct throtl_data *td,
 		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 {
@@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 	if (throtl_slice_used(td, tg, rw))
 		return;
 
+	/*
+	 * A bio has been dispatched. Also adjust slice_end. It might happen
+	 * that initially cgroup limit was very low resulting in high
+	 * slice_end, but later limit was bumped up and bio was dispached
+	 * sooner, then we need to reduce slice_end. A high bogus slice_end
+	 * is bad because it does not allow new slice to start.
+	 */
+
+	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
 	time_elapsed = jiffies - tg->slice_start[rw];
 
 	nr_slices = time_elapsed / throtl_slice;
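The comment's scenario in concrete, made-up numbers: with HZ=1000 the default throtl_slice is HZ/10 = 100 jiffies, and a large bio under a very low bps limit can push slice_end many seconds into the future; if the limit is then raised and the bio dispatches immediately, the new throtl_set_slice_end() pulls slice_end back to roughly jiffies + throtl_slice so a fresh slice can start. A stand-alone sketch of the arithmetic, with roundup() reimplemented for user space:

```c
#include <stdio.h>

/* user-space stand-in for the kernel's roundup() */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long throtl_slice = 100;		/* HZ/10 at HZ=1000 (default) */
	unsigned long jiffies = 500000;			/* pretend current time */
	unsigned long stale_end = jiffies + 100000;	/* extended for a very low limit */
	unsigned long trimmed = ROUNDUP(jiffies + throtl_slice, throtl_slice);

	printf("stale slice_end = +%lu jiffies, trimmed = +%lu jiffies\n",
	       stale_end - jiffies, trimmed - jiffies);	/* +100000 vs +100 */
	return 0;
}
```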
@@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td)
 	struct throtl_grp *tg;
 	struct hlist_node *pos, *n;
 
-	/*
-	 * Make sure atomic_inc() effects from
-	 * throtl_update_blkio_group_read_bps(), group of functions are
-	 * visible.
-	 * Is this required or smp_mb__after_atomic_inc() was suffcient
-	 * after the atomic_inc().
-	 */
-	smp_rmb();
 	if (!atomic_read(&td->limits_changed))
 		return;
 
 	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-		/*
-		 * Do I need an smp_rmb() here to make sure tg->limits_changed
-		 * update is visible. I am relying on smp_rmb() at the
-		 * beginning of function and not putting a new one here.
-		 */
+	/*
+	 * Make sure updates from throtl_update_blkio_group_read_bps() group
+	 * of functions to tg->limits_changed are visible. We do not
+	 * want update td->limits_changed to be visible but update to
+	 * tg->limits_changed not being visible yet on this cpu. Hence
+	 * the read barrier.
+	 */
+	smp_rmb();
 
+	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
 		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
 			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 				" riops=%u wiops=%u", tg->bps[READ],