path: root/block
author    Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
committer Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
commit    4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree      0dbf78c95456a0b02d07fcd473281f04a87e266d /block
parent    c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent    90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge branch 'master' into for-next
Conflicts:
	MAINTAINERS
	arch/arm/mach-omap2/pm24xx.c
	drivers/scsi/bfa/bfa_fcpim.c

Needed to update to apply fixes for which the old branch was too outdated.
Diffstat (limited to 'block')
 block/blk-core.c     | 11
 block/blk-ioc.c      | 14
 block/blk-map.c      |  3
 block/blk-merge.c    |  6
 block/blk-settings.c | 51
 block/blk-sysfs.c    |  2
 block/blk-throttle.c | 41
 block/bsg.c          |  8
 block/compat_ioctl.c |  5
 block/elevator.c     |  4
 block/ioctl.c        |  8
 block/scsi_ioctl.c   | 34
 12 files changed, 97 insertions(+), 90 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2f5727..4ce953f1b390 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
-	/* REQ_HARDBARRIER is no more */
-	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
 			bdevname(bio->bi_bdev, b),
 			bio->bi_rw,
 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
-			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
 	set_bit(BIO_EOF, &bio->bi_flags);
 }
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 		return 0;
 
 	/* Test device or partition size, when known. */
-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
 		sector_t sector = bio->bi_sector;
 
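The ->i_size conversions above (and the matching ones in compat_ioctl.c and ioctl.c below) all follow the same pattern: go through i_size_read() instead of dereferencing bd_inode->i_size directly, since i_size_read() is the accessor that protects against torn reads of i_size on 32-bit SMP. A minimal illustrative sketch, not taken from this commit (the helper name is hypothetical):

#include <linux/fs.h>

/* Hypothetical helper for illustration: size of a block device in 512-byte sectors. */
static inline sector_t bdev_sectors_sketch(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> 9;
}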
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c55c406..3c7a339fe381 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 }
 EXPORT_SYMBOL(get_io_context);
 
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-	struct io_context *src = *psrc;
-	struct io_context *dst = *pdst;
-
-	if (src) {
-		BUG_ON(atomic_long_read(&src->refcount) == 0);
-		atomic_long_inc(&src->refcount);
-		put_io_context(dst);
-		*pdst = src;
-	}
-}
-EXPORT_SYMBOL(copy_io_context);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d8691e..e663ac2d8e68 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -201,6 +201,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+		if (!iov[i].iov_len)
+			return -EINVAL;
+
 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
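The added check rejects zero-length segments before any mapping work is done. A hedged usage sketch (buf0, buf1, q and rq are placeholders, not from this commit):

	struct sg_iovec iov[2] = {
		{ .iov_base = buf0, .iov_len = 4096 },
		{ .iov_base = buf1, .iov_len = 0 },	/* zero-length segment */
	};
	int ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, 4096, GFP_KERNEL);
	/* with the hunk above applied, ret == -EINVAL instead of mapping */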
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26df6b5..74bc4a768f32 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
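All three QUEUE_FLAG_CLUSTER tests are replaced by blk_queue_cluster(), which reads the new cluster field added to struct queue_limits in blk-settings.c below. The helper itself is not shown in this diff; presumably it is something along these lines:

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;	/* non-zero: adjacent segments may be merged */
}

Moving the knob into queue_limits is what lets stacked devices combine it with a plain t->cluster &= b->cluster (see the blk_stack_limits() hunk below) instead of clearing a queue flag under the queue lock.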
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859fb9647..36c8c1f2af18 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-				      BLK_DEF_MAX_SECTORS);
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
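The split lets code that only has a struct queue_limits at hand (for example a stacking driver assembling limits before a request_queue exists) set the hardware cap directly, while blk_queue_max_hw_sectors() remains a thin wrapper. A hedged usage sketch, not taken from this commit:

	struct queue_limits lim;

	blk_set_default_limits(&lim);
	blk_limits_max_hw_sectors(&lim, 1024);	/* cap requests at 512 KiB */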
@@ -464,15 +478,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
@@ -641,7 +646,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +656,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 013457f47fdc..41fb69150b4d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 56ad4531b412..381b09bb562b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 			tg->slice_end[rw], jiffies);
 }
 
+static inline void throtl_set_slice_end(struct throtl_data *td,
+		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
 static inline void throtl_extend_slice(struct throtl_data *td,
 		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 {
@@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 	if (throtl_slice_used(td, tg, rw))
 		return;
 
+	/*
+	 * A bio has been dispatched. Also adjust slice_end. It might happen
+	 * that initially cgroup limit was very low resulting in high
+	 * slice_end, but later limit was bumped up and bio was dispached
+	 * sooner, then we need to reduce slice_end. A high bogus slice_end
+	 * is bad because it does not allow new slice to start.
+	 */
+
+	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
 	time_elapsed = jiffies - tg->slice_start[rw];
 
 	nr_slices = time_elapsed / throtl_slice;
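A worked example of the new throtl_set_slice_end() call (throtl_slice == 100 jiffies is an assumed value for illustration): with jiffies == 1234, slice_end becomes roundup(1234 + 100, 100) = 1400, so a far-future slice_end left over from an earlier, much lower limit is pulled back and a fresh slice can start promptly.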
@@ -645,7 +661,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 {
 	unsigned int nr_reads = 0, nr_writes = 0;
 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
-	unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
+	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 	struct bio *bio;
 
 	/* Try to dispatch 75% READS and 25% WRITES */
@@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td)
 	struct throtl_grp *tg;
 	struct hlist_node *pos, *n;
 
-	/*
-	 * Make sure atomic_inc() effects from
-	 * throtl_update_blkio_group_read_bps(), group of functions are
-	 * visible.
-	 * Is this required or smp_mb__after_atomic_inc() was suffcient
-	 * after the atomic_inc().
-	 */
-	smp_rmb();
 	if (!atomic_read(&td->limits_changed))
 		return;
 
 	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-		/*
-		 * Do I need an smp_rmb() here to make sure tg->limits_changed
-		 * update is visible. I am relying on smp_rmb() at the
-		 * beginning of function and not putting a new one here.
-		 */
+	/*
+	 * Make sure updates from throtl_update_blkio_group_read_bps() group
+	 * of functions to tg->limits_changed are visible. We do not
+	 * want update td->limits_changed to be visible but update to
+	 * tg->limits_changed not being visible yet on this cpu. Hence
+	 * the read barrier.
+	 */
+	smp_rmb();
 
+	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
 		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
 			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 					" riops=%u wiops=%u", tg->bps[READ],
diff --git a/block/bsg.c b/block/bsg.c
index f20d6a789d48..0c8b64a16484 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -250,6 +250,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 	int ret, rw;
 	unsigned int dxfer_len;
 	void *dxferp = NULL;
+	struct bsg_class_device *bcd = &q->bsg_dev;
+
+	/* if the LLD has been removed then the bsg_unregister_queue will
+	 * eventually be called and the class_dev was freed, so we can no
+	 * longer use this request_queue. Return no such address.
+	 */
+	if (!bcd->class_dev)
+		return ERR_PTR(-ENXIO);
 
 	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
 		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b74dc0..cc3eb78e333a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -8,7 +8,6 @@
 #include <linux/hdreg.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
-#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
 
@@ -744,13 +743,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
 		return 0;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return compat_put_ulong(arg, size >> 9);
 
 	case BLKGETSIZE64_32:
-		return compat_put_u64(arg, bdev->bd_inode->i_size);
+		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 
 	case BLKTRACESETUP32:
 	case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e8308f7e2..2569512830d3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb1d465..a9a302eba01e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,7 +5,6 @@
 #include <linux/hdreg.h>
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
 #include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 
@@ -125,7 +124,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 	start >>= 9;
 	len >>= 9;
 
-	if (start + len > (bdev->bd_inode->i_size >> 9))
+	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
 		return -EINVAL;
 	if (secure)
 		flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +241,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		 * We need to set the startsect first, the driver may
 		 * want to override it.
 		 */
+		memset(&geo, 0, sizeof(geo));
 		geo.start = get_start_sect(bdev);
 		ret = disk->fops->getgeo(bdev, &geo);
 		if (ret)
@@ -307,12 +307,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		ret = blkdev_reread_part(bdev);
 		break;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return put_ulong(arg, size >> 9);
 	case BLKGETSIZE64:
-		return put_u64(arg, bdev->bd_inode->i_size);
+		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10eb5b0..4f4230b79bb6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->iovec_count) {
 		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 		size_t iov_data_len;
-		struct sg_iovec *iov;
+		struct sg_iovec *sg_iov;
+		struct iovec *iov;
+		int i;
 
-		iov = kmalloc(size, GFP_KERNEL);
-		if (!iov) {
+		sg_iov = kmalloc(size, GFP_KERNEL);
+		if (!sg_iov) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
-		if (copy_from_user(iov, hdr->dxferp, size)) {
-			kfree(iov);
+		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+			kfree(sg_iov);
 			ret = -EFAULT;
 			goto out;
 		}
 
+		/*
+		 * Sum up the vecs, making sure they don't overflow
+		 */
+		iov = (struct iovec *) sg_iov;
+		iov_data_len = 0;
+		for (i = 0; i < hdr->iovec_count; i++) {
+			if (iov_data_len + iov[i].iov_len < iov_data_len) {
+				kfree(sg_iov);
+				ret = -EINVAL;
+				goto out;
+			}
+			iov_data_len += iov[i].iov_len;
+		}
+
 		/* SG_IO howto says that the shorter of the two wins */
-		iov_data_len = iov_length((struct iovec *)iov,
-					  hdr->iovec_count);
 		if (hdr->dxfer_len < iov_data_len) {
-			hdr->iovec_count = iov_shorten((struct iovec *)iov,
+			hdr->iovec_count = iov_shorten(iov,
 						       hdr->iovec_count,
 						       hdr->dxfer_len);
 			iov_data_len = hdr->dxfer_len;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
 					  iov_data_len, GFP_KERNEL);
-		kfree(iov);
+		kfree(sg_iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 				      GFP_KERNEL);
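The added loop replaces the unchecked iov_length() sum so that the total of the user-supplied segment lengths is checked for unsigned wraparound before it is used as the transfer length. A standalone sketch of the same pattern (the helper name is hypothetical; only the idea comes from the hunk above):

#include <linux/errno.h>
#include <linux/uio.h>

/* For unsigned arithmetic, (a + b < a) is true exactly when a + b wrapped. */
static int sum_iov_len_sketch(const struct iovec *iov, int count, size_t *total)
{
	size_t len = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (len + iov[i].iov_len < len)	/* overflow */
			return -EINVAL;
		len += iov[i].iov_len;
	}
	*total = len;
	return 0;
}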