Diffstat (limited to 'fs/read_write.c')
-rw-r--r--	fs/read_write.c	95
1 file changed, 59 insertions(+), 36 deletions(-)
diff --git a/fs/read_write.c b/fs/read_write.c
index 74e36586e4d3..431a0ed610c8 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -31,6 +31,20 @@ const struct file_operations generic_ro_fops = {
 
 EXPORT_SYMBOL(generic_ro_fops);
 
+static int
+__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+{
+	/*
+	 * pos or pos+count is negative here, check overflow.
+	 * too big "count" will be caught in rw_verify_area().
+	 */
+	if ((pos < 0) && (pos + count < pos))
+		return -EOVERFLOW;
+	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+		return 0;
+	return -EINVAL;
+}
+
 /**
  * generic_file_llseek_unlocked - lockless generic llseek implementation
  * @file: file structure to seek on
@@ -62,7 +76,9 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
 		break;
 	}
 
-	if (offset < 0 || offset > inode->i_sb->s_maxbytes)
+	if (offset < 0 && __negative_fpos_check(file, offset, 0))
+		return -EINVAL;
+	if (offset > inode->i_sb->s_maxbytes)
 		return -EINVAL;
 
 	/* Special lock needed here? */
@@ -124,7 +140,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
 {
 	loff_t retval;
 
-	lock_kernel();
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
 	switch (origin) {
 	case SEEK_END:
 		offset += i_size_read(file->f_path.dentry->d_inode);
@@ -137,7 +153,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
 		offset += file->f_pos;
 	}
 	retval = -EINVAL;
-	if (offset >= 0) {
+	if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 			file->f_version = 0;
@@ -145,7 +161,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
 		retval = offset;
 	}
 out:
-	unlock_kernel();
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
 	return retval;
 }
 EXPORT_SYMBOL(default_llseek);
@@ -156,7 +172,6 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
 
 	fn = no_llseek;
 	if (file->f_mode & FMODE_LSEEK) {
-		fn = default_llseek;
 		if (file->f_op && file->f_op->llseek)
 			fn = file->f_op->llseek;
 	}
@@ -222,13 +237,12 @@ bad:
 }
 #endif
 
+
 /*
  * rw_verify_area doesn't like huge counts. We limit
  * them to something that fits in "int" so that others
  * won't have to do range checks all the time.
  */
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
-
 int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
 {
 	struct inode *inode;
@@ -239,8 +253,11 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
 	if (unlikely((ssize_t) count < 0))
 		return retval;
 	pos = *ppos;
-	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
-		return retval;
+	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
+		retval = __negative_fpos_check(file, pos, count);
+		if (retval)
+			return retval;
+	}
 
 	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
 		retval = locks_mandatory_area(
@@ -565,65 +582,71 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 				unsigned long nr_segs, unsigned long fast_segs,
 				struct iovec *fast_pointer,
 				struct iovec **ret_pointer)
- {
+{
 	unsigned long seg;
 	ssize_t ret;
 	struct iovec *iov = fast_pointer;
 
 	/*
 	 * SuS says "The readv() function *may* fail if the iovcnt argument
 	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
 	 * traditionally returned zero for zero segments, so...
 	 */
 	if (nr_segs == 0) {
 		ret = 0;
 		goto out;
 	}
 
 	/*
 	 * First get the "struct iovec" from user memory and
 	 * verify all the pointers
 	 */
 	if (nr_segs > UIO_MAXIOV) {
 		ret = -EINVAL;
 		goto out;
 	}
 	if (nr_segs > fast_segs) {
 		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
 		if (iov == NULL) {
 			ret = -ENOMEM;
 			goto out;
 		}
 	}
 	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
 		ret = -EFAULT;
 		goto out;
 	}
 
 	/*
 	 * According to the Single Unix Specification we should return EINVAL
 	 * if an element length is < 0 when cast to ssize_t or if the
 	 * total length would overflow the ssize_t return value of the
 	 * system call.
-	 */
+	 *
+	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
+	 * overflow case.
+	 */
 	ret = 0;
 	for (seg = 0; seg < nr_segs; seg++) {
 		void __user *buf = iov[seg].iov_base;
 		ssize_t len = (ssize_t)iov[seg].iov_len;
 
 		/* see if we we're about to use an invalid len or if
 		 * it's about to overflow ssize_t */
-		if (len < 0 || (ret + len < ret)) {
+		if (len < 0) {
 			ret = -EINVAL;
 			goto out;
 		}
 		if (unlikely(!access_ok(vrfy_dir(type), buf, len))) {
 			ret = -EFAULT;
 			goto out;
+		}
+		if (len > MAX_RW_COUNT - ret) {
+			len = MAX_RW_COUNT - ret;
+			iov[seg].iov_len = len;
 		}
-
 		ret += len;
 	}
 out:
 	*ret_pointer = iov;
 	return ret;
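
The two rules this patch enforces can be exercised outside the kernel. What follows is a minimal, self-contained userspace C sketch (not kernel code): a negative file position is allowed only after a wrap-around check and only when the file opted in (FMODE_UNSIGNED_OFFSET in the patch), and the summed iovec lengths are clamped to MAX_RW_COUNT, whose definition the removed #define shows as (INT_MAX & PAGE_CACHE_MASK). The sketch assumes 4096-byte pages, and the names negative_fpos_check_demo and total_iov_len_demo are made up for illustration.

/*
 * Illustrative userspace sketch only -- not kernel code.  Mirrors the
 * checks added by this patch; assumes 4096-byte pages when computing
 * the MAX_RW_COUNT analogue.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

#define DEMO_PAGE_MASK		(~4095L)
#define DEMO_MAX_RW_COUNT	((size_t)(INT_MAX & DEMO_PAGE_MASK))

typedef long long demo_loff_t;	/* stand-in for the kernel's loff_t */

/*
 * Mirrors __negative_fpos_check(): called only when pos or pos+count is
 * already negative.  Reject wrap-around; otherwise allow the negative
 * position only if the file opted in to unsigned offsets.
 */
static int negative_fpos_check_demo(bool unsigned_offset,
				    demo_loff_t pos, size_t count)
{
	if (pos < 0 && (demo_loff_t)(pos + count) < pos)
		return -1;	/* kernel returns -EOVERFLOW */
	if (unsigned_offset)
		return 0;
	return -2;		/* kernel returns -EINVAL */
}

/*
 * Mirrors the reworked loop in rw_copy_check_uvector(): a negative
 * segment length is an error, and segments are shrunk in place so the
 * running total never exceeds DEMO_MAX_RW_COUNT (no ssize_t overflow).
 */
static ssize_t total_iov_len_demo(size_t *lens, unsigned long nr_segs)
{
	ssize_t ret = 0;
	unsigned long seg;

	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)lens[seg];

		if (len < 0)
			return -2;	/* kernel returns -EINVAL */
		if ((size_t)len > DEMO_MAX_RW_COUNT - (size_t)ret) {
			len = (ssize_t)(DEMO_MAX_RW_COUNT - (size_t)ret);
			lens[seg] = (size_t)len;	/* truncate this segment */
		}
		ret += len;
	}
	return ret;
}

int main(void)
{
	size_t lens[2] = { DEMO_MAX_RW_COUNT, DEMO_MAX_RW_COUNT };

	printf("negative pos, unsigned offset allowed: %d\n",
	       negative_fpos_check_demo(true, -4096, 16));
	printf("negative pos, ordinary file:           %d\n",
	       negative_fpos_check_demo(false, -4096, 16));
	printf("two maximal iovecs clamp to %zd (cap %zu)\n",
	       total_iov_len_demo(lens, 2), DEMO_MAX_RW_COUNT);
	return 0;
}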