about summary refs log tree commit diff stats
path: root/mm/filemap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index ceba0bd03662..126d3973b3d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1374,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
1374 return 0; 1374 return 0;
1375} 1375}
1376 1376
1377asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) 1377SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1378{ 1378{
1379 ssize_t ret; 1379 ssize_t ret;
1380 struct file *file; 1380 struct file *file;
@@ -1393,6 +1393,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1393 } 1393 }
1394 return ret; 1394 return ret;
1395} 1395}
1396#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1397asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1398{
1399 return SYSC_readahead((int) fd, offset, (size_t) count);
1400}
1401SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1402#endif
1396 1403
1397#ifdef CONFIG_MMU 1404#ifdef CONFIG_MMU
1398/** 1405/**
@@ -1816,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1816 int copy = min(bytes, iov->iov_len - base); 1823 int copy = min(bytes, iov->iov_len - base);
1817 1824
1818 base = 0; 1825 base = 0;
1819 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy); 1826 left = __copy_from_user_inatomic(vaddr, buf, copy);
1820 copied += copy; 1827 copied += copy;
1821 bytes -= copy; 1828 bytes -= copy;
1822 vaddr += copy; 1829 vaddr += copy;
@@ -1844,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
1844 if (likely(i->nr_segs == 1)) { 1851 if (likely(i->nr_segs == 1)) {
1845 int left; 1852 int left;
1846 char __user *buf = i->iov->iov_base + i->iov_offset; 1853 char __user *buf = i->iov->iov_base + i->iov_offset;
1847 left = __copy_from_user_inatomic_nocache(kaddr + offset, 1854 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
1848 buf, bytes);
1849 copied = bytes - left; 1855 copied = bytes - left;
1850 } else { 1856 } else {
1851 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1857 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1873,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
1873 if (likely(i->nr_segs == 1)) { 1879 if (likely(i->nr_segs == 1)) {
1874 int left; 1880 int left;
1875 char __user *buf = i->iov->iov_base + i->iov_offset; 1881 char __user *buf = i->iov->iov_base + i->iov_offset;
1876 left = __copy_from_user_nocache(kaddr + offset, buf, bytes); 1882 left = __copy_from_user(kaddr + offset, buf, bytes);
1877 copied = bytes - left; 1883 copied = bytes - left;
1878 } else { 1884 } else {
1879 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1885 copied = __iovec_copy_from_user_inatomic(kaddr + offset,