Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 7
-rw-r--r--	mm/shmem.c	43
-rw-r--r--	mm/vmalloc.c	13
3 files changed, 36 insertions, 27 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..126d3973b3d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
@@ -1851,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic_nocache(kaddr + offset,
-							buf, bytes);
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+		left = __copy_from_user(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
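
These hunks switch the iov_iter copy helpers from the non-temporal (_nocache) user-copy primitives to the ordinary cached ones; the surrounding structure is a single-segment fast path plus a segmented fallback loop. A minimal user-space sketch of that dispatch shape, with memcpy() standing in for the __copy_from_user*() primitives (the helper names below are made up for illustration, they are not mm/filemap.c's):

/*
 * User-space sketch of the single-segment fast path vs. the segmented
 * fallback in iov_iter_copy_from_user_atomic(); memcpy() stands in for
 * the __copy_from_user_inatomic() primitives.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static size_t copy_iovec_segments(char *dst, const struct iovec *iov,
				  size_t base, size_t bytes)
{
	size_t copied = 0;

	while (bytes) {
		const char *buf = (const char *)iov->iov_base + base;
		size_t copy = bytes < iov->iov_len - base ?
			      bytes : iov->iov_len - base;

		base = 0;
		memcpy(dst, buf, copy);	/* kernel: __copy_from_user_inatomic() */
		copied += copy;
		bytes -= copy;
		dst += copy;
		iov++;
	}
	return copied;
}

static size_t copy_from_iovec(char *dst, const struct iovec *iov,
			      unsigned int nr_segs, size_t bytes)
{
	if (nr_segs == 1) {	/* common case: one segment, one straight copy */
		memcpy(dst, iov->iov_base, bytes);
		return bytes;
	}
	return copy_iovec_segments(dst, iov, 0, bytes);
}

int main(void)
{
	char part1[] = "hello ", part2[] = "world", dst[16] = "";
	struct iovec iov[2] = {
		{ .iov_base = part1, .iov_len = 6 },
		{ .iov_base = part2, .iov_len = 5 },
	};

	copy_from_iovec(dst, iov, 2, 11);
	printf("%s\n", dst);	/* hello world */
	return 0;
}
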
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdeea..4103a239ce84 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-	return (flags & VM_ACCOUNT) ?
-		security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+	return (flags & VM_NORESERVE) ?
+		0 : security_vm_enough_memory_kern(VM_ACCT(size));
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 {
-	if (flags & VM_ACCOUNT)
+	if (!(flags & VM_NORESERVE))
 		vm_unacct_memory(VM_ACCT(size));
 }
 
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-	return (flags & VM_ACCOUNT) ?
-		0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+	return (flags & VM_NORESERVE) ?
+		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
-	if (!(flags & VM_ACCOUNT))
+	if (flags & VM_NORESERVE)
 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
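
The two hunks above invert the accounting predicates: the flag carried per object is now VM_NORESERVE rather than VM_ACCOUNT, so the full size is charged up front unless VM_NORESERVE is set, and individual blocks are charged only when it is. A toy model of that split, assuming a single global counter (the names and the counter are illustrative, not the kernel's accounting machinery):

/*
 * Toy model of the inverted predicates: objects without VM_NORESERVE are
 * charged in full up front, objects with it are charged block by block
 * as they are used.
 */
#include <stdio.h>

#define VM_NORESERVE	0x1UL
#define TOY_PAGE_SIZE	4096L

static long committed_pages;

static void acct_size(unsigned long flags, long size)
{
	if (!(flags & VM_NORESERVE))
		committed_pages += size / TOY_PAGE_SIZE;	/* charge whole object now */
}

static void acct_block(unsigned long flags)
{
	if (flags & VM_NORESERVE)
		committed_pages += 1;	/* lazy per-block charge on first use */
}

int main(void)
{
	acct_size(0, 8 * TOY_PAGE_SIZE);	/* reserved object: 8 pages charged now */
	acct_block(VM_NORESERVE);		/* unreserved object: 1 page charged on use */
	printf("committed pages: %ld\n", committed_pages);	/* 9 */
	return 0;
}
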
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+				     dev_t dev, unsigned long flags)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 		info = SHMEM_I(inode);
 		memset(info, 0, (char *)inode - (char *)info);
 		spin_lock_init(&info->lock);
+		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
 
 		switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int
 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
-	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -ENOSPC;
 
+	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
 	if (inode) {
 		error = security_inode_init_security(inode, dir, NULL, NULL,
 						     NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	if (len > PAGE_CACHE_SIZE)
 		return -ENAMETOOLONG;
 
-	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
 	if (!inode)
 		return -ENOSPC;
 
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_flags |= MS_POSIXACL;
 #endif
 
-	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
 	if (!inode)
 		goto failed;
 	inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	return 0;
 }
 
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
 #define SHMEM_MAX_BYTES LLONG_MAX
 
 #endif /* CONFIG_SHMEM */
 
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
  * shmem_file_setup - get an unlinked file living in tmpfs
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
- * @flags: vm_flags
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
  */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto put_dentry;
 
 	error = -ENOSPC;
-	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
 	if (!inode)
 		goto close_file;
 
-#ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
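
The remaining shmem.c hunks thread the flags argument into shmem_get_inode() so the accounting mode is recorded when the inode is created, which is what lets shmem_file_setup() drop its CONFIG_SHMEM-guarded fix-up of SHMEM_I(inode)->flags. A hypothetical sketch of that shape (toy types and names, not mm/shmem.c's):

/*
 * Illustrative-only sketch of the refactor's shape: the accounting policy
 * is passed to the constructor instead of being patched into the object
 * afterwards.
 */
#include <stdlib.h>

#define VM_NORESERVE	0x1UL

struct toy_inode {
	unsigned long flags;	/* accounting policy, fixed at creation */
};

/* after the patch: the constructor records the policy itself */
static struct toy_inode *toy_get_inode(unsigned long flags)
{
	struct toy_inode *inode = calloc(1, sizeof(*inode));

	if (inode)
		inode->flags = flags & VM_NORESERVE;
	return inode;
}

/*
 * Before the patch a caller had to fix the object up after creation,
 * roughly:  inode->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
 */
int main(void)
{
	struct toy_inode *inode = toy_get_inode(VM_NORESERVE);

	free(inode);
	return 0;
}
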
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fb6f59935fb2..af58324c361a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -344,6 +345,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -375,6 +379,8 @@ retry:
 
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
@@ -385,6 +391,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
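
The new checks in alloc_vmap_area() guard the addr + size arithmetic against wrap-around: once size is known to be non-zero (the added BUG_ON(!size)), a wrapped end address makes addr + size - 1 compare below addr, so the search bails out through the overflow label instead of returning a bogus range. A small stand-alone demonstration of the test:

/*
 * Demonstrates the wrap-around test added by these hunks: once size is
 * known to be non-zero, a wrapped end address makes addr + size - 1
 * compare below addr.
 */
#include <stdio.h>
#include <limits.h>

static int range_overflows(unsigned long addr, unsigned long size)
{
	return addr + size - 1 < addr;	/* true only if addr + size wraps */
}

int main(void)
{
	printf("%d\n", range_overflows(0x1000UL, 0x1000UL));		/* 0: fits */
	printf("%d\n", range_overflows(ULONG_MAX - 10, 0x1000UL));	/* 1: wraps */
	return 0;
}
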
@@ -508,6 +515,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -547,7 +555,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
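
__free_vmap_area() unlinks and frees the entry being visited, so the plain list_for_each_entry() would read va's list linkage after it is gone; list_for_each_entry_safe() caches the successor in n_va before the body runs. A user-space analogue of the same pattern:

/*
 * User-space analogue of why the purge loop needs the _safe iterator: the
 * loop body frees the node it is visiting, so the successor has to be
 * fetched before the free, which is what list_for_each_entry_safe() does
 * through the extra n_va cursor.
 */
#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *head)
{
	struct node *n, *next;

	for (n = head; n; n = next) {
		next = n->next;	/* cache the successor before freeing */
		free(n);	/* an unsafe loop would read n->next after this */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}
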
@@ -1347,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1366,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
 
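
might_sleep() is an annotation: with sleep-in-atomic debugging enabled it complains as soon as vmap() or vunmap() is entered from atomic context, even on invocations that would not actually block. A toy user-space analogue of that kind of early assertion (entirely illustrative):

/*
 * Toy analogue of the might_sleep() annotation: a function that may block
 * asserts up front that it is not being called from an "atomic" region,
 * so misuse is caught even on calls that happen not to block.
 * atomic_depth stands in for preemption/IRQ-disabled state.
 */
#include <assert.h>
#include <stdio.h>

static int atomic_depth;	/* incremented while inside an "atomic" region */

static void toy_might_sleep(void)
{
	assert(atomic_depth == 0 && "sleeping function called from atomic context");
}

static void toy_vunmap(void)
{
	toy_might_sleep();	/* complain early, even if nothing below blocks */
	/* ... tear down the mapping, possibly blocking ... */
}

int main(void)
{
	toy_vunmap();	/* fine: not inside an atomic region */
	printf("ok\n");
	return 0;
}
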