about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    18
-rw-r--r--  mm/maccess.c     8
-rw-r--r--  mm/page_alloc.c  4
-rw-r--r--  mm/rmap.c       22
-rw-r--r--  mm/shmem.c       2
5 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index bcdc393b6580..d7b10578a64b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1982,16 +1982,26 @@ static int __remove_suid(struct dentry *dentry, int kill)
1982int file_remove_suid(struct file *file) 1982int file_remove_suid(struct file *file)
1983{ 1983{
1984 struct dentry *dentry = file->f_path.dentry; 1984 struct dentry *dentry = file->f_path.dentry;
1985 int killsuid = should_remove_suid(dentry); 1985 struct inode *inode = dentry->d_inode;
1986 int killpriv = security_inode_need_killpriv(dentry); 1986 int killsuid;
1987 int killpriv;
1987 int error = 0; 1988 int error = 0;
1988 1989
1990 /* Fast path for nothing security related */
1991 if (IS_NOSEC(inode))
1992 return 0;
1993
1994 killsuid = should_remove_suid(dentry);
1995 killpriv = security_inode_need_killpriv(dentry);
1996
1989 if (killpriv < 0) 1997 if (killpriv < 0)
1990 return killpriv; 1998 return killpriv;
1991 if (killpriv) 1999 if (killpriv)
1992 error = security_inode_killpriv(dentry); 2000 error = security_inode_killpriv(dentry);
1993 if (!error && killsuid) 2001 if (!error && killsuid)
1994 error = __remove_suid(dentry, killsuid); 2002 error = __remove_suid(dentry, killsuid);
2003 if (!error)
2004 inode->i_flags |= S_NOSEC;
1995 2005
1996 return error; 2006 return error;
1997} 2007}
@@ -2327,7 +2337,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
2327repeat: 2337repeat:
2328 page = find_lock_page(mapping, index); 2338 page = find_lock_page(mapping, index);
2329 if (page) 2339 if (page)
2330 return page; 2340 goto found;
2331 2341
2332 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); 2342 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2333 if (!page) 2343 if (!page)
@@ -2340,6 +2350,8 @@ repeat:
2340 goto repeat; 2350 goto repeat;
2341 return NULL; 2351 return NULL;
2342 } 2352 }
2353found:
2354 wait_on_page_writeback(page);
2343 return page; 2355 return page;
2344} 2356}
2345EXPORT_SYMBOL(grab_cache_page_write_begin); 2357EXPORT_SYMBOL(grab_cache_page_write_begin);
diff --git a/mm/maccess.c b/mm/maccess.c
index e2b6f5634e0d..4cee182ab5f3 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -15,10 +15,10 @@
15 * happens, handle that and return -EFAULT. 15 * happens, handle that and return -EFAULT.
16 */ 16 */
17 17
18long __weak probe_kernel_read(void *dst, void *src, size_t size) 18long __weak probe_kernel_read(void *dst, const void *src, size_t size)
19 __attribute__((alias("__probe_kernel_read"))); 19 __attribute__((alias("__probe_kernel_read")));
20 20
21long __probe_kernel_read(void *dst, void *src, size_t size) 21long __probe_kernel_read(void *dst, const void *src, size_t size)
22{ 22{
23 long ret; 23 long ret;
24 mm_segment_t old_fs = get_fs(); 24 mm_segment_t old_fs = get_fs();
@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
43 * Safely write to address @dst from the buffer at @src. If a kernel fault 43 * Safely write to address @dst from the buffer at @src. If a kernel fault
44 * happens, handle that and return -EFAULT. 44 * happens, handle that and return -EFAULT.
45 */ 45 */
46long __weak probe_kernel_write(void *dst, void *src, size_t size) 46long __weak probe_kernel_write(void *dst, const void *src, size_t size)
47 __attribute__((alias("__probe_kernel_write"))); 47 __attribute__((alias("__probe_kernel_write")));
48 48
49long __probe_kernel_write(void *dst, void *src, size_t size) 49long __probe_kernel_write(void *dst, const void *src, size_t size)
50{ 50{
51 long ret; 51 long ret;
52 mm_segment_t old_fs = get_fs(); 52 mm_segment_t old_fs = get_fs();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a4e1db3f1981..4e8985acdab8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2247,10 +2247,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2247 2247
2248 if (should_fail_alloc_page(gfp_mask, order)) 2248 if (should_fail_alloc_page(gfp_mask, order))
2249 return NULL; 2249 return NULL;
2250#ifndef CONFIG_ZONE_DMA
2251 if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
2252 return NULL;
2253#endif
2254 2250
2255 /* 2251 /*
2256 * Check the zones suitable for the gfp_mask contain at least one 2252 * Check the zones suitable for the gfp_mask contain at least one
diff --git a/mm/rmap.c b/mm/rmap.c
index 3a39b518a653..0eb463ea88dd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -352,6 +352,11 @@ void __init anon_vma_init(void)
352 * The page might have been remapped to a different anon_vma or the anon_vma 352 * The page might have been remapped to a different anon_vma or the anon_vma
353 * returned may already be freed (and even reused). 353 * returned may already be freed (and even reused).
354 * 354 *
355 * In case it was remapped to a different anon_vma, the new anon_vma will be a
356 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
357 * ensure that any anon_vma obtained from the page will still be valid for as
358 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
359 *
355 * All users of this function must be very careful when walking the anon_vma 360 * All users of this function must be very careful when walking the anon_vma
356 * chain and verify that the page in question is indeed mapped in it 361 * chain and verify that the page in question is indeed mapped in it
357 * [ something equivalent to page_mapped_in_vma() ]. 362 * [ something equivalent to page_mapped_in_vma() ].
@@ -405,6 +410,7 @@ out:
405struct anon_vma *page_lock_anon_vma(struct page *page) 410struct anon_vma *page_lock_anon_vma(struct page *page)
406{ 411{
407 struct anon_vma *anon_vma = NULL; 412 struct anon_vma *anon_vma = NULL;
413 struct anon_vma *root_anon_vma;
408 unsigned long anon_mapping; 414 unsigned long anon_mapping;
409 415
410 rcu_read_lock(); 416 rcu_read_lock();
@@ -415,13 +421,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
415 goto out; 421 goto out;
416 422
417 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 423 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
418 if (mutex_trylock(&anon_vma->root->mutex)) { 424 root_anon_vma = ACCESS_ONCE(anon_vma->root);
425 if (mutex_trylock(&root_anon_vma->mutex)) {
419 /* 426 /*
420 * If we observe a !0 refcount, then holding the lock ensures 427 * If the page is still mapped, then this anon_vma is still
421 * the anon_vma will not go away, see __put_anon_vma(). 428 * its anon_vma, and holding the mutex ensures that it will
429 * not go away, see anon_vma_free().
422 */ 430 */
423 if (!atomic_read(&anon_vma->refcount)) { 431 if (!page_mapped(page)) {
424 anon_vma_unlock(anon_vma); 432 mutex_unlock(&root_anon_vma->mutex);
425 anon_vma = NULL; 433 anon_vma = NULL;
426 } 434 }
427 goto out; 435 goto out;
@@ -1014,7 +1022,7 @@ void do_page_add_anon_rmap(struct page *page,
1014 return; 1022 return;
1015 1023
1016 VM_BUG_ON(!PageLocked(page)); 1024 VM_BUG_ON(!PageLocked(page));
1017 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1025 /* address might be in next vma when migration races vma_adjust */
1018 if (first) 1026 if (first)
1019 __page_set_anon_rmap(page, vma, address, exclusive); 1027 __page_set_anon_rmap(page, vma, address, exclusive);
1020 else 1028 else
@@ -1709,7 +1717,7 @@ void hugepage_add_anon_rmap(struct page *page,
1709 1717
1710 BUG_ON(!PageLocked(page)); 1718 BUG_ON(!PageLocked(page));
1711 BUG_ON(!anon_vma); 1719 BUG_ON(!anon_vma);
1712 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1720 /* address might be in next vma when migration races vma_adjust */
1713 first = atomic_inc_and_test(&page->_mapcount); 1721 first = atomic_inc_and_test(&page->_mapcount);
1714 if (first) 1722 if (first)
1715 __hugepage_set_anon_rmap(page, vma, address, 0); 1723 __hugepage_set_anon_rmap(page, vma, address, 0);
diff --git a/mm/shmem.c b/mm/shmem.c
index 1acfb2687bfa..d221a1cfd7b1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1114,8 +1114,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1114 delete_from_page_cache(page); 1114 delete_from_page_cache(page);
1115 shmem_swp_set(info, entry, swap.val); 1115 shmem_swp_set(info, entry, swap.val);
1116 shmem_swp_unmap(entry); 1116 shmem_swp_unmap(entry);
1117 spin_unlock(&info->lock);
1118 swap_shmem_alloc(swap); 1117 swap_shmem_alloc(swap);
1118 spin_unlock(&info->lock);
1119 BUG_ON(page_mapped(page)); 1119 BUG_ON(page_mapped(page));
1120 swap_writepage(page, wbc); 1120 swap_writepage(page, wbc);
1121 return 0; 1121 return 0;