author    Linus Torvalds <torvalds@linux-foundation.org>  2015-08-06 22:20:40 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-08-06 22:20:40 -0400
commit    8664b90baedb0f9aa6bea5bfbdfeae0d5883d28e (patch)
tree      7fa0cdabdef8bc979c5d2b3b55bff36c33aae815
parent    a58997e1a6287068be2d923f5fde6102e7e08f56 (diff)
parent    a50fcb512d9539b5179b1e523641420339086995 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  writeback: fix initial dirty limit
  mm/memory-failure: set PageHWPoison before migrate_pages()
  mm: check __PG_HWPOISON separately from PAGE_FLAGS_CHECK_AT_*
  mm/memory-failure: give up error handling for non-tail-refcounted thp
  mm/memory-failure: fix race in counting num_poisoned_pages
  mm/memory-failure: unlock_page before put_page
  ipc: use private shmem or hugetlbfs inodes for shm segments.
  mm: initialize hotplugged pages as reserved
  ocfs2: fix shift left overflow
  kthread: export kthread functions
  fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()
  lib/iommu-common.c: do not use 0xffffffffffffffffl for computing align_mask
  mm/slub: allow merging when SLAB_DEBUG_FREE is set
  signalfd: fix information leak in signalfd_copyinfo
  signal: fix information leak in copy_siginfo_to_user
  signal: fix information leak in copy_siginfo_from_user32
  ocfs2: fix BUG in ocfs2_downconvert_thread_do_work()
  fs, file table: reinit files_stat.max_files after deferred memory initialisation
  mm, meminit: replace rwsem with completion
  mm, meminit: allow early_pfn_to_nid to be used during runtime
  ...
 arch/arm64/kernel/signal32.c     |  5
 arch/mips/kernel/signal32.c      |  2
 arch/powerpc/kernel/signal_32.c  |  2
 arch/tile/kernel/compat_signal.c |  2
 fs/dcache.c                      | 13
 fs/file_table.c                  | 24
 fs/hugetlbfs/inode.c             |  2
 fs/notify/mark.c                 | 30
 fs/ocfs2/aops.c                  |  4
 fs/ocfs2/dlmglue.c               | 10
 fs/signalfd.c                    |  5
 include/linux/fs.h               |  5
 include/linux/page-flags.h       | 10
 init/main.c                      |  2
 ipc/mqueue.c                     |  5
 ipc/shm.c                        |  2
 kernel/kthread.c                 |  4
 kernel/signal.c                  | 13
 lib/iommu-common.c               |  2
 mm/huge_memory.c                 |  7
 mm/memory-failure.c              | 32
 mm/memory_hotplug.c              | 10
 mm/migrate.c                     |  8
 mm/page-writeback.c              |  4
 mm/page_alloc.c                  | 45
 mm/shmem.c                       |  4
 mm/slab_common.c                 |  3
 27 files changed, 153 insertions(+), 102 deletions(-)
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitely for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 		break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE))
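
A note on this and the three memset() removals in the following compat implementations: they are paired with the `siginfo_t info = {};` initializations in the kernel/signal.c hunk further down, so the whole union is zeroed once at the caller. The added si_signo checks matter because si_code values are only meaningful per signal; a matching si_code on a non-SIGBUS signal could otherwise cause a never-written si_addr_lsb to be copied out. A minimal sketch of the leak class being closed, with hypothetical struct and field names:

/*
 * Sketch only (hypothetical names, not kernel code). A siginfo-like
 * object is a fixed-size union; copying the whole object to user space
 * after writing only a few members discloses stale kernel stack bytes.
 */
struct example_info {
	int sig;
	int code;
	char pad[112];			/* union/padding area, never written */
};

static void deliver(struct example_info __user *uinfo)
{
	struct example_info info = {};	/* the fix: zero at the source */

	info.sig = 7;
	info.code = 1;
	if (copy_to_user(uinfo, &info, sizeof(info)))
		pr_warn("example: copy_to_user failed\n");
}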
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
 		return -EFAULT;
 
-	memset(to, 0, sizeof(*to));
-
 	err = __get_user(to->si_signo, &from->si_signo);
 	err |= __get_user(to->si_errno, &from->si_errno);
 	err |= __get_user(to->si_code, &from->si_code);
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
 	inode_init_early();
 }
 
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
 {
-	unsigned long reserve;
-
-	/* Base hash sizes on available memory, with a reserve equal to
-	   150% of current kernel size */
-
-	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
-	mempages -= reserve;
-
 	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	dcache_init();
 	inode_init();
-	files_init(mempages);
+	files_init();
+	files_maxfiles_init();
 	mnt_init();
 	bdev_cache_init();
 	chrdev_init();
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
 #include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
+#include <linux/swap.h>
 
 #include <linux/atomic.h>
 
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
 	}
 }
 
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
 {
-	unsigned long n;
-
 	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
 
-	/*
-	 * One file with associated inode and dcache is very roughly 1K.
-	 * Per default don't use more than 10% of our memory for files.
-	 */
+/*
+ * One file with associated inode and dcache is very roughly 1K. Per default
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+	unsigned long n;
+	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+	memreserve = min(memreserve, totalram_pages - 1);
+	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
-	n = (mempages * (PAGE_SIZE / 1024)) / 10;
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
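
Because files_maxfiles_init() is now also called from page_alloc_init_late() (see the mm/page_alloc.c hunk below), the 10% sizing sees memory freed by deferred page initialisation rather than only what was free at early boot. A worked example of the new arithmetic, with assumed values:

/* Assumed: 4 GiB of 4 KiB pages, ~3.6 GiB free at the time of the call.
 * The min() clamp is omitted here; it only matters if the reserve would
 * exceed total RAM. */
unsigned long totalram = 1048576;	/* totalram_pages                */
unsigned long free     = 948576;	/* nr_free_pages()               */

unsigned long memreserve = (totalram - free) * 3 / 2;	/* 150000 pages  */
unsigned long n = ((totalram - memreserve) * (4096 / 1024)) / 10;
					/* 898576 * 4 / 10 = 359430 files */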
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
 	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
 		goto out_dentry;
+	if (creat_flags == HUGETLB_SHMFS_INODE)
+		inode->i_flags |= S_PRIVATE;
 
 	file = ERR_PTR(-ENOMEM);
 	if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
+	LIST_HEAD(to_free);
 
+	/*
+	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
+	 * to_free list so we have to use mark_mutex even when accessing that
+	 * list. And freeing mark requires us to drop mark_mutex. So we can
+	 * reliably free only the first mark in the list. That's why we first
+	 * move marks to free to to_free list in one go and then free marks in
+	 * to_free list one by one.
+	 */
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-		if (mark->flags & flags) {
-			fsnotify_get_mark(mark);
-			fsnotify_destroy_mark_locked(mark, group);
-			fsnotify_put_mark(mark);
-		}
+		if (mark->flags & flags)
+			list_move(&mark->g_list, &to_free);
 	}
 	mutex_unlock(&group->mark_mutex);
+
+	while (1) {
+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+		if (list_empty(&to_free)) {
+			mutex_unlock(&group->mark_mutex);
+			break;
+		}
+		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+		fsnotify_get_mark(mark);
+		fsnotify_destroy_mark_locked(mark, group);
+		mutex_unlock(&group->mark_mutex);
+		fsnotify_put_mark(mark);
+	}
 }
 
 /*
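
The fix above is an instance of a reusable pattern: when destroying an element requires dropping the lock that protects its list, first detach all victims onto a private list under the lock, then free them one at a time, re-taking the lock for each. A generic sketch, with hypothetical object type and helpers:

/* Generic detach-then-destroy pattern (hypothetical obj type/helpers). */
static void clear_all(struct list_head *active, struct mutex *lock)
{
	LIST_HEAD(to_free);
	struct obj *o, *tmp;

	mutex_lock(lock);
	list_for_each_entry_safe(o, tmp, active, node)
		list_move(&o->node, &to_free);	/* detach in one pass */
	mutex_unlock(lock);

	for (;;) {
		mutex_lock(lock);	/* others may still touch to_free */
		if (list_empty(&to_free)) {
			mutex_unlock(lock);
			break;
		}
		o = list_first_entry(&to_free, struct obj, node);
		obj_get(o);			/* pin across the unlock */
		obj_destroy_locked(o);
		mutex_unlock(lock);		/* destroy needs lock dropped */
		obj_put(o);
	}
}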
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
 	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 		u64 s = i_size_read(inode);
-		sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+		sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
 			(do_div(s, osb->s_clustersize) >> 9);
 
 		ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
 	BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
 	ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-			p_cpos << (osb->s_clustersize_bits - 9),
+			(u64)p_cpos << (osb->s_clustersize_bits - 9),
 			zero_len_head >> 9, GFP_NOFS, false);
 	if (ret < 0)
 		mlog_errno(ret);
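
Both hunks fix the same 32-bit truncation: p_cpos is a 32-bit cluster number, so the left shift is evaluated in 32-bit arithmetic and the high bits are discarded before the result widens to sector_t. An illustration with assumed values (clustersize_bits = 20, so the shift amount is 11):

/* Assumed values: u32 cluster number, 1 MiB clusters (shift of 11). */
u32 p_cpos = 0x00400000;		/* cluster 2^22                    */

sector_t wrong = p_cpos << 11;		/* 32-bit shift overflows: 0       */
sector_t right = (u64)p_cpos << 11;	/* 64-bit shift: 0x200000000       */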
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 	osb->dc_work_sequence = osb->dc_wake_sequence;
 
 	processed = osb->blocked_lock_count;
-	while (processed) {
-		BUG_ON(list_empty(&osb->blocked_lock_list));
-
+	/*
+	 * blocked lock processing in this loop might call iput which can
+	 * remove items off osb->blocked_lock_list. Downconvert up to
+	 * 'processed' number of locks, but stop short if we had some
+	 * removed in ocfs2_mark_lockres_freeing when downconverting.
+	 */
+	while (processed && !list_empty(&osb->blocked_lock_list)) {
 		lockres = list_entry(osb->blocked_lock_list.next,
 				     struct ocfs2_lock_res, l_blocked_list);
 		list_del_init(&lockres->l_blocked_list);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (kinfo->si_code == BUS_MCEERR_AR ||
-		    kinfo->si_code == BUS_MCEERR_AO)
+		if (kinfo->si_signo == SIGBUS &&
+		    (kinfo->si_code == BUS_MCEERR_AR ||
+		     kinfo->si_code == BUS_MCEERR_AO))
 			err |= __put_user((short) kinfo->si_addr_lsb,
 					  &uinfo->ssi_addr_lsb);
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc008c338f5a..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
 
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
 
 extern struct kmem_cache *names_cachep;
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 	 1 << PG_private | 1 << PG_private_2 | \
 	 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
-	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+	 1 << PG_unevictable | __PG_MLOCKED | \
 	 __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set. It they are set,
+ * Pages being prepped should not have these flags set. It they are set,
  * there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent from reusing the page.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP \
+	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
 
 #define PAGE_FLAGS_PRIVATE				\
 	(1 << PG_private | 1 << PG_private_2)
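
Taken together with the mm/huge_memory.c and mm/page_alloc.c hunks below, the effect is that PG_hwpoison now survives both THP tail-page setup and the allocator's prep-time flag clearing, while check_new_page() tests for it explicitly. A small sketch of the bit arithmetic, assuming CONFIG_MEMORY_FAILURE (where __PG_HWPOISON is 1UL << PG_hwpoison):

/* Sketch (assumes CONFIG_MEMORY_FAILURE, so __PG_HWPOISON is a real bit). */
unsigned long flags = (1UL << PG_hwpoison) | (1UL << PG_active);

flags &= ~PAGE_FLAGS_CHECK_AT_PREP;	/* clears PG_active, but          */
					/* PG_hwpoison survives, since    */
					/* the mask now excludes it       */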
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
 	key_init();
 	security_init();
 	dbg_late_init();
-	vfs_caches_init(totalram_pages);
+	vfs_caches_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 		if (!leaf)
 			return -ENOMEM;
 		INIT_LIST_HEAD(&leaf->msg_list);
-		info->qsize += sizeof(*leaf);
 	}
 	leaf->priority = msg->m_type;
 	rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
 			  "lazy leaf delete!\n");
 		rb_erase(&leaf->rb_node, &info->msg_tree);
 		if (info->node_cache) {
-			info->qsize -= sizeof(*leaf);
 			kfree(leaf);
 		} else {
 			info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
 		if (list_empty(&leaf->msg_list)) {
 			rb_erase(&leaf->rb_node, &info->msg_tree);
 			if (info->node_cache) {
-				info->qsize -= sizeof(*leaf);
 				kfree(leaf);
 			} else {
 				info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 		new_leaf = NULL;
 	} else {
 		kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 	} else {
 		kfree(new_leaf);
 	}
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 		if ((shmflg & SHM_NORESERVE) &&
 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 			acctflag = VM_NORESERVE;
-		file = shmem_file_setup(name, size, acctflag);
+		file = shmem_kernel_file_setup(name, size, acctflag);
 	}
 	error = PTR_ERR(file);
 	if (IS_ERR(file))
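
This pairs with the fs/hugetlbfs/inode.c hunk above: both shmem_kernel_file_setup() and the HUGETLB_SHMFS_INODE case mark the backing inode S_PRIVATE, so per-inode LSM checks are skipped and permission checking happens once at the shm/IPC layer. A sketch of the generic LSM-hook shape that makes S_PRIVATE effective (simplified, not the verbatim hook body):

/* Sketch of the generic LSM hook pattern (simplified, not verbatim). */
int security_inode_permission(struct inode *inode, int mask)
{
	if (unlikely(IS_PRIVATE(inode)))
		return 0;		/* kernel-internal inode: no LSM veto */
	return do_lsm_checks(inode, mask);	/* hypothetical dispatch */
}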
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
 {
 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 }
+EXPORT_SYMBOL_GPL(kthread_should_park);
 
 /**
  * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
 {
 	__kthread_parkme(to_kthread(current));
 }
+EXPORT_SYMBOL_GPL(kthread_parkme);
 
 static int kthread(void *_create)
 {
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
 	if (kthread)
 		__kthread_unpark(k, kthread);
 }
+EXPORT_SYMBOL_GPL(kthread_unpark);
 
 /**
  * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(kthread_park);
 
 /**
  * kthread_stop - stop a thread created by kthread_create().
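
These exports make the kthread parking API available to modules. A minimal sketch of a parkable worker loop; do_work() is hypothetical, the park calls are the real API:

/* Minimal parkable kthread; do_work() is a hypothetical work unit. */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleep here until unparked */
		do_work(data);
	}
	return 0;
}

/*
 * Controller side: kthread_park(task) returns once the thread has parked;
 * kthread_unpark(task) resumes it without a full stop/restart cycle.
 */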
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-		err |= __put_user(from->si_lower, &to->si_lower);
-		err |= __put_user(from->si_upper, &to->si_upper);
+		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+			err |= __put_user(from->si_lower, &to->si_lower);
+			err |= __put_user(from->si_upper, &to->si_upper);
+		}
 #endif
 		break;
 	case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 	int ret = copy_siginfo_from_user32(&info, uinfo);
 	if (unlikely(ret))
 		return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 
 	if (copy_siginfo_from_user32(&info, uinfo))
 		return -EFAULT;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 	unsigned long align_mask = 0;
 
 	if (align_order > 0)
-		align_mask = 0xffffffffffffffffl >> (64 - align_order);
+		align_mask = ~0ul >> (BITS_PER_LONG - align_order);
 
 	/* Sanity check */
 	if (unlikely(npages == 0)) {
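
The replacement derives the mask from the actual width of unsigned long instead of hard-coding 64, so it is portable to 32-bit builds and drops the awkward 64-bit literal. A worked example with align_order = 3:

/* Worked example, align_order = 3:
 *   old: 0xffffffffffffffffl >> (64 - 3)   = 0x7   (64-bit constant)
 *   new: ~0ul >> (BITS_PER_LONG - 3)       = 0x7
 * On a 32-bit build the new form is still ~0ul >> 29 = 0x7, with no
 * assumption about the width of unsigned long baked into a literal. */
unsigned long align_mask = ~0ul >> (BITS_PER_LONG - 3);	/* 0b111 */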
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
 		/* after clearing PageTail the gup refcount can be released */
 		smp_mb__after_atomic();
 
-		/*
-		 * retain hwpoison flag of the poisoned tail page:
-		 *   fix for the unsuitable process killed on Guest Machine(KVM)
-		 *   by the memory-failure.
-		 */
-		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		page_tail->flags |= (page->flags &
 				     ((1L << PG_referenced) |
 				      (1L << PG_swapbacked) |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..ea5a93659488 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
 	 * directly for tail pages.
 	 */
 	if (PageTransHuge(head)) {
+		/*
+		 * Non anonymous thp exists only in allocation/free time. We
+		 * can't handle such a case correctly, so let's give it up.
+		 * This should be better than triggering BUG_ON when kernel
+		 * tries to touch the "partially handled" page.
+		 */
+		if (!PageAnon(head)) {
+			pr_err("MCE: %#lx: non anonymous thp\n",
+			       page_to_pfn(page));
+			return 0;
+		}
+
 		if (get_page_unless_zero(head)) {
 			if (PageTail(page))
 				get_page(page);
@@ -1134,15 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	if (!PageHuge(p) && PageTransHuge(hpage)) {
-		if (!PageAnon(hpage)) {
-			pr_err("MCE: %#lx: non anonymous thp\n", pfn);
-			if (TestClearPageHWPoison(p))
-				atomic_long_sub(nr_pages, &num_poisoned_pages);
-			put_page(p);
-			if (p != hpage)
-				put_page(hpage);
-			return -EBUSY;
-		}
 		if (unlikely(split_huge_page(hpage))) {
 			pr_err("MCE: %#lx: thp split failed\n", pfn);
 			if (TestClearPageHWPoison(p))
@@ -1209,9 +1212,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	if (!PageHWPoison(p)) {
 		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
 		atomic_long_sub(nr_pages, &num_poisoned_pages);
+		unlock_page(hpage);
 		put_page(hpage);
-		res = 0;
-		goto out;
+		return 0;
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
@@ -1656,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags)
 		inc_zone_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
+		if (!TestSetPageHWPoison(page))
+			atomic_long_inc(&num_poisoned_pages);
 		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
 		if (ret) {
@@ -1670,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags)
 				pfn, ret, page->flags);
 			if (ret > 0)
 				ret = -EIO;
-		} else {
-			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
+			if (TestClearPageHWPoison(page))
+				atomic_long_dec(&num_poisoned_pages);
 		}
 	} else {
 		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..003dbe4b060d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	int nr_pages = PAGES_PER_SECTION;
 	int nid = pgdat->node_id;
 	int zone_type;
-	unsigned long flags;
+	unsigned long flags, pfn;
 	int ret;
 
 	zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 	memmap_init_zone(nr_pages, nid, zone_type,
 			 phys_start_pfn, MEMMAP_HOTPLUG);
+
+	/* online_page_range is called later and expects pages reserved */
+	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+		if (!pfn_valid(pfn))
+			continue;
+
+		SetPageReserved(pfn_to_page(pfn));
+	}
 	return 0;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+			TTU_IGNORE_HWPOISON);
 		page_was_mapped = 1;
 	}
 
@@ -950,7 +951,10 @@ out:
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		if (reason != MR_MEMORY_FAILURE)
+		/* Soft-offlined page shouldn't go through lru cache list */
+		if (reason == MR_MEMORY_FAILURE)
+			put_page(page);
+		else
 			putback_lru_page(page);
 	}
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
+	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
+
 	writeback_set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
-
-	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
 }
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef19f22b2b7d..beda41710802 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/interrupt.h>
-#include <linux/rwsem.h>
 #include <linux/pagemap.h>
 #include <linux/jiffies.h>
 #include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
 
 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
 	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-/* Only safe to use early in boot when initialisation is single-threaded */
+
 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 
 int __meminit early_pfn_to_nid(unsigned long pfn)
 {
+	static DEFINE_SPINLOCK(early_pfn_lock);
 	int nid;
 
-	/* The system will behave unpredictably otherwise */
-	BUG_ON(system_state != SYSTEM_BOOTING);
-
+	spin_lock(&early_pfn_lock);
 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
-	if (nid >= 0)
-		return nid;
-	/* just returns 0 */
-	return 0;
+	if (nid < 0)
+		nid = 0;
+	spin_unlock(&early_pfn_lock);
+
+	return nid;
 }
 #endif
 
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
 		__free_pages_boot_core(page, pfn, 0);
 }
 
-static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+/* Completion tracking for deferred_init_memmap() threads */
+static atomic_t pgdat_init_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
+
+static inline void __init pgdat_init_report_one_done(void)
+{
+	if (atomic_dec_and_test(&pgdat_init_n_undone))
+		complete(&pgdat_init_all_done_comp);
+}
 
 /* Initialise remaining memory on a node */
 static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	if (first_init_pfn == ULONG_MAX) {
-		up_read(&pgdat_init_rwsem);
+		pgdat_init_report_one_done();
 		return 0;
 	}
 
@@ -1177,7 +1184,8 @@ free_range:
 
 	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
 					jiffies_to_msecs(jiffies - start));
-	up_read(&pgdat_init_rwsem);
+
+	pgdat_init_report_one_done();
 	return 0;
 }
 
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
 {
 	int nid;
 
+	/* There will be num_node_state(N_MEMORY) threads */
+	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
 	for_each_node_state(nid, N_MEMORY) {
-		down_read(&pgdat_init_rwsem);
 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
 	}
 
 	/* Block until all are initialised */
-	down_write(&pgdat_init_rwsem);
-	up_write(&pgdat_init_rwsem);
+	wait_for_completion(&pgdat_init_all_done_comp);
+
+	/* Reinit limits that are based on free pages after the kernel is up */
+	files_maxfiles_init();
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
 		bad_reason = "non-NULL mapping";
 	if (unlikely(atomic_read(&page->_count) != 0))
 		bad_reason = "nonzero _count";
+	if (unlikely(page->flags & __PG_HWPOISON)) {
+		bad_reason = "HWPoisoned (hardware-corrupted)";
+		bad_flags = __PG_HWPOISON;
+	}
 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
 		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
 		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
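
On the rwsem scheme visible above, down_read() ran in the boot task inside page_alloc_init_late() while the matching up_read() ran in each pgdatinit kthread, so the lock was taken and released by different tasks; the atomic counter plus completion expresses the same join barrier without that misuse. The pattern in isolation, with a hypothetical worker:

/* The counter + completion join barrier in isolation (sketch). */
static atomic_t n_undone;
static DECLARE_COMPLETION(all_done);

static int worker(void *data)
{
	/* ... per-node work ... */
	if (atomic_dec_and_test(&n_undone))
		complete(&all_done);		/* last finisher signals */
	return 0;
}

static void launch_and_wait(int nr_workers)
{
	int i;

	atomic_set(&n_undone, nr_workers);	/* set before spawning */
	for (i = 0; i < nr_workers; i++)
		kthread_run(worker, NULL, "worker/%d", i);
	wait_for_completion(&all_done);		/* join */
}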
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
  *	kernel internal. There will be NO LSM permission checks against the
  *	underlying inode. So users of this interface must do LSM checks at a
- *	higher layer. The one user is the big_key implementation. LSM checks
- *	are provided at the key level rather than the inode level.
+ *	higher layer. The users are the big_key and shm implementations. LSM
+ *	checks are provided at the key or shm level rather than the inode.
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
 		SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.