author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-05 18:50:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-05 18:50:25 -0500
commit     3e85fb9cd4f711d70c5d26baa86e438390731eab (patch)
tree       85f0abea7e932a7e7c75cef2773fb648b35db1f4
parent     055bf38d3d6069707e2d555cffdde629b8404ff2 (diff)
parent     b24823e61bfd93d0e72088e4f5245287582ed289 (diff)
Merge branch 'akpm' (Andrew's patch bomb)
Merge the emailed series of 19 patches from Andrew Morton.

* akpm:
  rapidio/tsi721: fix queue wrapping bug in inbound doorbell handler
  memcg: fix mapcount check in move charge code for anonymous page
  mm: thp: fix BUG on mm->nr_ptes
  alpha: fix 32/64-bit bug in futex support
  memcg: fix GPF when cgroup removal races with last exit
  debugobjects: Fix selftest for static warnings
  floppy/scsi: fix setting of BIO flags
  memcg: fix deadlock by inverting lrucare nesting
  drivers/rtc/rtc-r9701.c: fix crash in r9701_remove()
  c2port: class_create() returns an ERR_PTR
  pps: class_create() returns an ERR_PTR, not NULL
  hung_task: fix the broken rcu_lock_break() logic
  vfork: kill PF_STARTING
  coredump_wait: don't call complete_vfork_done()
  vfork: make it killable
  vfork: introduce complete_vfork_done()
  aio: wake up waiters when freeing unused kiocbs
  kprobes: return proper error code from register_kprobe()
  kmsg_dump: don't run on non-error paths by default
-rw-r--r--  Documentation/kernel-parameters.txt     6
-rw-r--r--  arch/alpha/include/asm/futex.h          2
-rw-r--r--  drivers/block/floppy.c                  2
-rw-r--r--  drivers/misc/c2port/core.c              4
-rw-r--r--  drivers/pps/pps.c                       4
-rw-r--r--  drivers/rapidio/devices/tsi721.c        5
-rw-r--r--  drivers/rtc/rtc-r9701.c                14
-rw-r--r--  drivers/scsi/sd_dif.c                   2
-rw-r--r--  fs/aio.c                                2
-rw-r--r--  fs/exec.c                              18
-rw-r--r--  include/linux/kmsg_dump.h               9
-rw-r--r--  include/linux/memcontrol.h              5
-rw-r--r--  include/linux/sched.h                   3
-rw-r--r--  kernel/fork.c                          60
-rw-r--r--  kernel/hung_task.c                     11
-rw-r--r--  kernel/kprobes.c                       12
-rw-r--r--  kernel/printk.c                         6
-rw-r--r--  lib/debugobjects.c                     14
-rw-r--r--  mm/huge_memory.c                        6
-rw-r--r--  mm/ksm.c                               11
-rw-r--r--  mm/memcontrol.c                       104
-rw-r--r--  mm/migrate.c                            2
-rw-r--r--  mm/swap.c                               8
-rw-r--r--  mm/swap_state.c                        10
24 files changed, 156 insertions(+), 164 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 033d4e69b43b..d99fd9c0ec0e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 			default: off.
 
+	printk.always_kmsg_dump=
+			Trigger kmsg_dump for cases other than kernel oops or
+			panics
+			Format: <bool> (1/Y/y=enable, 0/N/n=disable)
+			default: disabled
+
 	printk.time=	Show timing data prefixed to each printk message line
 			Format: <bool> (1/Y/y=enable, 0/N/n=disable)
 
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index e8a761aee088..f939794363ac 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
108 " lda $31,3b-2b(%0)\n" 108 " lda $31,3b-2b(%0)\n"
109 " .previous\n" 109 " .previous\n"
110 : "+r"(ret), "=&r"(prev), "=&r"(cmp) 110 : "+r"(ret), "=&r"(prev), "=&r"(cmp)
111 : "r"(uaddr), "r"((long)oldval), "r"(newval) 111 : "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
112 : "memory"); 112 : "memory");
113 113
114 *uval = prev; 114 *uval = prev;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9baf11e86362..744f078f4dd8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3832,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_size = size;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = 0;
-	bio.bi_flags = BIO_QUIET;
+	bio.bi_flags = (1 << BIO_QUIET);
 	init_completion(&complete);
 	bio.bi_private = &complete;
 	bio.bi_end_io = floppy_rb0_complete;
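The floppy and sd_dif hunks in this merge fix the same class of bug: bio->bi_flags holds bit masks, while BIO_QUIET and BIO_MAPPED_INTEGRITY are bit numbers, so they have to be shifted before being assigned or OR-ed in. A minimal userspace C sketch of the distinction (the flag names and values below are illustrative, not the kernel's):

#include <stdio.h>

enum { FLAG_UPTODATE = 0, FLAG_QUIET = 5 };	/* bit numbers, mirroring how BIO_QUIET is a bit index */

int main(void)
{
	unsigned long wrong = FLAG_QUIET;		/* 0x5: accidentally sets bits 0 and 2 */
	unsigned long right = 1UL << FLAG_QUIET;	/* 0x20: sets only bit 5 */

	printf("wrong=%#lx right=%#lx\n", wrong, right);
	return 0;
}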
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 19fc7c1cb428..f428d86bfc10 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
984 " - (C) 2007 Rodolfo Giometti\n"); 984 " - (C) 2007 Rodolfo Giometti\n");
985 985
986 c2port_class = class_create(THIS_MODULE, "c2port"); 986 c2port_class = class_create(THIS_MODULE, "c2port");
987 if (!c2port_class) { 987 if (IS_ERR(c2port_class)) {
988 printk(KERN_ERR "c2port: failed to allocate class\n"); 988 printk(KERN_ERR "c2port: failed to allocate class\n");
989 return -ENOMEM; 989 return PTR_ERR(c2port_class);
990 } 990 }
991 c2port_class->dev_attrs = c2port_attrs; 991 c2port_class->dev_attrs = c2port_attrs;
992 992
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2baadd21b7a6..98fbe62694d4 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -369,9 +369,9 @@ static int __init pps_init(void)
 	int err;
 
 	pps_class = class_create(THIS_MODULE, "pps");
-	if (!pps_class) {
+	if (IS_ERR(pps_class)) {
 		pr_err("failed to allocate class\n");
-		return -ENOMEM;
+		return PTR_ERR(pps_class);
 	}
 	pps_class->dev_attrs = pps_attrs;
 
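Both the c2port and pps fixes address the same misunderstanding: class_create() reports failure with ERR_PTR(-errno), never NULL, so an `if (!ptr)` check can never fire. A rough userspace sketch of the ERR_PTR convention (simplified; the real helpers live in include/linux/err.h, and fake_class_create() here is a stand-in, not a kernel API):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in the top of the pointer range, as ERR_PTR() does. */
static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for class_create(): fails with an error pointer, never NULL. */
static void *fake_class_create(int fail)
{
	static int dummy;
	return fail ? err_ptr(-ENOMEM) : &dummy;
}

int main(void)
{
	void *cls = fake_class_create(1);

	if (!cls)		/* never true: the buggy pattern the patches remove */
		return 1;
	if (is_err(cls))	/* the corrected pattern */
		return (int)-ptr_err(cls);
	return 0;
}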
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 691b1ab1a3d0..30d2072f480b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
 	 */
 	mport = priv->mport;
 
-	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
-	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
 
 	while (wr_ptr != rd_ptr) {
 		idb_entry = (u64 *)(priv->idb_base +
 			(TSI721_IDB_ENTRY_SIZE * rd_ptr));
 		rd_ptr++;
+		rd_ptr %= IDB_QSIZE;
 		idb.msg = *idb_entry;
 		*idb_entry = 0;
 
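The doorbell queue is a ring of IDB_QSIZE entries, so both the hardware-supplied pointers and the software read pointer have to be reduced modulo the queue size; without the wrap, the `while (wr_ptr != rd_ptr)` walk can run past the ring. A small standalone sketch of the wrapped-index walk (QSIZE is a stand-in for IDB_QSIZE):

#include <stdio.h>

#define QSIZE 8	/* stand-in for IDB_QSIZE */

int main(void)
{
	/* Raw register values may exceed the ring size; wrap them first. */
	unsigned int wr_ptr = 10 % QSIZE;	/* becomes 2 */
	unsigned int rd_ptr = 6;

	while (rd_ptr != wr_ptr) {
		printf("consume doorbell entry %u\n", rd_ptr);
		rd_ptr++;
		rd_ptr %= QSIZE;		/* the wrap the patch adds */
	}
	return 0;
}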
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 9beba49c3c5b..2853c2a6f10f 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -125,6 +125,13 @@ static int __devinit r9701_probe(struct spi_device *spi)
 	unsigned char tmp;
 	int res;
 
+	tmp = R100CNT;
+	res = read_regs(&spi->dev, &tmp, 1);
+	if (res || tmp != 0x20) {
+		dev_err(&spi->dev, "cannot read RTC register\n");
+		return -ENODEV;
+	}
+
 	rtc = rtc_device_register("r9701",
 				&spi->dev, &r9701_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
@@ -132,13 +139,6 @@ static int __devinit r9701_probe(struct spi_device *spi)
 
 	dev_set_drvdata(&spi->dev, rtc);
 
-	tmp = R100CNT;
-	res = read_regs(&spi->dev, &tmp, 1);
-	if (res || tmp != 0x20) {
-		rtc_device_unregister(rtc);
-		return res;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 0cb39ff21171..f8fb2d691c0a 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 			kunmap_atomic(sdt, KM_USER0);
 		}
 
-		bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
 	}
 
 	return 0;
diff --git a/fs/aio.c b/fs/aio.c
index 969beb0e2231..67e4b9047cc9 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 		kmem_cache_free(kiocb_cachep, req);
 		ctx->reqs_active--;
 	}
+	if (unlikely(!ctx->reqs_active && ctx->dead))
+		wake_up_all(&ctx->wait);
 	spin_unlock_irq(&ctx->ctx_lock);
 }
 
diff --git a/fs/exec.c b/fs/exec.c
index 92ce83a11e90..153dee14fe55 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	struct completion *vfork_done;
 	int core_waiters = -EBUSY;
 
 	init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
 	up_write(&mm->mmap_sem);
 
-	if (unlikely(core_waiters < 0))
-		goto fail;
-
-	/*
-	 * Make sure nobody is waiting for us to release the VM,
-	 * otherwise we can deadlock when we wait on each other
-	 */
-	vfork_done = tsk->vfork_done;
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
-
-	if (core_waiters)
+	if (core_waiters > 0)
 		wait_for_completion(&core_state->startup);
-fail:
+
 	return core_waiters;
 }
 
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index fee66317e071..35f7237ec972 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -15,13 +15,18 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
 enum kmsg_dump_reason {
-	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
+	KMSG_DUMP_OOPS,
+	KMSG_DUMP_EMERG,
 	KMSG_DUMP_RESTART,
 	KMSG_DUMP_HALT,
 	KMSG_DUMP_POWEROFF,
-	KMSG_DUMP_EMERG,
 };
 
 /**
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4d34356fe644..b80de520670b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 				struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d379a6bfd88..0657368bd78f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1777,7 +1777,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2371,7 +2370,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
  * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/kernel/fork.c b/kernel/fork.c
index e2cd3e2a5ae8..26a7a6707fa7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 	return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+	struct completion *vfork;
+
+	task_lock(tsk);
+	vfork = tsk->vfork_done;
+	if (likely(vfork)) {
+		tsk->vfork_done = NULL;
+		complete(vfork);
+	}
+	task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+				struct completion *vfork)
+{
+	int killed;
+
+	freezer_do_not_count();
+	killed = wait_for_completion_killable(vfork);
+	freezer_count();
+
+	if (killed) {
+		task_lock(child);
+		child->vfork_done = NULL;
+		task_unlock(child);
+	}
+
+	put_task_struct(child);
+	return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-	struct completion *vfork_done = tsk->vfork_done;
-
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
 	if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
-	/* notify parent sleeping on vfork() */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
+	if (tsk->vfork_done)
+		complete_vfork_done(tsk);
 
 	/*
 	 * If we're exiting normally, clear a user-space tid field if
 	 * requested. We leave this alone when dying by signal, to leave
 	 * the value intact in a core dump, and to save the unnecessary
-	 * trouble otherwise. Userland only wants this done for a sys_exit.
+	 * trouble, say, a killed vfork parent shouldn't touch this mm.
+	 * Userland only wants this done for a sys_exit.
 	 */
 	if (tsk->clear_child_tid) {
 		if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
 	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
-	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
+			get_task_struct(p);
 		}
 
-		/*
-		 * We set PF_STARTING at creation in case tracing wants to
-		 * use this to distinguish a fully live task from one that
-		 * hasn't finished SIGSTOP raising yet. Now we clear it
-		 * and set the child going.
-		 */
-		p->flags &= ~PF_STARTING;
-
 		wake_up_new_task(p);
 
 		/* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
 			ptrace_event(trace, nr);
 
 		if (clone_flags & CLONE_VFORK) {
-			freezer_do_not_count();
-			wait_for_completion(&vfork);
-			freezer_count();
-			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+			if (!wait_for_vfork_done(p, &vfork))
+				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2e48ec0c2e91..c21449f85a2a 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9788c0ec6f43..c62b8546cc90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
 	    ftrace_text_reserved(p->addr, p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr))
-		goto fail_with_jump_label;
+	    jump_label_text_reserved(p->addr, p->addr)) {
+		ret = -EINVAL;
+		goto cannot_probe;
+	}
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		 * its code to prohibit unexpected unloading.
 		 */
 		if (unlikely(!try_module_get(probed_mod)))
-			goto fail_with_jump_label;
+			goto cannot_probe;
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			goto fail_with_jump_label;
+			goto cannot_probe;
 		}
 		/* ret will be updated by following code */
 	}
@@ -1409,7 +1411,7 @@ out:
 
 	return ret;
 
-fail_with_jump_label:
+cannot_probe:
 	preempt_enable();
 	jump_label_unlock();
 	return ret;
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f49..32690a0b7a18 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	unsigned long l1, l2;
 	unsigned long flags;
 
+	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+		return;
+
 	/* Theoretically, the log could move on after we do this, but
 	   there's not a lot we can do about that. The new messages
 	   will overwrite the start of what we dump. */
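With the reordered enum in kmsg_dump.h, this gate only lets KMSG_DUMP_PANIC and KMSG_DUMP_OOPS through unless printk.always_kmsg_dump is set. A userspace sketch of that filter (the enum values below simply mirror the new ordering; nothing else here is kernel code):

#include <stdbool.h>
#include <stdio.h>

enum dump_reason { DUMP_PANIC, DUMP_OOPS, DUMP_EMERG, DUMP_RESTART, DUMP_HALT, DUMP_POWEROFF };

static bool always_kmsg_dump;	/* models the printk.always_kmsg_dump parameter */

static void dump(enum dump_reason reason)
{
	if (reason > DUMP_OOPS && !always_kmsg_dump)
		return;			/* reboot/halt/poweroff paths are skipped by default */
	printf("dumping kernel log, reason=%d\n", reason);
}

int main(void)
{
	dump(DUMP_PANIC);	/* always dumped */
	dump(DUMP_RESTART);	/* silent unless always_kmsg_dump is enabled */
	always_kmsg_dump = true;
	dump(DUMP_RESTART);	/* now dumped */
	return 0;
}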
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 77cb245f8e7b..0ab9ae8057f0 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -818,17 +818,9 @@ static int __init fixup_activate(void *addr, enum debug_obj_state state)
 		if (obj->static_init == 1) {
 			debug_object_init(obj, &descr_type_test);
 			debug_object_activate(obj, &descr_type_test);
-			/*
-			 * Real code should return 0 here ! This is
-			 * not a fixup of some bad behaviour. We
-			 * merily call the debug_init function to keep
-			 * track of the object.
-			 */
-			return 1;
-		} else {
-			/* Real code needs to emit a warning here */
+			return 0;
 		}
-		return 0;
+		return 1;
 
 	case ODEBUG_STATE_ACTIVE:
 		debug_object_deactivate(obj, &descr_type_test);
@@ -967,7 +959,7 @@ static void __init debug_objects_selftest(void)
 
 	obj.static_init = 1;
 	debug_object_activate(&obj, &descr_type_test);
-	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
 		goto out;
 	debug_object_init(&obj, &descr_type_test);
 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91d3efb25d15..8f7fc394f636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 
 	ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		VM_BUG_ON(!PageHead(page));
+		tlb->mm->nr_ptes--;
 		spin_unlock(&tlb->mm->page_table_lock);
 		tlb_remove_page(tlb, page);
 		pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
 		pte_unmap(pte);
 	}
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	/*
 	 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
diff --git a/mm/ksm.c b/mm/ksm.c
index 1925ffbfb27f..310544a379ae 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid. Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 		copy_user_highpage(new_page, page, address, vma);
 
 		SetPageDirty(new_page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 228d6461c12a..5585dc3d3646 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU. Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -5077,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
 		return NULL;
 	if (PageAnon(page)) {
 		/* we don't move shared anon */
-		if (!move_anon() || page_mapcount(page) > 2)
+		if (!move_anon() || page_mapcount(page) > 1)
 			return NULL;
 	} else if (!move_file())
 		/* we ignore mapcount for file pages */
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f60289e..1503b6b54ecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (!newpage)
 		return -ENOMEM;
 
-	mem_cgroup_reset_owner(newpage);
-
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto out;
diff --git a/mm/swap.c b/mm/swap.c
index fff1ff7fb9ad..14380e9fbe33 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
 		       struct page *page, struct page *page_tail)
 {
-	int active;
+	int uninitialized_var(active);
 	enum lru_list lru;
 	const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
 			active = 0;
 			lru = LRU_INACTIVE_ANON;
 		}
-		update_page_reclaim_stat(zone, page_tail, file, active);
 	} else {
 		SetPageUnevictable(page_tail);
 		lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
+
+	if (!PageUnevictable(page))
+		update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
 	add_page_to_lru_list(zone, page, lru);
+	update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a91873..ea6b32d61873 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			new_page = alloc_page_vma(gfp_mask, vma, addr);
 			if (!new_page)
 				break; /* Out of memory */
-			/*
-			 * The memcg-specific accounting when moving
-			 * pages around the LRU lists relies on the
-			 * page's owner (memcg) to be valid. Usually,
-			 * pages are assigned to a new owner before
-			 * being put on the LRU list, but since this
-			 * is not the case here, the stale owner from
-			 * a previous allocation cycle must be reset.
-			 */
-			mem_cgroup_reset_owner(new_page);
 		}
 
 		/*