author    Laurent Dufour <ldufour@linux.vnet.ibm.com>     2015-06-24 19:56:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 20:49:41 -0400
commit    83d3f0e90c6c8f833e3da91917c243a916fda69e
tree      4485604af0dc0d2e3045f869465b38724f7825d2 /arch
parent    4abad2ca4a4dbdd4a218c12451231ab628f2e60c
powerpc/mm: tracking vDSO remap
Some processes (CRIU) move the vDSO area using the mremap system call. As a consequence, the kernel reference to the vDSO base address is no longer valid, and the signal return frame built after the vDSO has been moved does not point to the new sigreturn address.

This patch handles vDSO remapping and unmapping.

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
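A hypothetical userspace sketch of the scenario the changelog describes (not part of this patch; the /proc/self/maps parsing and the chosen destination are illustrative assumptions): a checkpoint/restore-style tool locates its "[vdso]" mapping and moves it with mremap(), then takes a signal, which on powerpc returns through the vDSO sigreturn trampoline — the case the kernel-side tracking below keeps working.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <sys/mman.h>

static void handler(int sig) { (void)sig; }

int main(void)
{
	unsigned long start = 0, end = 0;
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;
	/* Find the current [vdso] mapping of this process. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]")) {
			sscanf(line, "%lx-%lx", &start, &end);
			break;
		}
	}
	fclose(maps);
	if (!start)
		return 1;

	/* Reserve a destination, then move the vDSO on top of it. */
	size_t len = end - start;
	void *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;
	if (mremap((void *)start, len, len,
		   MREMAP_MAYMOVE | MREMAP_FIXED, dst) == MAP_FAILED)
		return 1;

	/*
	 * Deliver a signal: without the kernel tracking the remap, the
	 * sigreturn trampoline would still reference the old vDSO address.
	 */
	signal(SIGUSR1, handler);
	raise(SIGUSR1);
	printf("vDSO moved from %#lx to %p and signal return survived\n",
	       start, dst);
	return 0;
}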
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/mm-arch-hooks.h   13
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h     23
2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/mm-arch-hooks.h b/arch/powerpc/include/asm/mm-arch-hooks.h
index 63091a19de9f..f2a2da895897 100644
--- a/arch/powerpc/include/asm/mm-arch-hooks.h
+++ b/arch/powerpc/include/asm/mm-arch-hooks.h
@@ -12,4 +12,17 @@
 #ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
 #define _ASM_POWERPC_MM_ARCH_HOOKS_H
 
+static inline void arch_remap(struct mm_struct *mm,
+			      unsigned long old_start, unsigned long old_end,
+			      unsigned long new_start, unsigned long new_end)
+{
+	/*
+	 * mremap() doesn't allow moving multiple vmas so we can limit the
+	 * check to old_start == vdso_base.
+	 */
+	if (old_start == mm->context.vdso_base)
+		mm->context.vdso_base = new_start;
+}
+#define arch_remap arch_remap
+
 #endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */
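For context (an assumption here, since the call site belongs to the companion generic-mm change rather than to this file): arch_remap() is meant to be invoked from the generic mremap() move path once a VMA has been moved, roughly along the lines of:

	/* sketch of the expected caller, after a successful VMA move */
	arch_remap(mm, old_addr, old_addr + old_len,
		   new_addr, new_addr + new_len);

Because mremap() only ever moves a single VMA, comparing old_start against vdso_base is sufficient to detect a vDSO move.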
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 3e5184210d9b..878c27771717 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
 #include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputhreads.h>
 
 /*
@@ -127,5 +126,27 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+		mm->context.vdso_base = 0;
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
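Why clearing vdso_base to 0 in arch_unmap() is enough (a paraphrased sketch of the consumer, not quoted from the powerpc signal code; setup_stack_trampoline() is a hypothetical name): signal frame setup only points the return path at the vDSO trampoline when vdso_base is non-zero, and otherwise falls back to a trampoline written on the signal stack, so an unmapped vDSO degrades gracefully instead of jumping to a stale address.

	/* paraphrased sketch, assuming this shape of the signal setup code */
	if (vdso_rt_sigtramp && current->mm->context.vdso_base)
		regs->link = current->mm->context.vdso_base + vdso_rt_sigtramp;
	else
		regs->link = setup_stack_trampoline(frame);  /* hypothetical helper */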