author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 17:24:20 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 17:24:20 -0500
commit    3100e448e7d74489a96cb7b45d88fe6962774eaa
tree      53e46a702bd191ca43639b560d2bb1d3b0ad18c8  /arch/x86/mm
parent    c9f861c77269bc9950c16c6404a9476062241671
parent    26893107aa717cd11010f0c278d02535defa1ac9
Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 vdso updates from Ingo Molnar:
 "Various vDSO updates from Andy Lutomirski, mostly cleanups and
  reorganization to improve maintainability, but also some
  micro-optimizations and robustization changes"

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86_64/vsyscall: Restore orig_ax after vsyscall seccomp
  x86_64: Add a comment explaining the TASK_SIZE_MAX guard page
  x86_64,vsyscall: Make vsyscall emulation configurable
  x86_64, vsyscall: Rewrite comment and clean up headers in vsyscall code
  x86_64, vsyscall: Turn vsyscalls all the way off when vsyscall==none
  x86,vdso: Use LSL unconditionally for vgetcpu
  x86: vdso: Fix build with older gcc
  x86_64/vdso: Clean up vgetcpu init and merge the vdso initcalls
  x86_64/vdso: Remove jiffies from the vvar page
  x86/vdso: Make the PER_CPU segment 32 bits
  x86/vdso: Make the PER_CPU segment start out accessed
  x86/vdso: Change the PER_CPU segment to use struct desc_struct
  x86_64/vdso: Move getcpu code from vsyscall_64.c to vdso/vma.c
  x86_64/vsyscall: Move all of the gate_area code to vsyscall_64.c
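One item above, "x86,vdso: Use LSL unconditionally for vgetcpu", leans on the per-CPU GDT segment encoding the CPU and node numbers in its segment limit, which code running in userspace can read back with a single LSL instruction. A minimal sketch of the idea follows; the 12-bit split and the selector name are illustrative assumptions, not the exact kernel source:

/*
 * Sketch: the kernel programs a per-CPU GDT entry whose segment
 * limit is (node << 12) | cpu; LSL loads that limit without any
 * memory access, so the vDSO can use it from any context.
 * __PER_CPU_SELECTOR stands in for the real selector constant.
 */
static inline void sketch_getcpu(unsigned int *cpu, unsigned int *node)
{
	unsigned int p;

	/* LSL reads the segment limit for the given selector. */
	asm("lsl %1, %0" : "=r" (p) : "r" (__PER_CPU_SELECTOR));

	if (cpu)
		*cpu = p & 0xfff;	/* low 12 bits: CPU number */
	if (node)
		*node = p >> 12;	/* remaining bits: NUMA node */
}

Presumably the related commits that make the PER_CPU segment 32 bits and start out "accessed" exist so this load is always valid and never forces the CPU to write the accessed bit back into the GDT.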
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init_64.c  |  49 -
1 file changed, 0 insertions(+), 49 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 78e53c80fc12..30eb05ae7061 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1204,55 +1204,6 @@ int kern_addr_valid(unsigned long addr)
 	return pfn_valid(pte_pfn(*pte));
 }
 
-/*
- * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
- * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
- * not need special handling anymore:
- */
-static const char *gate_vma_name(struct vm_area_struct *vma)
-{
-	return "[vsyscall]";
-}
-static struct vm_operations_struct gate_vma_ops = {
-	.name = gate_vma_name,
-};
-static struct vm_area_struct gate_vma = {
-	.vm_start	= VSYSCALL_ADDR,
-	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
-	.vm_page_prot	= PAGE_READONLY_EXEC,
-	.vm_flags	= VM_READ | VM_EXEC,
-	.vm_ops		= &gate_vma_ops,
-};
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-#ifdef CONFIG_IA32_EMULATION
-	if (!mm || mm->context.ia32_compat)
-		return NULL;
-#endif
-	return &gate_vma;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	struct vm_area_struct *vma = get_gate_vma(mm);
-
-	if (!vma)
-		return 0;
-
-	return (addr >= vma->vm_start) && (addr < vma->vm_end);
-}
-
-/*
- * Use this when you have no reliable mm, typically from interrupt
- * context.  It is less reliable than using a task's mm and may give
- * false positives.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
-}
-
 static unsigned long probe_memory_block_size(void)
 {
 	/* start from 2g */
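Note that the helpers deleted above were not dropped from the kernel: per "x86_64/vsyscall: Move all of the gate_area code to vsyscall_64.c" in the list above, they moved into the vsyscall emulation code. Combined with "Turn vsyscalls all the way off when vsyscall==none", the relocated check plausibly ends up shaped like the sketch below; the vsyscall_mode variable and its NONE value are taken from the commit titles, not from the diff itself:

/* Hedged sketch of the relocated helper in vsyscall_64.c. */
int in_gate_area_no_mm(unsigned long addr)
{
	/* With vsyscall=none the gate page is not mapped at all. */
	if (vsyscall_mode == NONE)
		return 0;
	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

Moving the pseudo-VMA next to the rest of the vsyscall machinery lets a single boot parameter govern both the mapping and the ptrace-visible gate area, which is the robustization the pull message alludes to.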