Diffstat:
-rw-r--r--  arch/arm64/include/asm/page.h        3
-rw-r--r--  arch/arm64/kernel/vdso.c            19
-rw-r--r--  arch/ia64/include/asm/page.h         2
-rw-r--r--  arch/ia64/mm/init.c                 31
-rw-r--r--  arch/powerpc/include/asm/page.h      3
-rw-r--r--  arch/powerpc/kernel/vdso.c          16
-rw-r--r--  arch/s390/include/asm/page.h         2
-rw-r--r--  arch/s390/kernel/vdso.c             15
-rw-r--r--  arch/sh/include/asm/page.h           5
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c  15
-rw-r--r--  arch/tile/include/asm/page.h         6
-rw-r--r--  arch/tile/kernel/vdso.c             15
-rw-r--r--  arch/um/include/asm/page.h           5
-rw-r--r--  arch/x86/include/asm/page.h          1
-rw-r--r--  arch/x86/include/asm/page_64.h       2
-rw-r--r--  arch/x86/um/asm/elf.h                1
-rw-r--r--  arch/x86/um/mem_64.c                15
-rw-r--r--  arch/x86/vdso/vdso32-setup.c        19
-rw-r--r--  include/linux/mm.h                  17
-rw-r--r--  mm/memory.c                         38
-rw-r--r--  mm/nommu.c                           5

21 files changed, 53 insertions(+), 182 deletions(-)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 7a3f462133b0..22b16232bd60 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -28,9 +28,6 @@
 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
  * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a81a446a5786..32aeea083d93 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -195,25 +195,6 @@ up_fail:
 }
 
 /*
- * We define AT_SYSINFO_EHDR, so we need these function stubs to keep
- * Linux happy.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-        return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
-/*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
 void update_vsyscall(struct timekeeper *tk)
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index f1e1b2e3cdb3..1f1bf144fe62 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -231,4 +231,6 @@ get_order (unsigned long size)
 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
 #define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #endif /* _ASM_IA64_PAGE_H */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 892d43e32f3b..6b3345758d3e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -278,6 +278,37 @@ setup_gate (void)
         ia64_patch_gate();
 }
 
+static struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+        gate_vma.vm_mm = NULL;
+        gate_vma.vm_start = FIXADDR_USER_START;
+        gate_vma.vm_end = FIXADDR_USER_END;
+        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+        gate_vma.vm_page_prot = __P101;
+
+        return 0;
+}
+__initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+        return &gate_vma;
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
+                return 1;
+        return 0;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+        return in_gate_area_no_mm(addr);
+}
+
 void ia64_mmu_init(void *my_cpu_data)
 {
         unsigned long pta, impl_va_bits;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 32e4e212b9c1..26fe1ae15212 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -48,9 +48,6 @@ extern unsigned int HPAGE_SHIFT;
 #define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
 #endif
 
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
 /*
  * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
  * assign PAGE_MASK to a larger type it gets extended the way we want
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce74c335a6a4..f174351842cf 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -840,19 +840,3 @@ static int __init vdso_init(void)
         return 0;
 }
 arch_initcall(vdso_init);
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-        return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 114258eeaacd..7b2ac6e44166 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -162,6 +162,4 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
-#define __HAVE_ARCH_GATE_AREA 1
-
 #endif /* _S390_PAGE_H */
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 613649096783..0bbb7e027c5a 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -316,18 +316,3 @@ static int __init vdso_init(void)
         return 0;
 }
 early_initcall(vdso_init);
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-        return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 15d970328f71..fe20d14ae051 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -186,11 +186,6 @@ typedef struct page *pgtable_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
-/* vDSO support */
-#ifdef CONFIG_VSYSCALL
-#define __HAVE_ARCH_GATE_AREA
-#endif
-
 /*
  * Some drivers need to perform DMA into kmalloc'ed buffers
  * and so we have to increase the kmalloc minalign for this.
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 5ca579720a09..ea2aa1393b87 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -92,18 +92,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
         return NULL;
 }
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long address)
-{
-        return 0;
-}
-
-int in_gate_area_no_mm(unsigned long address)
-{
-        return 0;
-}
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 672768008618..a213a8d84a95 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -39,12 +39,6 @@
 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
 
 /*
- * We do define AT_SYSINFO_EHDR to support vDSO,
- * but don't use the gate mechanism.
- */
-#define __HAVE_ARCH_GATE_AREA 1
-
-/*
  * If the Kconfig doesn't specify, set a maximum zone order that
  * is enough so that we can create huge pages from small pages given
  * the respective sizes of the two page types. See <linux/mmzone.h>.
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
index 1533af24106e..5bc51d7dfdcb 100644
--- a/arch/tile/kernel/vdso.c
+++ b/arch/tile/kernel/vdso.c
@@ -121,21 +121,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
         return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long address)
-{
-        return 0;
-}
-
-int in_gate_area_no_mm(unsigned long address)
-{
-        return 0;
-}
-
 int setup_vdso_pages(void)
 {
         struct page **pagelist;
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index 5ff53d9185f7..71c5d132062a 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -119,4 +119,9 @@ extern unsigned long uml_physmem;
 #include <asm-generic/getorder.h>
 
 #endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
 #endif /* __UM_PAGE_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 775873d3be55..802dde30c928 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -70,7 +70,6 @@ extern bool __virt_addr_valid(unsigned long kaddr);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
-#define __HAVE_ARCH_GATE_AREA 1
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 0f1ddee6a0ce..f408caf73430 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -39,4 +39,6 @@ void copy_page(void *to, void *from);
 
 #endif /* !__ASSEMBLY__ */
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index 0feee2fd5077..25a1022dd793 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -216,6 +216,5 @@ extern long elf_aux_hwcap;
 #define ELF_HWCAP (elf_aux_hwcap)
 
 #define SET_PERSONALITY(ex) do ; while(0)
-#define __HAVE_ARCH_GATE_AREA 1
 
 #endif
diff --git a/arch/x86/um/mem_64.c b/arch/x86/um/mem_64.c
index c6492e75797b..f8fecaddcc0d 100644
--- a/arch/x86/um/mem_64.c
+++ b/arch/x86/um/mem_64.c
@@ -9,18 +9,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
         return NULL;
 }
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-        return 0;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index e4f7781ee162..e904c270573b 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -115,23 +115,6 @@ static __init int ia32_binfmt_init(void)
         return 0;
 }
 __initcall(ia32_binfmt_init);
-#endif
-
-#else /* CONFIG_X86_32 */
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-        return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-        return 0;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
+#endif /* CONFIG_SYSCTL */
 
 #endif /* CONFIG_X86_64 */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e03dd29145a0..8981cc882ed2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2014,13 +2014,20 @@ static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
 #endif
 
+#ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
-#ifdef __HAVE_ARCH_GATE_AREA
-int in_gate_area_no_mm(unsigned long addr);
-int in_gate_area(struct mm_struct *mm, unsigned long addr);
+extern int in_gate_area_no_mm(unsigned long addr);
+extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
 #else
-int in_gate_area_no_mm(unsigned long addr);
-#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
+static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+        return NULL;
+}
+static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
+static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+        return 0;
+}
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_SYSCTL
diff --git a/mm/memory.c b/mm/memory.c
index 2a899e4e82ba..ab3537bcfed2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3430,44 +3430,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-#if !defined(__HAVE_ARCH_GATE_AREA)
-
-#if defined(AT_SYSINFO_EHDR)
-static struct vm_area_struct gate_vma;
-
-static int __init gate_vma_init(void)
-{
-        gate_vma.vm_mm = NULL;
-        gate_vma.vm_start = FIXADDR_USER_START;
-        gate_vma.vm_end = FIXADDR_USER_END;
-        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-        gate_vma.vm_page_prot = __P101;
-
-        return 0;
-}
-__initcall(gate_vma_init);
-#endif
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-#ifdef AT_SYSINFO_EHDR
-        return &gate_vma;
-#else
-        return NULL;
-#endif
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-#ifdef AT_SYSINFO_EHDR
-        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
-                return 1;
-#endif
-        return 0;
-}
-
-#endif /* __HAVE_ARCH_GATE_AREA */
-
 static int __follow_pte(struct mm_struct *mm, unsigned long address,
                         pte_t **ptepp, spinlock_t **ptlp)
 {
diff --git a/mm/nommu.c b/mm/nommu.c
index 4a852f6c5709..a881d9673c6b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1981,11 +1981,6 @@ error:
         return -ENOMEM;
 }
 
-int in_gate_area_no_mm(unsigned long addr)
-{
-        return 0;
-}
-
 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         BUG();
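
For context: after this patch, the gate-area helpers are declared once in include/linux/mm.h, with inline no-op versions for architectures that do not define __HAVE_ARCH_GATE_AREA. The sketch below is not part of the patch; addr_is_in_gate() is a hypothetical caller added only to illustrate that generic code can now use the helpers without any #ifdef.

/*
 * Minimal sketch, assuming only the declarations added to <linux/mm.h>
 * above: on architectures without __HAVE_ARCH_GATE_AREA, get_gate_vma()
 * returns NULL and in_gate_area() returns 0, so the call site needs no
 * architecture-specific conditionals.
 */
#include <linux/mm.h>

static bool addr_is_in_gate(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *gate = get_gate_vma(mm);  /* may be NULL */

        return gate != NULL && in_gate_area(mm, addr);
}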