aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-04-02 15:26:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-02 15:26:43 -0400
commitc6f21243ce1e8d81ad8361da4d2eaa5947b667c4 (patch)
tree5157ca1de2e7a5371575fff92ce23f0d09e3e7ea /mm
parent9447dc43941cd1c006cae934984524b7c957b803 (diff)
parent37c975545ec63320789962bf307f000f08fabd48 (diff)
Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 vdso changes from Peter Anvin: "This is the revamp of the 32-bit vdso and the associated cleanups. This adds timekeeping support to the 32-bit vdso that we already have in the 64-bit vdso. Although 32-bit x86 is legacy, it is likely to remain in the embedded space for a very long time to come. This removes the traditional COMPAT_VDSO support; the configuration variable is reused for simply removing the 32-bit vdso, which will produce correct results but obviously suffer a performance penalty. Only one beta version of glibc was affected, but that version was unfortunately included in one OpenSUSE release. This is not the end of the vdso cleanups. Stefani and Andy have agreed to continue work for the next kernel cycle; in fact Andy has already produced another set of cleanups that came too late for this cycle. An incidental, but arguably important, change is that this ensures that unused space in the VVAR page is properly zeroed. It wasn't before, and would contain whatever garbage was left in memory by BIOS or the bootloader. 
Since the VVAR page is accessible to user space this had the potential of information leaks" * 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits) x86, vdso: Fix the symbol versions on the 32-bit vDSO x86, vdso, build: Don't rebuild 32-bit vdsos on every make x86, vdso: Actually discard the .discard sections x86, vdso: Fix size of get_unmapped_area() x86, vdso: Finish removing VDSO32_PRELINK x86, vdso: Move more vdso definitions into vdso.h x86: Load the 32-bit vdso in place, just like the 64-bit vdsos x86, vdso32: handle 32 bit vDSO larger one page x86, vdso32: Disable stack protector, adjust optimizations x86, vdso: Zero-pad the VVAR page x86, vdso: Add 32 bit VDSO time support for 64 bit kernel x86, vdso: Add 32 bit VDSO time support for 32 bit kernel x86, vdso: Patch alternatives in the 32-bit VDSO x86, vdso: Introduce VVAR marco for vdso32 x86, vdso: Cleanup __vdso_gettimeofday() x86, vdso: Replace VVAR(vsyscall_gtod_data) by gtod macro x86, vdso: __vdso_clock_gettime() cleanup x86, vdso: Revamp vclock_gettime.c mm: Add new func _install_special_mapping() to mmap.c x86, vdso: Make vsyscall_gtod_data handling x86 generic ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmap.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 20ff0c33274c..81ba54ff96c7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2918,7 +2918,7 @@ static const struct vm_operations_struct special_mapping_vmops = {
  * The array pointer and the pages it points to are assumed to stay alive
  * for as long as this mapping might exist.
  */
-int install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
 			    unsigned long addr, unsigned long len,
 			    unsigned long vm_flags, struct page **pages)
 {
@@ -2927,7 +2927,7 @@ int install_special_mapping(struct mm_struct *mm,
 
 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (unlikely(vma == NULL))
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	vma->vm_mm = mm;
@@ -2948,11 +2948,23 @@ int install_special_mapping(struct mm_struct *mm,
 
 	perf_event_mmap(vma);
 
-	return 0;
+	return vma;
 
 out:
 	kmem_cache_free(vm_area_cachep, vma);
-	return ret;
+	return ERR_PTR(ret);
+}
+
+int install_special_mapping(struct mm_struct *mm,
+			    unsigned long addr, unsigned long len,
+			    unsigned long vm_flags, struct page **pages)
+{
+	struct vm_area_struct *vma = _install_special_mapping(mm,
+		addr, len, vm_flags, pages);
+
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+	return 0;
 }
 
 static DEFINE_MUTEX(mm_all_locks_mutex);