author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-05 11:05:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-05 11:05:29 -0400
commit		a0abcf2e8f8017051830f738ac1bf5ef42703243 (patch)
tree		ef6ff14b5eb9cf14cd135c0f0f09fa0944192ef0 /mm
parent		2071b3e34fd33e496ebd7b90331ac5b3b0ac3b81 (diff)
parent		c191920f737a09a7252088f018f6747f0d2f484d (diff)
Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin:
 "Vdso cleanups and improvements largely from Andy Lutomirski. This
  makes the vdso a lot less 'special'"

* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso, build: Make LE access macros clearer, host-safe
  x86/vdso, build: Fix cross-compilation from big-endian architectures
  x86/vdso, build: When vdso2c fails, unlink the output
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
  x86, mm: Improve _install_special_mapping and fix x86 vdso naming
  mm, fs: Add vm_ops->name as an alternative to arch_vma_name
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
  x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
  x86, vdso: Move the 32-bit vdso special pages after the text
  x86, vdso: Reimplement vdso.so preparation in build-time C
  x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
  x86, vdso: Clean up 32-bit vs 64-bit vdso params
  x86, mm: Ensure correct alignment of the fixmap
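The mm side of this series centers on the reworked _install_special_mapping(), which takes a small descriptor instead of a bare struct page * array so the vma can carry a name reported through the new vm_ops->name hook. As a rough sketch of that descriptor, inferred from the ->name and ->pages accesses in the mm/mmap.c diff below (the field comments are editorial, not from the patch):

struct vm_special_mapping {
	const char *name;	/* returned by vm_ops->name, e.g. shown in /proc/<pid>/maps */
	struct page **pages;	/* NULL-terminated array of pages backing the mapping */
};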
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmap.c  89
1 file changed, 60 insertions(+), 29 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 8a56d39df4ed..ced5efcdd4b6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2871,6 +2871,31 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 	return 1;
 }
 
+static int special_mapping_fault(struct vm_area_struct *vma,
+				 struct vm_fault *vmf);
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static const char *special_mapping_name(struct vm_area_struct *vma)
+{
+	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
+}
+
+static const struct vm_operations_struct special_mapping_vmops = {
+	.close = special_mapping_close,
+	.fault = special_mapping_fault,
+	.name = special_mapping_name,
+};
+
+static const struct vm_operations_struct legacy_special_mapping_vmops = {
+	.close = special_mapping_close,
+	.fault = special_mapping_fault,
+};
 
 static int special_mapping_fault(struct vm_area_struct *vma,
 				 struct vm_fault *vmf)
@@ -2886,7 +2911,13 @@ static int special_mapping_fault(struct vm_area_struct *vma,
 	 */
 	pgoff = vmf->pgoff - vma->vm_pgoff;
 
-	for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
+	if (vma->vm_ops == &legacy_special_mapping_vmops)
+		pages = vma->vm_private_data;
+	else
+		pages = ((struct vm_special_mapping *)vma->vm_private_data)->
+			pages;
+
+	for (; pgoff && *pages; ++pages)
 		pgoff--;
 
 	if (*pages) {
@@ -2899,30 +2930,11 @@ static int special_mapping_fault(struct vm_area_struct *vma,
 	return VM_FAULT_SIGBUS;
 }
 
-/*
- * Having a close hook prevents vma merging regardless of flags.
- */
-static void special_mapping_close(struct vm_area_struct *vma)
-{
-}
-
-static const struct vm_operations_struct special_mapping_vmops = {
-	.close = special_mapping_close,
-	.fault = special_mapping_fault,
-};
-
-/*
- * Called with mm->mmap_sem held for writing.
- * Insert a new vma covering the given region, with the given flags.
- * Its pages are supplied by the given array of struct page *.
- * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
- * The region past the last page supplied will always produce SIGBUS.
- * The array pointer and the pages it points to are assumed to stay alive
- * for as long as this mapping might exist.
- */
-struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
-				unsigned long addr, unsigned long len,
-				unsigned long vm_flags, struct page **pages)
+static struct vm_area_struct *__install_special_mapping(
+	struct mm_struct *mm,
+	unsigned long addr, unsigned long len,
+	unsigned long vm_flags, const struct vm_operations_struct *ops,
+	void *priv)
 {
 	int ret;
 	struct vm_area_struct *vma;
@@ -2939,8 +2951,8 @@ struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
 	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
-	vma->vm_ops = &special_mapping_vmops;
-	vma->vm_private_data = pages;
+	vma->vm_ops = ops;
+	vma->vm_private_data = priv;
 
 	ret = insert_vm_struct(mm, vma);
 	if (ret)
@@ -2957,12 +2969,31 @@ out:
 	return ERR_PTR(ret);
 }
 
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+struct vm_area_struct *_install_special_mapping(
+	struct mm_struct *mm,
+	unsigned long addr, unsigned long len,
+	unsigned long vm_flags, const struct vm_special_mapping *spec)
+{
+	return __install_special_mapping(mm, addr, len, vm_flags,
+					 &special_mapping_vmops, (void *)spec);
+}
+
 int install_special_mapping(struct mm_struct *mm,
 			    unsigned long addr, unsigned long len,
 			    unsigned long vm_flags, struct page **pages)
 {
-	struct vm_area_struct *vma = _install_special_mapping(mm,
-		addr, len, vm_flags, pages);
+	struct vm_area_struct *vma = __install_special_mapping(
+		mm, addr, len, vm_flags, &legacy_special_mapping_vmops,
+		(void *)pages);
 
 	return PTR_ERR_OR_ZERO(vma);
 }
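
To make the new API concrete, here is a hedged caller-side sketch in the spirit of the x86 vdso/vvar users this series enables; the descriptor name, page array, helper function, and vm_flags are illustrative only and are not part of the patch:

/* Illustrative only: a static descriptor plus a helper that maps it.
 * The descriptor and its page array must stay alive for as long as the
 * mapping can exist (see the comment above _install_special_mapping()). */
static struct page *example_pages[2];	/* [0] filled with a real page before use, [1] stays NULL */

static struct vm_special_mapping example_mapping = {
	.name	= "[example]",		/* reported via vm_ops->name */
	.pages	= example_pages,
};

static int map_example(struct mm_struct *mm, unsigned long addr,
		       unsigned long len)
{
	struct vm_area_struct *vma;

	/* Caller must hold mm->mmap_sem for writing. */
	vma = _install_special_mapping(mm, addr, len,
				       VM_READ | VM_MAYREAD,
				       &example_mapping);

	return PTR_ERR_OR_ZERO(vma);
}

Legacy callers that still pass a bare struct page ** keep working through install_special_mapping(), which routes to the same __install_special_mapping() with legacy_special_mapping_vmops, so their vmas simply have no name.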