author		Steve Capper <steve.capper@linaro.org>	2013-04-29 09:29:48 -0400
committer	Steve Capper <steve.capper@linaro.org>	2013-06-14 04:39:46 -0400
commit		cfe28c5d63d86b558a1bf1990db7a0aa55b2dec9 (patch)
tree		d615c2bdbe4ec24514e8f658976d06d340c1101c /arch
parent		3212b535f200c85b5a67cbfaea18431da71b5c72 (diff)
x86: mm: Remove x86 version of huge_pmd_share.
The huge_pmd_share code has been copied over to mm/hugetlb.c to make it accessible to other architectures.

Remove the x86 copy of the huge_pmd_share code and enable the ARCH_WANT_HUGE_PMD_SHARE config flag. That way we reference the general one.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
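Editor's note, for context when reading the diff: after this change the only implementation of the PMD-sharing helpers is the generic copy that the parent commit placed in mm/hugetlb.c, and that copy is built only for architectures which set the new config symbol. The sketch below shows the expected gating; the exact guard layout inside mm/hugetlb.c is an assumption here, only the symbol and helper names come from this patch.

/*
 * Sketch (assumed structure): the generic helpers in mm/hugetlb.c are
 * expected to be compiled only when CONFIG_ARCH_WANT_HUGE_PMD_SHARE is
 * set, with sharing simply compiled out otherwise.
 */
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
#else
/* Assumed fallback: no PMD sharing; huge page table setup uses pmd_alloc(). */
#endif

x86 opts in unconditionally via the def_bool y added to arch/x86/Kconfig below; another architecture wanting the shared-PMD path would set the same symbol from its own Kconfig.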
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/Kconfig	3
-rw-r--r--	arch/x86/mm/hugetlbpage.c	120
2 files changed, 3 insertions, 120 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 685692c94f05..476f786c11e9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -207,6 +207,9 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 
+config ARCH_WANT_HUGE_PMD_SHARE
+	def_bool y
+
 config ZONE_DMA32
 	bool
 	default X86_64
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index ae1aa71d0115..7e522a359972 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -16,126 +16,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-static unsigned long page_table_shareable(struct vm_area_struct *svma,
-				struct vm_area_struct *vma,
-				unsigned long addr, pgoff_t idx)
-{
-	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
-				svma->vm_start;
-	unsigned long sbase = saddr & PUD_MASK;
-	unsigned long s_end = sbase + PUD_SIZE;
-
-	/* Allow segments to share if only one is marked locked */
-	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
-	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
-
-	/*
-	 * match the virtual addresses, permission and the alignment of the
-	 * page table page.
-	 */
-	if (pmd_index(addr) != pmd_index(saddr) ||
-	    vm_flags != svm_flags ||
-	    sbase < svma->vm_start || svma->vm_end < s_end)
-		return 0;
-
-	return saddr;
-}
-
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
-	unsigned long base = addr & PUD_MASK;
-	unsigned long end = base + PUD_SIZE;
-
-	/*
-	 * check on proper vm_flags and page table alignment
-	 */
-	if (vma->vm_flags & VM_MAYSHARE &&
-	    vma->vm_start <= base && end <= vma->vm_end)
-		return 1;
-	return 0;
-}
-
-/*
- * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
- * and returns the corresponding pte. While this is not necessary for the
- * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
- */
-static pte_t *
-huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
-			vma->vm_pgoff;
-	struct vm_area_struct *svma;
-	unsigned long saddr;
-	pte_t *spte = NULL;
-	pte_t *pte;
-
-	if (!vma_shareable(vma, addr))
-		return (pte_t *)pmd_alloc(mm, pud, addr);
-
-	mutex_lock(&mapping->i_mmap_mutex);
-	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
-		if (svma == vma)
-			continue;
-
-		saddr = page_table_shareable(svma, vma, addr, idx);
-		if (saddr) {
-			spte = huge_pte_offset(svma->vm_mm, saddr);
-			if (spte) {
-				get_page(virt_to_page(spte));
-				break;
-			}
-		}
-	}
-
-	if (!spte)
-		goto out;
-
-	spin_lock(&mm->page_table_lock);
-	if (pud_none(*pud))
-		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
-	else
-		put_page(virt_to_page(spte));
-	spin_unlock(&mm->page_table_lock);
-out:
-	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	mutex_unlock(&mapping->i_mmap_mutex);
-	return pte;
-}
-
-/*
- * unmap huge page backed by shared pte.
- *
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
- * called with vma->vm_mm->page_table_lock held.
- *
- * returns: 1 successfully unmapped a shared pte page
- *	    0 the underlying pte page is not shared, or it is the last user
- */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	pgd_t *pgd = pgd_offset(mm, *addr);
-	pud_t *pud = pud_offset(pgd, *addr);
-
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
-		return 0;
-
-	pud_clear(pud);
-	put_page(virt_to_page(ptep));
-	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
-	return 1;
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {