author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2013-06-20 05:00:25 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-06-21 02:01:57 -0400
commit    d8e355a20f9dd45deea4c33db649dda59bdbd293
tree      1a073713fca82f117006b03efed2592fa24946d5
parent    a00e7bea0dde6a44b9bbe84f30b731d9ec73858b
powerpc: split hugepage when using subpage protection
We find all the VMAs overlapping the range and mark them so that we don't
allocate hugepages in that range. We also split any existing huge pages so
that the normal page hash entries can be invalidated and the pages faulted
back in with the new protection bits.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
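For context, a minimal userspace sketch (not part of this patch) of how the
new path gets exercised: map a region large enough to be backed by a
transparent hugepage, fault it in, then apply subpage protection to one 64K
page inside it. The syscall number (310 on powerpc), the assumed 16M THP
size, and the 2-bit permission encoding (0 = no restriction, 1 = no write,
2/3 = no access) are assumptions taken from the subpage_prot documentation,
not from this diff; only the sys_subpage_prot(addr, len, map) signature and
the 32-bits-per-64K-page map layout appear in the code below.

/*
 * Hypothetical usage sketch, not part of this patch.  Assumptions are
 * flagged inline: the powerpc syscall number, the THP size, and the
 * 2-bit permission encoding come from the subpage_prot documentation,
 * not from this diff.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_subpage_prot
#define __NR_subpage_prot 310		/* powerpc-specific; assumption */
#endif

int main(void)
{
	size_t hpage = 16UL << 20;	/* assumed THP size: 16M on ppc64 with 64K pages */
	size_t page64k = 64 * 1024;

	char *p = mmap(NULL, hpage, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	madvise(p, hpage, MADV_HUGEPAGE);	/* ask for THP backing */
	memset(p, 0, hpage);			/* fault in, ideally as one hugepage */

	/*
	 * One u32 per 64K page, 2 bits per 4K subpage.  0x55555555 sets
	 * every 2-bit field to 1 (assumed "no write") for all 16
	 * subpages, so the field ordering within the word doesn't matter.
	 */
	uint32_t map = 0x55555555;
	long rc = syscall(__NR_subpage_prot, (unsigned long)p, page64k, &map);
	printf("subpage_prot: %ld\n", rc);	/* with this patch, any THP under the range is split */

	munmap(p, hpage);
	return 0;
}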
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/subpage-prot.c  48
1 file changed, 48 insertions, 0 deletions
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 7c415ddde948..aa74acb0fdfc 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -130,6 +130,53 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	up_write(&mm->mmap_sem);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->private;
+	split_huge_page_pmd(vma, addr, pmd);
+	return 0;
+}
+
+static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
+				    unsigned long len)
+{
+	struct vm_area_struct *vma;
+	struct mm_walk subpage_proto_walk = {
+		.mm = mm,
+		.pmd_entry = subpage_walk_pmd_entry,
+	};
+
+	/*
+	 * We don't try too hard, we just mark all the VMAs in that range
+	 * VM_NOHUGEPAGE and split them.
+	 */
+	vma = find_vma(mm, addr);
+	/*
+	 * If the range lies entirely in an unmapped hole, just return.
+	 */
+	if (vma && ((addr + len) <= vma->vm_start))
+		return;
+
+	while (vma) {
+		if (vma->vm_start >= (addr + len))
+			break;
+		vma->vm_flags |= VM_NOHUGEPAGE;
+		subpage_proto_walk.private = vma;
+		walk_page_range(vma->vm_start, vma->vm_end,
+				&subpage_proto_walk);
+		vma = vma->vm_next;
+	}
+}
+#else
+static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
+				    unsigned long len)
+{
+	return;
+}
+#endif
+
 /*
  * Copy in a subpage protection map for an address range.
  * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
@@ -168,6 +215,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 		return -EFAULT;
 
 	down_write(&mm->mmap_sem);
+	subpage_mark_vma_nohuge(mm, addr, len);
 	for (limit = addr + len; addr < limit; addr = next) {
 		next = pmd_addr_end(addr, limit);
 		err = -ENOMEM;
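As a side note on the map layout mentioned in the subpage-prot.c comment
("2 bits per 4k subpage, so 32 bits per 64k page"), here is an illustrative
userspace helper for packing per-subpage permission values into that map.
The function name and the placement of subpage 0 in the low-order bits are
assumed conventions for this sketch; the diff does not show the kernel's
decoding order.

/*
 * Hypothetical helper, not kernel code: pack per-4K-subpage permission
 * values (0..3) into the one-u32-per-64K-page map that
 * sys_subpage_prot() copies in.  The bit ordering within each word is
 * an assumption of this sketch.
 */
#include <stddef.h>
#include <stdint.h>

#define SUBPAGES_PER_64K	16	/* 64K / 4K */

static void pack_subpage_map(const uint8_t *perm,	/* one value per 4K subpage */
			     size_t nr_subpages,
			     uint32_t *map)		/* one u32 per 64K page */
{
	for (size_t i = 0; i < nr_subpages; i++) {
		size_t word = i / SUBPAGES_PER_64K;
		unsigned int shift = 2 * (i % SUBPAGES_PER_64K);

		map[word] &= ~(3U << shift);		/* clear the 2-bit field */
		map[word] |= (uint32_t)(perm[i] & 3) << shift;
	}
}

A map built this way covers one u32 per 64K page of the target range, which
is then handed to sys_subpage_prot(addr, len, map) as in the sketch above.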