aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2005-04-19 16:29:16 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org.(none)>2005-04-19 16:29:16 -0400
commit3bf5ee95648c694bac4d13529563c230cd4fe5f2 (patch)
tree9430e6e4f4c3d586ecb7375cd780fd17694888c7 /mm
parentee39b37b23da0b6ec53a8ebe90ff41c016f8ae27 (diff)
[PATCH] freepgt: hugetlb_free_pgd_range
ia64 and ppc64 had hugetlb_free_pgtables functions which were no longer being called, and it wasn't obvious what to do about them. The ppc64 case turns out to be easy: the associated tables are noted elsewhere and freed later, safe to either skip its hugetlb areas or go through the motions of freeing nothing. Since ia64 does need a special case, restore to ppc64 the special case of skipping them. The ia64 hugetlb case has been broken since pgd_addr_end went in, though it probably appeared to work okay if you just had one such area; in fact it's been broken much longer if you consider a long munmap spanning from another region into the hugetlb region. In the ia64 hugetlb region, more virtual address bits are available than in the other regions, yet the page tables are structured the same way: the page at the bottom is larger. Here we need to scale down each addr before passing it to the standard free_pgd_range. Was about to write a hugely_scaled_down macro, but found htlbpage_to_page already exists for just this purpose. Fixed off-by-one in ia64 is_hugepage_only_range. Uninline free_pgd_range to make it available to ia64. Make sure the vma-gathering loop in free_pgtables cannot join a hugepage_only_range to any other (safe to join huges? probably but don't bother). Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c36
1 file changed, 23 insertions, 13 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 854bd90eeca1..6bad4c4064e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -190,7 +190,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-static inline void free_pgd_range(struct mmu_gather *tlb,
+void free_pgd_range(struct mmu_gather **tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
@@ -241,37 +241,47 @@ static inline void free_pgd_range(struct mmu_gather *tlb,
 		return;
 
 	start = addr;
-	pgd = pgd_offset(tlb->mm, addr);
+	pgd = pgd_offset((*tlb)->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 
-	if (!tlb_is_full_mm(tlb))
-		flush_tlb_pgtables(tlb->mm, start, end);
+	if (!tlb_is_full_mm(*tlb))
+		flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		unsigned long floor, unsigned long ceiling)
 {
 	while (vma) {
 		struct vm_area_struct *next = vma->vm_next;
 		unsigned long addr = vma->vm_start;
 
-		/* Optimization: gather nearby vmas into a single call down */
-		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
-			vma = next;
-			next = vma->vm_next;
-		}
-		free_pgd_range(*tlb, addr, vma->vm_end,
+		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
+				floor, next? next->vm_start: ceiling);
+		} else {
+			/*
+			 * Optimization: gather nearby vmas into one call down
+			 */
+			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
+			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+						     HPAGE_SIZE)) {
+				vma = next;
+				next = vma->vm_next;
+			}
+			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
+		}
 		vma = next;
 	}
 }
 
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+			 unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		struct page *new;