 arch/ia64/mm/hugetlbpage.c  | 29
 arch/ppc64/mm/hugetlbpage.c | 10
 include/asm-ia64/page.h     |  2
 include/asm-ia64/pgtable.h  |  4
 include/asm-ppc64/pgtable.h | 12
 include/linux/hugetlb.h     |  6
 include/linux/mm.h          |  4
 mm/memory.c                 | 36
 8 files changed, 65 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 626258ae9742..df08ae7634b6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,13 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
 	return NULL;
 }
 
-/*
- * Do nothing, until we've worked out what to do! To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
 {
+	/*
+	 * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) also.
+	 *
+	 * The offset of these addresses from the base of the hugetlb
+	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+	 * the standard free_pgd_range will free the right page tables.
+	 *
+	 * If floor and ceiling are also in the hugetlb region, they
+	 * must likewise be scaled down; but if outside, left unchanged.
+	 */
+
+	addr = htlbpage_to_page(addr);
+	end  = htlbpage_to_page(end);
+	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
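
For illustration, the scaling described in the comment above can be modelled in plain userspace C. The constants below (a 3-bit region number in bits 63..61, PAGE_SHIFT of 14, HPAGE_SHIFT of 28) and the helper name scale_hugetlb_addr() are assumptions for this sketch, not the kernel's htlbpage_to_page() definition:

/* Illustrative userspace model of the address scaling described above.
 * Not the kernel macro: the shift widths and region layout are assumed. */
#include <stdio.h>

#define PAGE_SHIFT	14UL
#define HPAGE_SHIFT	28UL
#define REGION_SHIFT	61UL
#define REGION_NUMBER(a)	((a) >> REGION_SHIFT)
#define REGION_OFFSET(a)	((a) & ((1UL << REGION_SHIFT) - 1))

/* Scale a hugetlb-region address so that each huge page spans exactly one
 * small page of the scaled range; the region number is preserved. */
static unsigned long scale_hugetlb_addr(unsigned long addr)
{
	return (REGION_NUMBER(addr) << REGION_SHIFT) |
	       (REGION_OFFSET(addr) >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
	unsigned long addr = (4UL << REGION_SHIFT) | (3UL << HPAGE_SHIFT);
	unsigned long end  = addr + (8UL << HPAGE_SHIFT);	/* 8 huge pages */

	printf("addr %#lx -> %#lx\n", addr, scale_hugetlb_addr(addr));
	printf("end  %#lx -> %#lx\n", end, scale_hugetlb_addr(end));
	/* The scaled span covers 8 small pages, so the generic
	 * free_pgd_range() walk frees exactly the right page tables. */
	return 0;
}
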
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index c62ddaff0720..8665bb57e42b 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -430,16 +430,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	flush_tlb_pending();
 }
 
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-			   unsigned long start, unsigned long end)
-{
-	/* Because the huge pgtables are only 2 level, they can take
-	 * at most around 4M, much less than one hugepage which the
-	 * process is presumably entitled to use. So we don't bother
-	 * freeing up the pagetables on unmap, and wait until
-	 * destroy_context() to clean up the lot. */
-}
-
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = current->mm;
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 24aab801a8ca..08894f73abf0 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -139,7 +139,7 @@ typedef union ia64_va {
 # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len)		\
	 (REGION_NUMBER(addr) == REGION_HPAGE &&	\
-	  REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+	  REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
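
The one-line change above fixes an off-by-one: a range that ends exactly at the top of the hugetlb region lies entirely inside it, yet its one-past-the-end address falls in the next region, so the old test rejected it. A minimal userspace check of the two forms; the 3-bit region layout and a REGION_HPAGE value of 4 are assumptions here, not taken from the headers:

/* Compare the old and new is_hugepage_only_range() tests on a range that
 * runs right up to the end of the (assumed) hugetlb region. */
#include <stdio.h>

#define REGION_SHIFT		61UL
#define REGION_NUMBER(a)	((unsigned long)(a) >> REGION_SHIFT)
#define REGION_HPAGE		4UL	/* assumed hugetlb region number */

static int only_range_old(unsigned long addr, unsigned long len)
{
	return REGION_NUMBER(addr) == REGION_HPAGE &&
	       REGION_NUMBER(addr + len) == REGION_HPAGE;
}

static int only_range_new(unsigned long addr, unsigned long len)
{
	return REGION_NUMBER(addr) == REGION_HPAGE &&
	       REGION_NUMBER(addr + len - 1) == REGION_HPAGE;
}

int main(void)
{
	/* Last 1MB of the hugetlb region: wholly inside it. */
	unsigned long len  = 1UL << 20;
	unsigned long addr = (REGION_HPAGE + 1) * (1UL << REGION_SHIFT) - len;

	printf("old check: %d  new check: %d\n",
	       only_range_old(addr, len), only_range_new(addr, len));
	return 0;
}
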
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 1757a811f436..bbf6dd757003 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -472,8 +472,8 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
 struct mmu_gather;
-extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
-	struct vm_area_struct * prev, unsigned long start, unsigned long end);
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+		unsigned long end, unsigned long floor, unsigned long ceiling);
 #endif
 
 /*
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 4c4824653e80..33b90e2aa47d 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -500,9 +500,15 @@ extern pgd_t ioremap_dir[1024];
 
 extern void paging_init(void);
 
-struct mmu_gather;
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-			   unsigned long start, unsigned long end);
+/*
+ * Because the huge pgtables are only 2 level, they can take
+ * at most around 4M, much less than one hugepage which the
+ * process is presumably entitled to use. So we don't bother
+ * freeing up the pagetables on unmap, and wait until
+ * destroy_context() to clean up the lot.
+ */
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	\
+	do { } while (0)
 
 /*
  * This gets called at the end of handling a page fault, when
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ae45676d27ba..6af1ae4a8211 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -37,7 +37,8 @@ extern int sysctl_hugetlb_shm_group;
 
 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(mm, addr, len)	0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+	do { } while (0)
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
@@ -72,7 +73,8 @@ static inline unsigned long hugetlb_total_pages(void)
 #define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+	do { } while (0)
 #define alloc_huge_page()			({ NULL; })
 #define free_huge_page(p)			({ (void)(p); BUG(); })
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 59eca28b5ae2..c74a74ca401d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -587,7 +587,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+		unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 		struct vm_area_struct *vma);
diff --git a/mm/memory.c b/mm/memory.c
index 854bd90eeca1..6bad4c4064e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -190,7 +190,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-static inline void free_pgd_range(struct mmu_gather *tlb,
+void free_pgd_range(struct mmu_gather **tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
@@ -241,37 +241,47 @@ static inline void free_pgd_range(struct mmu_gather *tlb,
 		return;
 
 	start = addr;
-	pgd = pgd_offset(tlb->mm, addr);
+	pgd = pgd_offset((*tlb)->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 
-	if (!tlb_is_full_mm(tlb))
-		flush_tlb_pgtables(tlb->mm, start, end);
+	if (!tlb_is_full_mm(*tlb))
+		flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		unsigned long floor, unsigned long ceiling)
 {
 	while (vma) {
 		struct vm_area_struct *next = vma->vm_next;
 		unsigned long addr = vma->vm_start;
 
-		/* Optimization: gather nearby vmas into a single call down */
-		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
-			vma = next;
-			next = vma->vm_next;
-		}
-		free_pgd_range(*tlb, addr, vma->vm_end,
+		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
+				floor, next? next->vm_start: ceiling);
+		} else {
+			/*
+			 * Optimization: gather nearby vmas into one call down
+			 */
+			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
+			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+							HPAGE_SIZE)) {
+				vma = next;
+				next = vma->vm_next;
+			}
+			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
+		}
 		vma = next;
 	}
 }
 
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		struct page *new;
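
As a rough userspace model of the reworked free_pgtables() loop above: ordinary vmas within PMD_SIZE of each other are still batched into a single call down, while a hugepage-only vma is now handed off on its own, and batching stops as soon as the next vma is hugepage-only. Everything below (struct vma, free_range(), the addresses) is illustrative only, not kernel code:

/* Minimal userspace sketch of the new free_pgtables() batching logic. */
#include <stdio.h>
#include <stddef.h>

#define PMD_SIZE	(1UL << 21)

struct vma {
	unsigned long start, end;
	int huge;			/* models is_hugepage_only_range() */
	struct vma *next;
};

static void free_range(const char *how, unsigned long addr, unsigned long end,
		       unsigned long floor, unsigned long ceiling)
{
	printf("%s: [%#lx, %#lx) floor %#lx ceiling %#lx\n",
	       how, addr, end, floor, ceiling);
}

static void free_pgtables_model(struct vma *vma,
				unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vma *next = vma->next;
		unsigned long addr = vma->start;

		if (vma->huge) {
			/* Hugepage-only range goes down on its own. */
			free_range("huge ", addr, vma->end,
				   floor, next ? next->start : ceiling);
		} else {
			/* Batch nearby ordinary vmas into one call down. */
			while (next && next->start <= vma->end + PMD_SIZE &&
			       !next->huge) {
				vma = next;
				next = vma->next;
			}
			free_range("plain", addr, vma->end,
				   floor, next ? next->start : ceiling);
		}
		vma = next;
	}
}

int main(void)
{
	struct vma c = { 0x8000000000UL, 0x8010000000UL, 1, NULL };	/* huge */
	struct vma b = { 0x400200000UL, 0x400300000UL, 0, &c };
	struct vma a = { 0x400000000UL, 0x400100000UL, 0, &b };

	free_pgtables_model(&a, 0, ~0UL);
	return 0;
}
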