author		David S. Miller <davem@davemloft.net>	2013-09-25 16:48:49 -0400
committer	David S. Miller <davem@davemloft.net>	2013-11-12 18:22:34 -0500
commit		37b3a8ff3e086cd5c369e77d2383b691b2874cd6 (patch)
tree		285994f51243e9e271cfbb70ff863b2b452fea31 /arch/sparc
parent		b2d438348024b75a1ee8b66b85d77f569a5dfed8 (diff)
sparc64: Move from 4MB to 8MB huge pages.
The impetus for this is that we would like to move to 64-bit PMDs and
PGDs, but that would result in only supporting a 42-bit address space
with the current page table layout. It'd be nice to support at least
43 bits.
The reason we'd end up with only 42 bits after making PMDs and PGDs
64-bit is that we only use half-page sized PTE tables in order to make
PMDs line up to 4MB, the hardware huge page size we use.
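To make the arithmetic concrete, here is a standalone sketch (not part
of the patch) assuming sparc64's 8KB base pages (PAGE_SHIFT = 13),
8-byte PTEs, and 4-byte PMD/PGD entries in the current layout:

	/* Address bits covered = PAGE_SHIFT plus the index bits of
	 * each page table level.
	 */
	#include <stdio.h>

	int main(void)
	{
		int ps = 13;	/* sparc64 PAGE_SHIFT (8KB pages) */

		/* Current layout: half-page PTE tables (2^(ps-4) = 512
		 * eight-byte entries) under full-page tables of 4-byte
		 * PMD/PGD entries (2^(ps-2) = 2048 each).
		 */
		printf("current:             %d bits\n",
		       ps + (ps - 4) + 2 * (ps - 2));	/* 44 */

		/* 64-bit PMDs/PGDs halve the upper-level fanout to
		 * 2^(ps-3) entries, losing one bit per level.
		 */
		printf("64-bit upper levels: %d bits\n",
		       ps + (ps - 4) + 2 * (ps - 3));	/* 42 */

		/* Full-page PTE tables (this patch) buy one back. */
		printf("plus full PTE pages: %d bits\n",
		       ps + (ps - 3) + 2 * (ps - 3));	/* 43 */
		return 0;
	}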
So what we do here is we make huge pages 8MB, and fabricate them using
4MB hw TLB entries.
Facilitate this by providing a "REAL_HPAGE_SHIFT" which is used in
places that really need to operate on hardware 4MB pages.
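The two shifts encode a simple invariant: one software huge page is
exactly two hardware huge pages. A hypothetical compile-time check, in
the same style as the CPP tests this patch already adds (but not itself
part of the patch), would be:

	/* Hypothetical, not in the patch: 8MB HPAGE == two 4MB REAL_HPAGEs. */
	#if HPAGE_SHIFT != (REAL_HPAGE_SHIFT + 1)
	#error HPAGE_SHIFT must be exactly one more than REAL_HPAGE_SHIFT
	#endif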
Use full pages (1024 entries) for PTE tables, and adjust PMD_SHIFT,
PGDIR_SHIFT, and the build-time CPP test as needed. Use a CPP test to
make sure REAL_HPAGE_SHIFT and the _PAGE_SZHUGE_* we use match up.
This makes the pgtable cache completely unused, so remove the code
managing it and the state used in mm_context_t. Now we take fewer
spinlocks in the page table allocation path.
The technique we use to fabricate the 8MB pages is to transfer bit 22
from the missing virtual address into the PTE's physical address field.
That takes care of the transparent huge pages case.
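In C terms the fixup looks roughly like the sketch below (illustrative
only; hw_tlb_paddr() is a made-up helper name, not something in the
tree):

	#include <stdint.h>

	#define REAL_HPAGE_SHIFT	22
	#define REAL_HPAGE_SIZE		(1UL << REAL_HPAGE_SHIFT)

	/* Physical address for one hardware 4MB TLB entry backing a
	 * fabricated 8MB page: bit 22 of the missing virtual address
	 * selects which 4MB half is being mapped, so copy it into the
	 * physical address taken from the huge PTE.
	 */
	static uint64_t hw_tlb_paddr(uint64_t pte_paddr, uint64_t vaddr)
	{
		return (pte_paddr & ~REAL_HPAGE_SIZE) |
		       (vaddr & REAL_HPAGE_SIZE);
	}

This is the same masking the patched BUILD_PTE_VALID_SZHUGE_CACHE macro
performs in assembler, and the same bit that update_mmu_cache_pmd()
ORs into the TSB entry in the init_64.c hunk below.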
For hugetlb, we fill things in at the PTE level, and that code already
puts the sub-huge-page physical bits into the PTEs based upon the
offset, so there is nothing special we need to do. It all just works
out.
So, a small amount of complexity in the THP case, but this code is
about to get much simpler when we move to 64-bit PMDs, as we can move
away from the fancy 32-bit huge PMD encoding and just put a real PTE
value in there.
With bug fixes and help from Bob Picco.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/mmu_64.h     |  1
-rw-r--r--  arch/sparc/include/asm/page_64.h    |  5
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 12
-rw-r--r--  arch/sparc/include/asm/tsb.h        | 17
-rw-r--r--  arch/sparc/kernel/sun4v_tlb_miss.S  |  2
-rw-r--r--  arch/sparc/kernel/tsb.S             |  2
-rw-r--r--  arch/sparc/mm/init_64.c             | 74
-rw-r--r--  arch/sparc/mm/tlb.c                 |  6
-rw-r--r--  arch/sparc/mm/tsb.c                 | 13
9 files changed, 47 insertions, 85 deletions
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 76092c4dd277..f668797ae234 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -93,7 +93,6 @@ typedef struct {
 	spinlock_t lock;
 	unsigned long sparc64_ctx_val;
 	unsigned long huge_pte_count;
-	struct page *pgtable_page;
 	struct tsb_config tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 89e07fd0ac88..1958bfbe300c 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -15,7 +15,10 @@
 #define DCACHE_ALIASING_POSSIBLE
 #endif
 
-#define HPAGE_SHIFT		22
+#define HPAGE_SHIFT		23
+#define REAL_HPAGE_SHIFT	22
+
+#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 36760317814f..012b6eeeb06a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -48,18 +48,18 @@
 /* PMD_SHIFT determines the size of the area a second-level page
  * table can map
  */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4))
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
 #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PMD_BITS	(PAGE_SHIFT - 2)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PGDIR_BITS	(PAGE_SHIFT - 2)
 
-#if (PGDIR_SHIFT + PGDIR_BITS) != 44
+#if (PGDIR_SHIFT + PGDIR_BITS) != 45
 #error Page table parameters do not cover virtual address space properly.
 #endif
 
@@ -95,7 +95,7 @@
 #include <linux/sched.h>
 
 /* Entries per page directory level. */
-#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-4))
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD	(1UL << PMD_BITS)
 #define PTRS_PER_PGD	(1UL << PGDIR_BITS)
 
@@ -180,6 +180,10 @@
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
 #define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
 
+#if REAL_HPAGE_SHIFT != 22
+#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
+#endif
+
 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
 
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index e696432b950d..16e577711a7b 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -152,7 +152,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1;
@@ -177,8 +177,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	or	REG, _PAGE_##NAME##_4V, REG; \
 	.previous;
 
-/* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
-#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
+/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.
+ *
+ * We are fabricating an 8MB page using 2 4MB HW pages here.
+ */
+#define BUILD_PTE_VALID_SZHUGE_CACHE(VADDR, PADDR_BITS, REG) \
+	sethi	%hi(4 * 1024 * 1024), REG; \
+	andn	PADDR_BITS, REG, PADDR_BITS; \
+	and	VADDR, REG, REG; \
+	or	PADDR_BITS, REG, PADDR_BITS; \
 661:	sethi	%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
 	.section	.sun4v_1insn_patch, "ax"; \
 	.word	661b; \
@@ -231,7 +238,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	 nop; \
 	OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \
 	/* REG1 can now be clobbered, build final PTE */ \
-1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
+1:	BUILD_PTE_VALID_SZHUGE_CACHE(VADDR, REG2, REG1); \
 	ba,pt	%xcc, PTE_LABEL; \
 	 or	REG1, REG2, REG1; \
 700:
@@ -263,7 +270,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
 	sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1; \
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index bde867fd71e8..e0c09bf85610 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -182,7 +182,7 @@ sun4v_tsb_miss_common:
 	cmp	%g5, -1
 	be,pt	%xcc, 80f
 	 nop
-	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+	COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
 
 	/* That clobbered %g2, reload it. */
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index a313e4a9399b..14158d40ba76 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -75,7 +75,7 @@ tsb_miss_page_table_walk:
 	mov	512, %g7
 	andn	%g5, 0x7, %g5
 	sllx	%g7, %g6, %g7
-	srlx	%g4, HPAGE_SHIFT, %g6
+	srlx	%g4, REAL_HPAGE_SHIFT, %g6
 	sub	%g7, 1, %g7
 	and	%g6, %g7, %g6
 	sllx	%g6, 4, %g6
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 7a97b5a28b4b..807e10833512 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -354,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					address, pte_val(pte));
 	else
 #endif
@@ -2547,53 +2547,13 @@ void __flush_tlb_all(void)
 	: : "r" (pstate));
 }
 
-static pte_t *get_from_cache(struct mm_struct *mm)
-{
-	struct page *page;
-	pte_t *ret;
-
-	spin_lock(&mm->page_table_lock);
-	page = mm->context.pgtable_page;
-	ret = NULL;
-	if (page) {
-		void *p = page_address(page);
-
-		mm->context.pgtable_page = NULL;
-
-		ret = (pte_t *) (p + (PAGE_SIZE / 2));
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	return ret;
-}
-
-static struct page *__alloc_for_cache(struct mm_struct *mm)
-{
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
-
-	if (page) {
-		spin_lock(&mm->page_table_lock);
-		if (!mm->context.pgtable_page) {
-			atomic_set(&page->_count, 2);
-			mm->context.pgtable_page = page;
-		}
-		spin_unlock(&mm->page_table_lock);
-	}
-	return page;
-}
-
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page;
-	pte_t *pte;
-
-	pte = get_from_cache(mm);
-	if (pte)
-		return pte;
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+				       __GFP_REPEAT | __GFP_ZERO);
+	pte_t *pte = NULL;
 
-	page = __alloc_for_cache(mm);
 	if (page)
 		pte = (pte_t *) page_address(page);
 
@@ -2603,14 +2563,10 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page;
-	pte_t *pte;
-
-	pte = get_from_cache(mm);
-	if (pte)
-		return pte;
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+				       __GFP_REPEAT | __GFP_ZERO);
+	pte_t *pte = NULL;
 
-	page = __alloc_for_cache(mm);
 	if (page) {
 		pgtable_page_ctor(page);
 		pte = (pte_t *) page_address(page);
@@ -2621,18 +2577,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	struct page *page = virt_to_page(pte);
-	if (put_page_testzero(page))
-		free_hot_cold_page(page, 0);
+	free_page((unsigned long)pte);
 }
 
 static void __pte_free(pgtable_t pte)
 {
 	struct page *page = virt_to_page(pte);
-	if (put_page_testzero(page)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
+
+	pgtable_page_dtor(page);
+	__free_page(page);
 }
 
 void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -2752,6 +2705,9 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	pte <<= PMD_PADDR_SHIFT;
 	pte |= _PAGE_VALID;
 
+	/* We are fabricating 8MB pages using 4MB real hw pages. */
+	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+
 	prot = pmd_pgprot(entry);
 
 	if (tlb_type == hypervisor)
@@ -2766,7 +2722,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					addr, pte);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7a91f288c708..97d1e56e9863 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -181,10 +181,12 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
 
 		addr &= HPAGE_MASK;
-		if (pmd_val(orig) & PMD_ISHUGE)
+		if (pmd_val(orig) & PMD_ISHUGE) {
 			tlb_batch_add_one(mm, addr, exec);
-		else
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+		} else {
 			tlb_batch_pmd_scan(mm, addr, orig, exec);
+		}
 	}
 }
 
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce5ee91..3b3a360b429a 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -472,8 +472,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.huge_pte_count = 0;
 #endif
 
-	mm->context.pgtable_page = NULL;
-
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
@@ -512,17 +510,10 @@ static void tsb_destroy_one(struct tsb_config *tp)
 void destroy_context(struct mm_struct *mm)
 {
 	unsigned long flags, i;
-	struct page *page;
 
 	for (i = 0; i < MM_NUM_TSBS; i++)
 		tsb_destroy_one(&mm->context.tsb_block[i]);
 
-	page = mm->context.pgtable_page;
-	if (page && put_page_testzero(page)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
-
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 
 	if (CTX_VALID(mm->context)) {