Diffstat (limited to 'arch/powerpc/mm/tlb_64.c')

 arch/powerpc/mm/tlb_64.c | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 09ab81a10f4f..53e31b834ace 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -21,6 +21,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -30,7 +31,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
-#include <linux/highmem.h>
+#include <asm/bug.h>
 
 DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
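(Sketch for context, not part of this patch.) The flush batch extended here lives in a per-CPU variable, so each CPU queues its own pending hash-table invalidations and flushes them without cross-CPU locking. A minimal sketch of the structure the code below relies on, with field names inferred from their use in hpte_update() and __flush_tlb_pending(); the real definition lives in the ppc64 tlbflush.h header:

/* Sketch only: layout is an assumption based on usage in this file. */
struct ppc64_tlb_batch {
	unsigned long		index;	/* number of entries queued */
	struct mm_struct	*mm;	/* one address space per batch */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;	/* one page size per batch (new) */
};

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);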
@@ -126,28 +127,46 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
  * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
  */
 void hpte_update(struct mm_struct *mm, unsigned long addr,
-		 unsigned long pte, int wrprot)
+		 pte_t *ptep, unsigned long pte, int huge)
 {
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid;
+	unsigned int psize = mmu_virtual_psize;
 	int i;
 
 	i = batch->index;
 
+	/* We mask the address for the base page size. Huge pages will
+	 * have applied their own masking already
+	 */
+	addr &= PAGE_MASK;
+
+	/* Get page size (maybe move back to caller) */
+	if (huge) {
+#ifdef CONFIG_HUGETLB_PAGE
+		psize = mmu_huge_psize;
+#else
+		BUG();
+#endif
+	}
+
 	/*
 	 * This can happen when we are in the middle of a TLB batch and
 	 * we encounter memory pressure (eg copy_page_range when it tries
 	 * to allocate a new pte). If we have to reclaim memory and end
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
+	 *
+	 * We also need to ensure only one page size is present in a given
+	 * batch
 	 */
-	if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
+	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
 		flush_tlb_pending();
 		i = 0;
 	}
 	if (i == 0) {
 		batch->mm = mm;
-		batch->large = pte_huge(pte);
+		batch->psize = psize;
 	}
 	if (addr < KERNELBASE) {
 		vsid = get_vsid(mm->context.id, addr);
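The key invariant introduced above: a batch may only hold entries for a single mm and a single page size, and anything else forces an early flush before the new entry is queued. A standalone model of that check (illustrative only, with hypothetical types, not kernel code):

#include <stdbool.h>

struct batch { void *mm; unsigned int psize; unsigned int index; };

/* Mirrors: i != 0 && (mm != batch->mm || batch->psize != psize) */
static bool needs_early_flush(const struct batch *b,
			      const void *mm, unsigned int psize)
{
	return b->index != 0 && (mm != b->mm || b->psize != psize);
}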
@@ -155,7 +174,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	} else
 		vsid = get_kernel_vsid(addr);
 	batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
-	batch->pte[i] = __pte(pte);
+	batch->pte[i] = __real_pte(__pte(pte), ptep);
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		flush_tlb_pending();
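Why __real_pte(): with the 64K-pages work this change belongs to, a single Linux PTE can stand for several hash-table slots, so the batch must capture more than the bare PTE value; that is also why the new ptep argument is threaded through. On 4K-base-page configurations the macro is expected to degenerate to a simple cast. The sketch below is an assumption about that degenerate case, not the actual header definition:

/* Assumed 4K-base-page fallback: real_pte_t carries no extra state. */
typedef pte_t real_pte_t;
#define __real_pte(e, p)	((real_pte_t)(e))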
@@ -177,7 +196,8 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 		local = 1;
 
 	if (i == 1)
-		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
+		flush_hash_page(batch->vaddr[0], batch->pte[0],
+				batch->psize, local);
 	else
 		flush_hash_range(i, local);
 	batch->index = 0;
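For completeness, a hypothetical caller sketch showing how the widened hpte_update() signature would be driven from a PTE-clearing path. The real callers are the pte_update()/hugepage helpers in the ppc64 pgtable headers; the function name below is illustrative:

/* Illustrative caller (not from the patch): clearing a base-page PTE. */
static inline void example_clear_pte(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_val(*ptep);

	pte_clear(mm, addr, ptep);
	/* Only queue a hash invalidation if a hash PTE may exist. */
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);	/* huge = 0 */
}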