aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ppc64/mm/tlb.c
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2005-09-19 23:52:50 -0400
committerPaul Mackerras <paulus@samba.org>2005-09-21 05:21:07 -0400
commit61b1a94254a2158d053458764a5bd30331d73a54 (patch)
tree4a2aa56ae5d3182c1015b5e9a4351aeb8fab2f3a /arch/ppc64/mm/tlb.c
parent637a6ff6ce525d8495df944550efea0f023dd521 (diff)
[PATCH] ppc64: Store virtual address in TLB flush batches
This patch slightly changes the TLB flush batch mechanism so that we store the full vaddr (including vsid) when adding an entry to the batch, so that the flush part doesn't have to get to the context. This cleans it up a bit, and paves the way for future updates like dynamic vsids. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc64/mm/tlb.c')
-rw-r--r--arch/ppc64/mm/tlb.c25
1 files changed, 12 insertions, 13 deletions
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..31afd95bf870 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
128void hpte_update(struct mm_struct *mm, unsigned long addr, 128void hpte_update(struct mm_struct *mm, unsigned long addr,
129 unsigned long pte, int wrprot) 129 unsigned long pte, int wrprot)
130{ 130{
131 int i;
132 unsigned long context = 0;
133 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 131 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
132 unsigned long vsid;
133 int i;
134 134
135 if (REGION_ID(addr) == USER_REGION_ID)
136 context = mm->context.id;
137 i = batch->index; 135 i = batch->index;
138 136
139 /* 137 /*
@@ -143,17 +141,19 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
143 * up scanning and resetting referenced bits then our batch context 141 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream. 142 * will change mid stream.
145 */ 143 */
146 if (unlikely(i != 0 && context != batch->context)) { 144 if (unlikely(i != 0 && mm != batch->mm)) {
147 flush_tlb_pending(); 145 flush_tlb_pending();
148 i = 0; 146 i = 0;
149 } 147 }
150 148 if (i == 0)
151 if (i == 0) {
152 batch->context = context;
153 batch->mm = mm; 149 batch->mm = mm;
154 } 150 if (addr < KERNELBASE) {
151 vsid = get_vsid(mm->context.id, addr);
152 WARN_ON(vsid == 0);
153 } else
154 vsid = get_kernel_vsid(addr);
155 batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
155 batch->pte[i] = __pte(pte); 156 batch->pte[i] = __pte(pte);
156 batch->addr[i] = addr;
157 batch->index = ++i; 157 batch->index = ++i;
158 if (i >= PPC64_TLB_BATCH_NR) 158 if (i >= PPC64_TLB_BATCH_NR)
159 flush_tlb_pending(); 159 flush_tlb_pending();
@@ -175,10 +175,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
175 local = 1; 175 local = 1;
176 176
177 if (i == 1) 177 if (i == 1)
178 flush_hash_page(batch->context, batch->addr[0], batch->pte[0], 178 flush_hash_page(batch->vaddr[0], batch->pte[0], local);
179 local);
180 else 179 else
181 flush_hash_range(batch->context, i, local); 180 flush_hash_range(i, local);
182 batch->index = 0; 181 batch->index = 0;
183 put_cpu(); 182 put_cpu();
184} 183}