author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-09-19 23:52:50 -0400
committer Paul Mackerras <paulus@samba.org>                  2005-09-21 05:21:07 -0400
commit    61b1a94254a2158d053458764a5bd30331d73a54
tree      4a2aa56ae5d3182c1015b5e9a4351aeb8fab2f3a
parent    637a6ff6ce525d8495df944550efea0f023dd521
[PATCH] ppc64: Store virtual address in TLB flush batches
This patch slightly changes the TLB flush batch mechanism so that we store the full vaddr (including the vsid) when adding an entry to the batch; the flush path then no longer has to reach into the context. This cleans things up a bit and paves the way for future updates such as dynamic vsids.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
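For reference, here is a minimal user-space sketch of the address composition this patch moves into hpte_update(): the segment's VSID goes in the high bits, and the low 28 bits of the effective address (the segment offset) are kept, giving the full virtual address that now lands in batch->vaddr[]. The vsid_of() stub below is hypothetical; in the kernel the VSID comes from get_vsid() or get_kernel_vsid(). This assumes a 64-bit unsigned long, as on ppc64.

#include <stdio.h>

#define SID_SHIFT 28
#define SID_MASK  0x0fffffffUL

/* Hypothetical stand-in for the kernel's get_vsid()/get_kernel_vsid(). */
static unsigned long vsid_of(unsigned long ea)
{
	return (ea >> SID_SHIFT) + 0x1000;	/* made-up per-segment id */
}

/* Compose the full virtual address the way the patched hpte_update() does:
 * batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
 */
static unsigned long full_vaddr(unsigned long ea)
{
	return (vsid_of(ea) << SID_SHIFT) | (ea & SID_MASK);
}

int main(void)
{
	unsigned long ea = 0x30001000UL;	/* example effective address */

	printf("ea=%#lx -> vaddr=%#lx\n", ea, full_vaddr(ea));
	return 0;
}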
-rw-r--r--  arch/ppc64/kernel/pSeries_lpar.c  |  5
-rw-r--r--  arch/ppc64/mm/hash_native.c       | 13
-rw-r--r--  arch/ppc64/mm/hash_utils.c        | 21
-rw-r--r--  arch/ppc64/mm/tlb.c               | 25
-rw-r--r--  include/asm-ppc64/machdep.h       |  5
-rw-r--r--  include/asm-ppc64/tlbflush.h      |  7
6 files changed, 28 insertions(+), 48 deletions(-)
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index a6de83f2078f..268d8362dde7 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-				   int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
 	for (i = 0; i < number; i++)
-		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
 	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 7626bb59954d..29b074505d3e 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
 	local_irq_restore(flags);
 }
 
-static void native_flush_hash_range(unsigned long context,
-				    unsigned long number, int local)
+static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
+	unsigned long va, vpn, hash, secondary, slot, flags, avpn;
 	int i, j;
 	hpte_t *hptep;
 	unsigned long hpte_v;
@@ -351,13 +350,7 @@ static void native_flush_hash_range(unsigned long context,
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if (batch->addr[i] < KERNELBASE)
-			vsid = get_vsid(context, batch->addr[i]);
-		else
-			vsid = get_kernel_vsid(batch->addr[i]);
-
-		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
-		batch->vaddr[j] = va;
+		va = batch->vaddr[j];
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 09475c8edf7c..36cf474b3d36 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -355,18 +355,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	return ret;
 }
 
-void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-		     int local)
+void flush_hash_page(unsigned long va, pte_t pte, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot;
+	unsigned long vpn, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if (ea < KERNELBASE)
-		vsid = get_vsid(context, ea);
-	else
-		vsid = get_kernel_vsid(ea);
-
-	va = (vsid << 28) | (ea & 0x0fffffff);
 	if (huge)
 		vpn = va >> HPAGE_SHIFT;
 	else
@@ -381,17 +374,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
-void flush_hash_range(unsigned long context, unsigned long number, int local)
+void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range) {
-		ppc_md.flush_hash_range(context, number, local);
+		ppc_md.flush_hash_range(number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+		struct ppc64_tlb_batch *batch =
+			&__get_cpu_var(ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
-			flush_hash_page(context, batch->addr[i], batch->pte[i],
-					local);
+			flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 	}
 }
 
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..31afd95bf870 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 void hpte_update(struct mm_struct *mm, unsigned long addr,
 		 unsigned long pte, int wrprot)
 {
-	int i;
-	unsigned long context = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	unsigned long vsid;
+	int i;
 
-	if (REGION_ID(addr) == USER_REGION_ID)
-		context = mm->context.id;
 	i = batch->index;
 
 	/*
@@ -143,17 +141,19 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (unlikely(i != 0 && mm != batch->mm)) {
 		flush_tlb_pending();
 		i = 0;
 	}
-
-	if (i == 0) {
-		batch->context = context;
+	if (i == 0)
 		batch->mm = mm;
-	}
+	if (addr < KERNELBASE) {
+		vsid = get_vsid(mm->context.id, addr);
+		WARN_ON(vsid == 0);
+	} else
+		vsid = get_kernel_vsid(addr);
+	batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
 	batch->pte[i] = __pte(pte);
-	batch->addr[i] = addr;
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		flush_tlb_pending();
@@ -175,10 +175,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 		local = 1;
 
 	if (i == 1)
-		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
-				local);
+		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
 	else
-		flush_hash_range(batch->context, i, local);
+		flush_hash_range(i, local);
 	batch->index = 0;
 	put_cpu();
 }
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index 8027160ec96d..d35d9d3e44cf 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -56,9 +56,8 @@ struct machdep_calls {
 				       unsigned long vflags,
 				       unsigned long rflags);
 	long		(*hpte_remove)(unsigned long hpte_group);
-	void		(*flush_hash_range)(unsigned long context,
-					    unsigned long number,
-					    int local);
+	void		(*flush_hash_range)(unsigned long number, int local);
+
 	/* special for kexec, to be called in real mode, linar mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e082..800bc0010cfb 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -20,10 +20,8 @@
 struct mm_struct;
 struct ppc64_tlb_batch {
 	unsigned long index;
-	unsigned long context;
 	struct mm_struct *mm;
 	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
@@ -47,8 +45,7 @@ static inline void flush_tlb_pending(void)
 #define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
 #define flush_tlb_pgtables(mm, start, end)	do { } while (0)
 
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-			    int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
 
 #endif /* _PPC64_TLBFLUSH_H */