author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-09-28 00:45:45 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-09-28 10:46:42 -0400
commit     0f9578b70a9f112bfb541e1d5ab486a376e64503 (patch)
tree       00e93df9f9920c43ace34e28298255dc8a0f9263
parent     485ef69edefd7fc7f351c94d0d77b3ed8a242f7b (diff)
[PATCH] ppc64: More hugepage fixes
My previous patch fixing invalidation of huge PTEs wasn't good enough; we
still had an issue if a PTE invalidation batch contained both small and
large pages. This patch fixes this by making sure the batch is flushed if
the page size fed to it changes.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
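For illustration, a minimal user-space sketch of the invariant this patch
enforces: a batch only ever holds entries of one page size, so queueing an
entry of the other size flushes the pending batch first. The names here
(tlb_batch, batch_add, batch_flush) are hypothetical stand-ins, not the
kernel's, and the flush is simulated with a printf.

/*
 * Sketch of the one-page-size-per-batch rule; assumed names, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_NR 4

struct tlb_batch {
	unsigned long addr[BATCH_NR];
	unsigned long index;	/* entries queued so far */
	bool large;		/* page size of the whole batch */
};

static void batch_flush(struct tlb_batch *b)
{
	/* stand-in for the real range flush: invalidate entries, then reset */
	printf("flushing %lu %s-page entries\n", b->index,
	       b->large ? "huge" : "small");
	b->index = 0;
}

static void batch_add(struct tlb_batch *b, unsigned long addr, bool large)
{
	/* mixing sizes would invalidate with the wrong page size, so flush */
	if (b->index != 0 && b->large != large)
		batch_flush(b);

	if (b->index == 0)
		b->large = large;	/* empty batch adopts the new size */

	b->addr[b->index++] = addr;
	if (b->index == BATCH_NR)
		batch_flush(b);
}

int main(void)
{
	struct tlb_batch b = { .index = 0 };

	batch_add(&b, 0x1000, false);
	batch_add(&b, 0x2000, false);
	batch_add(&b, 0x20000000, true);	/* size change forces a flush */
	batch_flush(&b);
	return 0;
}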
-rw-r--r--  arch/ppc64/mm/hash_native.c   | 5 ++---
-rw-r--r--  arch/ppc64/mm/tlb.c           | 4 +++-
-rw-r--r--  include/asm-ppc64/tlbflush.h  | 1 +
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index eb1bbb5b6c16..bfd385b7713c 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context,
 	hpte_t *hptep;
 	unsigned long hpte_v;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-	unsigned long large;
+	unsigned long large = batch->large;
 
 	local_irq_save(flags);
 
@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context,
 
 		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
 		batch->vaddr[j] = va;
-		large = pte_huge(batch->pte[i]);
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
 		asm volatile("ptesync":::"memory");
 
 		for (i = 0; i < j; i++)
-			__tlbie(batch->vaddr[i], 0);
+			__tlbie(batch->vaddr[i], large);
 
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..21fbffb23a43 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (i != 0 && (context != batch->context ||
+		       batch->large != pte_huge(pte))) {
 		flush_tlb_pending();
 		i = 0;
 	}
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	if (i == 0) {
 		batch->context = context;
 		batch->mm = mm;
+		batch->large = pte_huge(pte);
 	}
 	batch->pte[i] = __pte(pte);
 	batch->addr[i] = addr;
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e082..74271d7c1d16 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -25,6 +25,7 @@ struct ppc64_tlb_batch {
 	pte_t pte[PPC64_TLB_BATCH_NR];
 	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int large;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 