author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-10-31 01:42:19 -0400
committer	Josh Boyer <jwboyer@linux.vnet.ibm.com>	2007-11-01 08:15:30 -0400
commit		b98ac05d5e460301fbea24cceed0f2a601c82e22
tree		2e556ad28a007d13339300fbbd4942d0ec9f023c
parent		e701d269aa28996f3502780951fe1b12d5d66b49
[POWERPC] 4xx: Deal with 44x virtually tagged icache
The 44x family has an interesting "feature" which is a virtually tagged instruction cache (yuck!). So far, we haven't dealt with it properly, which means we've been mostly lucky or people didn't report the problems, unless people have been running custom patches in their distro...

This is an attempt at fixing it properly. I chose to do it by setting a global flag whenever we change a PTE that was previously marked executable, and flushing the entire instruction cache upon return to user space when that happens.

This is a bit heavy-handed, but it's hard to do more fine-grained flushes: the icbi instruction on those processors, for some very strange reasons (since the cache is virtually mapped), still requires a valid TLB entry for reading in the target address space, which isn't something I want to deal with.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
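
To make the mechanism easier to follow, here is a minimal user-space model of what the patch does (a sketch only, not kernel code: the _PAGE_* bit values and the flush_whole_icache() helper are made-up stand-ins for illustration, while the flag name and the check in pte_update() mirror the patch; the real flush is the iccci sequence added to the assembly return paths below):

/*
 * User-space model of the patch's mechanism -- illustration only.
 * _PAGE_USER/_PAGE_HWEXEC values are arbitrary here, and
 * flush_whole_icache() is a hypothetical stand-in for the 44x
 * "iccci" whole-icache invalidate done in the assembly return path.
 */
#include <stdio.h>

#define _PAGE_USER	0x001UL
#define _PAGE_HWEXEC	0x004UL

static int icache_44x_need_flush;	/* the flag added by the patch */

static void flush_whole_icache(void)
{
	printf("icache flushed\n");
}

/* simplified pte_update(): clear/set bits, remember the old value */
static unsigned long pte_update(unsigned long *pte, unsigned long clr,
				unsigned long set)
{
	unsigned long old = *pte;

	*pte = (old & ~clr) | set;
	/* PTE was user-executable: user space may still see stale icache */
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
	return old;
}

/* what the syscall/interrupt exit paths do just before returning */
static void return_to_user(void)
{
	if (icache_44x_need_flush) {
		icache_44x_need_flush = 0;
		flush_whole_icache();
	}
}

int main(void)
{
	unsigned long pte = _PAGE_USER | _PAGE_HWEXEC;

	pte_update(&pte, _PAGE_HWEXEC, 0);	/* drop execute permission */
	return_to_user();			/* flushes once */
	return_to_user();			/* flag already clear: no flush */
	return 0;
}

Compiled and run as-is, the first return_to_user() performs the flush and clears the flag and the second one does nothing; the real syscall and interrupt exit paths make the same test before the RFI.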
-rw-r--r--	arch/powerpc/kernel/entry_32.S		23
-rw-r--r--	arch/powerpc/kernel/misc_32.S		9
-rw-r--r--	arch/powerpc/mm/44x_mmu.c		1
-rw-r--r--	arch/ppc/kernel/entry.S			23
-rw-r--r--	arch/ppc/kernel/misc.S			9
-rw-r--r--	arch/ppc/mm/44x_mmu.c			1
-rw-r--r--	include/asm-powerpc/pgtable-ppc32.h	13
7 files changed, 79 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 21d889e63e87..a7572cf464bd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -244,6 +244,13 @@ syscall_exit_cont:
 	andis.	r10,r0,DBCR0_IC@h
 	bnel-	load_dbcr0
 #endif
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	bne-	2f
+1:
+#endif /* CONFIG_44x */
 	stwcx.	r0,0,r1			/* to clear the reservation */
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
@@ -258,6 +265,12 @@ syscall_exit_cont:
 	mtspr	SPRN_SRR1,r8
 	SYNC
 	RFI
+#ifdef CONFIG_44x
+2:	li	r7,0
+	iccci	r0,r0
+	stw	r7,icache_44x_need_flush@l(r4)
+	b	1b
+#endif /* CONFIG_44x */
 
 66:	li	r3,-ENOSYS
 	b	ret_from_syscall
@@ -683,6 +696,16 @@ resume_kernel:
 
 	/* interrupts are hard-disabled at this point */
 restore:
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	beq+	1f
+	li	r6,0
+	iccci	r0,r0
+	stw	r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 0ed2c7eddc9e..8b642ab26d37 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -543,12 +543,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
+#ifndef CONFIG_44x
+	/* We don't flush the icache on 44x. Those have a virtual icache
+	 * and we don't have access to the virtual address here (it's
+	 * not the page vaddr but where it's mapped in user space). The
+	 * flushing of the icache on these is handled elsewhere, when
+	 * a change in the address space occurs, before returning to
+	 * user space
+	 */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
+#endif /* CONFIG_44x */
 	blr
 
 /*
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index c3df50476539..04dc08798d3d 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -35,6 +35,7 @@
  */
 unsigned int tlb_44x_index; /* = 0 */
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
+int icache_44x_need_flush;
 
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index fba7ca17a67e..b19bfef2034d 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -244,6 +244,13 @@ syscall_exit_cont:
 	andis.	r10,r0,DBCR0_IC@h
 	bnel-	load_dbcr0
 #endif
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	bne-	2f
+1:
+#endif /* CONFIG_44x */
 	stwcx.	r0,0,r1			/* to clear the reservation */
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
@@ -258,6 +265,12 @@ syscall_exit_cont:
 	mtspr	SPRN_SRR1,r8
 	SYNC
 	RFI
+#ifdef CONFIG_44x
+2:	li	r7,0
+	iccci	r0,r0
+	stw	r7,icache_44x_need_flush@l(r4)
+	b	1b
+#endif /* CONFIG_44x */
 
 66:	li	r3,-ENOSYS
 	b	ret_from_syscall
@@ -679,6 +692,16 @@ resume_kernel:
 
 	/* interrupts are hard-disabled at this point */
 restore:
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	beq+	1f
+	li	r6,0
+	iccci	r0,r0
+	stw	r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 2b81e71d6b2d..e0c850d85c53 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -499,12 +499,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
+#ifndef CONFIG_44x
+	/* We don't flush the icache on 44x. Those have a virtual icache
+	 * and we don't have access to the virtual address here (it's
+	 * not the page vaddr but where it's mapped in user space). The
+	 * flushing of the icache on these is handled elsewhere, when
+	 * a change in the address space occurs, before returning to
+	 * user space
+	 */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
+#endif /* CONFIG_44x */
 	blr
 
 /*
diff --git a/arch/ppc/mm/44x_mmu.c b/arch/ppc/mm/44x_mmu.c
index 0a0a0487b334..6536a25cfcb8 100644
--- a/arch/ppc/mm/44x_mmu.c
+++ b/arch/ppc/mm/44x_mmu.c
@@ -61,6 +61,7 @@ extern char etext[], _stext[];
  */
 unsigned int tlb_44x_index = 0;
 unsigned int tlb_44x_hwater = 62;
+int icache_44x_need_flush;
 
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 86a54a4a8a2a..fea2d8ff1e73 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -11,6 +11,11 @@
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
 extern unsigned long ioremap_bot, ioremap_base;
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -562,6 +567,10 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
 	: "r" (p), "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
+#ifdef CONFIG_44x
+	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+		icache_44x_need_flush = 1;
+#endif
 	return old;
 }
 #else
@@ -582,6 +591,10 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
 	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
+#ifdef CONFIG_44x
+	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+		icache_44x_need_flush = 1;
+#endif
 	return old;
 }
 #endif