author     David Gibson <david@gibson.dropbear.id.au>   2005-09-06 00:59:47 -0400
committer  Paul Mackerras <paulus@samba.org>            2005-09-06 02:57:46 -0400
commit     14b34661615ec036ab4c91637913706e4caccc93 (patch)
tree       98915b1889422383a85186d8455ecca69fa2327b /arch
parent     0fdf0b8634055b016f7b93cfcdea2eb9091f0271 (diff)
[PATCH] Invert sense of SLB class bit
Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries. On POWER5, ERAT entries created in real mode have
the class bit clear. So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.
Booted on POWER5 and G5.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
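The class bit referred to above is the bit in each SLB (and ERAT) entry that slbie uses to select one of two invalidation classes. As a rough illustration of what the new convention means for callers, here is a minimal sketch of a user-segment invalidation after this patch, assuming SLBIE_C is 0x08000000 (matching the "oris rX,rY,0x0800 / set C (class) bit" instruction this patch removes from entry.S) and 256MB segments (SID_SHIFT = 28); slbie_user_ea() is a hypothetical helper for the sketch, not a function added by the patch:

/* Sketch only -- values assumed as noted above, not taken from this diff. */
#define SLBIE_C		0x08000000UL	/* class bit in the slbie RB operand (assumed) */
#define SID_SHIFT	28		/* 256MB segments (assumed) */

/*
 * Invalidate the SLB entry covering a user effective address.  User
 * entries are class 1 after this patch, so the class bit is set here;
 * a kernel invalidation would pass the bare ESID (class 0) instead.
 */
static inline void slbie_user_ea(unsigned long ea)
{
	unsigned long esid_data = (ea & ~((1UL << SID_SHIFT) - 1)) | SLBIE_C;

	asm volatile("slbie %0" : : "r" (esid_data));
}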
Diffstat (limited to 'arch')
-rw-r--r--   arch/ppc64/kernel/entry.S   | 11
-rw-r--r--   arch/ppc64/mm/hugetlbpage.c |  6
-rw-r--r--   arch/ppc64/mm/slb.c         |  4
3 files changed, 11 insertions, 10 deletions
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index b61572eb2a71..bf99b4a92f20 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
 	beq	2f		/* if yes, don't slbie it */
-	oris	r0,r6,0x0800	/* set C (class) bit */
 
 	/* Bolt in the new stack SLB entry */
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r6,r6,(SLB_ESID_V)@h
-	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
-	slbie	r0
-	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
-	slbmte	r7,r6
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+	slbie	r6
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+	slbmte	r7,r0
 	isync
 
 2:
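The entry.S hunk above also reshuffles registers: r6 (the new kernel stack's ESID) is now passed to slbie directly, since kernel entries are class 0 after this patch, and r0 is reused to build the ESID word handed to slbmte. A rough C rendering of the sequence under stated assumptions (the slbie()/slbmte() wrappers and the SLB_ESID_V / SLB_NUM_BOLTED values below are assumptions for the sketch; neither appears in this diff):

/* Sketch only -- register roles inferred from the asm above; constants assumed. */
#define SLB_ESID_V	0x0000000008000000UL	/* valid bit in the ESID word (assumed) */
#define SLB_NUM_BOLTED	3			/* number of bolted SLB slots (assumed) */

static inline void slbie(unsigned long rb)
{
	asm volatile("slbie %0" : : "r" (rb));
}

static inline void slbmte(unsigned long vsid_data, unsigned long esid_data)
{
	asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
}

static void bolt_new_stack_slb(unsigned long new_stack_esid,	/* r6 above */
			       unsigned long new_stack_vsid)	/* r7, from KSP_VSID */
{
	/* Flush any stale entry for the new stack's segment.  Kernel entries
	 * are class 0 after this patch, so the bare ESID is passed; the old
	 * code first set the 0x08000000 class bit in a scratch register. */
	slbie(new_stack_esid);
	slbie(new_stack_esid);		/* twice: POWER5 < DD2.1 workaround */

	/* Bolt the new stack's entry: ESID | valid bit | bolted slot index. */
	slbmte(new_stack_vsid,
	       new_stack_esid | SLB_ESID_V | (SLB_NUM_BOLTED - 1));
	asm volatile("isync" : : : "memory");
}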
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index e7833c80eb68..338771ec70d7 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					  + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 244150a0bc18..0473953f6a37 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	int i;
 	asm volatile("isync" : : : "memory");
 	for (i = 0; i < offset; i++) {
-		esid_data = (unsigned long)get_paca()->slb_cache[i]
-			<< SID_SHIFT;
+		esid_data = ((unsigned long)get_paca()->slb_cache[i]
+			<< SID_SHIFT) | SLBIE_C;
 		asm volatile("slbie %0" : : "r" (esid_data));
 	}
 	asm volatile("isync" : : : "memory");