author		David Gibson <david@gibson.dropbear.id.au>	2005-09-06 00:59:47 -0400
committer	Paul Mackerras <paulus@samba.org>		2005-09-06 02:57:46 -0400
commit		14b34661615ec036ab4c91637913706e4caccc93 (patch)
tree		98915b1889422383a85186d8455ecca69fa2327b /arch/ppc64/mm/hugetlbpage.c
parent		0fdf0b8634055b016f7b93cfcdea2eb9091f0271 (diff)
[PATCH] Invert sense of SLB class bit
Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear.  So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
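For illustration, a minimal sketch of the invalidation side of this
convention, assuming the SID_SHIFT and SLBIE_C definitions from the
ppc64 headers of this era; the slbie_user_segment() helper is
hypothetical, not part of the patch.  The RB operand of slbie carries
the class bit, which must match the class of the SLB entry being
invalidated, so with the inverted convention user-segment flushes
must OR in SLBIE_C, as the hunks below do.

/* Sketch only, not part of this patch. */
#define SID_SHIFT	28			/* 256MB segments */
#define SLBIE_C		0x08000000UL		/* class bit in the slbie operand */

static inline void slbie_user_segment(unsigned long esid)
{
	/* User SLB entries now carry the class bit, so the slbie
	 * operand for a user segment must set it too. */
	asm volatile("slbie %0" : : "r" ((esid << SID_SHIFT) | SLBIE_C));
}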
Diffstat (limited to 'arch/ppc64/mm/hugetlbpage.c')
-rw-r--r--	arch/ppc64/mm/hugetlbpage.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index e7833c80eb68..338771ec70d7 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
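The creation side of the same convention lands outside this file.
Sketched here under the assumption that the SLB_VSID_* constants match
include/asm-ppc64/mmu.h of this period, the flag change amounts to
moving the class bit from the kernel VSID flags to the user VSID flags:

/* Sketch only, not part of the hunks above.  Inverting the convention
 * means SLB_VSID_C (the class bit) moves from the kernel VSID flags
 * to the user VSID flags. */
#define SLB_VSID_C	0x0000000000000080UL	/* class bit */
#define SLB_VSID_KP	0x0000000000000400UL
#define SLB_VSID_KS	0x0000000000000800UL

#define SLB_VSID_KERNEL	(SLB_VSID_KP)				/* class bit clear */
#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)	/* class bit set */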