about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorMichael Neuling <mikey@neuling.org>2007-08-24 02:58:37 -0400
committerPaul Mackerras <paulus@samba.org>2007-09-19 00:40:54 -0400
commit00efee7d5d0d7888aafbf0d2de76943ee8aca47a (patch)
tree672112cc5404b389f0547b4f5428ec06ca1ca122 /arch
parent61a564fd2e7ab13ab11a6ce8305433baacf344ef (diff)
[POWERPC] Remove barriers from the SLB shadow buffer update
After talking to an IBM POWER hypervisor (PHYP) design and development guy, there seems to be no need for memory barriers when updating the SLB shadow buffer provided we only update it from the current CPU, which we do. Also, these guys see no need in the future for these barriers.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/mm/slb.c6
2 files changed, 6 insertions, 8 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 952eba6701f4..fbbd3f6f0064 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -385,15 +385,15 @@ BEGIN_FTR_SECTION
 	oris	r0,r6,(SLB_ESID_V)@h
 	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
 
-	/* Update the last bolted SLB */
+	/* Update the last bolted SLB.  No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
-	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
-	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c81..4bee1cfa9dea 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -59,14 +59,12 @@ static inline void slb_shadow_update(unsigned long ea,
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
 }
 
 static inline void slb_shadow_clear(unsigned long entry)