Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c  35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..bc3914d54e26 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
 	unsigned long tmp;
 
 	WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-	unsigned long tmp;
+	if (!cpu_has_feature(CPU_FTR_ARCH_206))
+		return;
 
-	WARN_ON_ONCE(mfmsr() & MSR_EE);
+	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp != 0);
+	WARN_ON(present == (tmp == 0));
 #endif
 }
 
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);
 
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
 		       "r" (be64_to_cpu(p->save_area[index].esid)));
 	}
 
-	assert_slb_exists(local_paca->kstack);
+	assert_slb_presence(true, local_paca->kstack);
 }
 
 /*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
 		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
 			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
 		     : "memory");
-	assert_slb_exists(get_paca()->kstack);
+	assert_slb_presence(true, get_paca()->kstack);
 
 	get_paca()->slb_cache_ptr = 0;
 
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			ea = (unsigned long)
 				get_paca()->slb_cache[i] << SID_SHIFT;
 			/*
-			 * Could assert_slb_exists here, but hypervisor
-			 * or machine check could have come in and
-			 * removed the entry at this point.
+			 * Could assert_slb_presence(true) here, but
+			 * hypervisor or machine check could have come
+			 * in and removed the entry at this point.
 			 */
 
 			slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
 	barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 			return -EFAULT;
 
 		if (ea < H_VMALLOC_END)
-			flags = get_paca()->vmalloc_sllp;
+			flags = local_paca->vmalloc_sllp;
 		else
 			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
 	} else {
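
A note on the consolidated assertion above, since the predicate WARN_ON(present == (tmp == 0)) is easy to misread: slbfee. writes the matching SLB entry's VSID-side data to its target register, and the check treats a zero result as "no entry translates ea", so the warning fires exactly when the caller's expectation and the lookup disagree. The userspace sketch below walks the truth table; warn_on() and slb_presence_mismatch() are illustrative stand-ins written for this note, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's WARN_ON(): report when the condition holds. */
static bool warn_on(bool cond)
{
	if (cond)
		printf("WARN\n");
	return cond;
}

/*
 * Mirror of the predicate in assert_slb_presence(); tmp models the value
 * slbfee. wrote back, with zero taken to mean "no matching entry".
 */
static bool slb_presence_mismatch(bool present, unsigned long tmp)
{
	return present == (tmp == 0);
}

int main(void)
{
	warn_on(slb_presence_mismatch(true, 0x400UL));	/* expected, found: quiet */
	warn_on(slb_presence_mismatch(false, 0));	/* not expected, absent: quiet */
	warn_on(slb_presence_mismatch(true, 0));	/* expected, absent: WARN */
	warn_on(slb_presence_mismatch(false, 0x400UL));	/* not expected, found: WARN */
	return 0;
}

The two guards added ahead of the probe point the same way: the cpu_has_feature(CPU_FTR_ARCH_206) early return skips the assertion on CPUs where the probe cannot be used, and the __PPC_SLBFEE_DOT() macro from asm/ppc-opcode.h emits the instruction as a raw opcode, so the build no longer depends on assembler support for the slbfee. mnemonic.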