Diffstat (limited to 'arch/powerpc/mm/slb.c')
 arch/powerpc/mm/slb.c | 69 +++++++++++++++++++++++---------------------------
 1 file changed, 32 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a73574..a73d2d700973 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
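
This first hunk narrows the helper's interface: callers now hand slb_shadow_update() an effective address and SLB flags, and the ESID/VSID encoding moves inside the function. A minimal before/after call sketch (illustrative only, using just the names visible in this patch):

	/* before: the caller pre-encoded both fields */
	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags), entry);

	/* after: the helper derives both fields from ea itself */
	slb_shadow_update(ea, flags, entry);
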
@@ -61,28 +62,16 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
-					unsigned long entry)
+static inline void slb_shadow_clear(unsigned long entry)
 {
-	/*
-	 * Updating the shadow buffer before writing the SLB ensures
-	 * we don't get a stale entry here if we get preempted by PHYP
-	 * between these two statements.
-	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
-
-	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
-		     : "memory" );
+	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
 void slb_flush_and_rebolt(void)
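
The store ordering in the rewritten helper is deliberate: the ESID is zeroed first so the entry reads as invalid while it changes, the VSID is written next, and a valid ESID goes in last. barrier() only prevents compiler reordering; smp_wmb() also orders the stores in hardware on SMP, which matters because the hypervisor reads the shadow buffer asynchronously and must never observe a valid ESID paired with a stale VSID. Assembled from the two hunks above (comments mine), the helper now reads:

	static inline void slb_shadow_update(unsigned long ea,
					     unsigned long flags,
					     unsigned long entry)
	{
		/* Invalidate the entry while its fields are in flux... */
		get_slb_shadow()->save_area[entry].esid = 0;
		smp_wmb();
		get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
		smp_wmb();
		/* ...and only then publish a valid ESID. */
		get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
		smp_wmb();
	}
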
@@ -100,12 +89,13 @@ void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
-
-	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+		slb_shadow_clear(2);
+	} else {
+		/* Update stack entry; others don't change */
+		slb_shadow_update(get_paca()->kstack, lflags, 2);
+	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
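
When the kernel stack falls inside the bolted PAGE_OFFSET segment, entry 2 would describe the same ESID as entry 0, and two valid SLB entries for one segment is a hazard; the patch therefore clears the shadow slot instead of re-saving an entry whose valid bit was just stripped. The test works because of how the ESID word is built; for reference, mk_esid_data() earlier in this file reads roughly as follows (recalled from the kernel of this era, not part of this diff):

	static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
	{
		return (ea & ESID_MASK) | SLB_ESID_V | slot;
	}

Masking ksp_esid_data with ESID_MASK recovers the segment base, so comparing it against PAGE_OFFSET detects exactly the overlap case.
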
@@ -123,6 +113,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
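
The new slb_vmalloc_update() lets code that changes the vmalloc segment's page size rewrite bolted entry 1 in the shadow and then rebolt the hardware SLB in one call. No caller appears in this file; a hypothetical sketch of such a caller (the identifiers are real for this era, but the call site is assumed):

	/* Hypothetical caller: demote the vmalloc region to 4K pages and
	 * make the SLB and its shadow agree on the new page-size bits. */
	if (mmu_vmalloc_psize == MMU_PAGE_64K) {
		mmu_vmalloc_psize = MMU_PAGE_4K;
		slb_vmalloc_update();
	}
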
@@ -227,16 +226,12 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
-
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
-
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
-	asm volatile("isync":::"memory");
+	slb_shadow_update(PAGE_OFFSET, lflags, 0);
+	asm volatile("isync; slbia; sync; slbmte  %0,%1; isync" ::
+		     "r" (get_slb_shadow()->save_area[0].vsid),
+		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
+
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+
+	slb_flush_and_rebolt();
 }
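
The boot path now builds the hardware SLB from the shadow rather than in parallel with it: entry 0's shadow is written first, and the single isync; slbia; sync; slbmte; isync sequence installs the bolted kernel segment straight from the shadow's own vsid/esid fields. Since slbia leaves slot 0 alone, overwriting it with slbmte both clears any stale slot-0 translation and bolts the new one in a single step; entry 1 and the final rebolt are then delegated to slb_flush_and_rebolt(). A sanity sketch of the resulting invariant (illustrative, not part of the patch):

	/* After slb_initialize(), shadow slot 0 must describe the bolted
	 * kernel segment at PAGE_OFFSET. */
	BUG_ON((get_slb_shadow()->save_area[0].esid & ESID_MASK) != PAGE_OFFSET);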