Diffstat (limited to 'arch/powerpc/mm')

 -rw-r--r--  arch/powerpc/mm/slb.c   37 ++++++++++++++++++++++++++-----------
 -rw-r--r--  arch/powerpc/mm/stab.c  11 ++++++++++-

 2 files changed, 36 insertions, 12 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5b7038f248b6..a685652effeb 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	WARN_ON(!irqs_disabled());
-
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
-	/*
-	 * We can't take a PMU exception in the following code, so hard
-	 * disable interrupts.
-	 */
-	hard_irq_disable();
-
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_flush_and_rebolt(void)
+{
+
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
+	__slb_flush_and_rebolt();
+	get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long offset = get_paca()->slb_cache_ptr;
+	unsigned long offset;
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an SLB miss
+	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+	 */
+	hard_irq_disable();
+	offset = get_paca()->slb_cache_ptr;
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
-		slb_flush_and_rebolt();
+		__slb_flush_and_rebolt();
 	}
 
 	/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 98cd1dc2ae75..ab5fb48b3e90 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
-	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long offset;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause a STAB miss
+	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+	 */
+	hard_irq_disable();
+
+	offset = __get_cpu_var(stab_cache_ptr);
 	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;
 
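
The shape of the change is the same in both files: the flush work moves into a static helper that assumes interrupts are already hard-disabled, and the exported entry point becomes a thin wrapper that establishes that precondition itself, so a caller like switch_slb() that must hard-disable interrupts for its own reasons can invoke the helper directly without disabling twice. Below is a minimal userspace C sketch of that wrapper/helper split; hard_irq_disable() and the paca variable here are mock stand-ins for the kernel facilities, not the real APIs.

/* Minimal userspace sketch of the wrapper/helper split in this patch.
 * All kernel facilities are mocked; only the call structure mirrors
 * slb_flush_and_rebolt() / __slb_flush_and_rebolt(). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_hard_disabled;                         /* mock interrupt state */
static struct { unsigned long slb_cache_ptr; } paca;    /* mock PACA field */

/* Mock: in the kernel this would actually mask external interrupts;
 * here it only records that the precondition now holds. */
static void hard_irq_disable(void)
{
	irqs_hard_disabled = true;
}

/* Helper (the __slb_flush_and_rebolt() role): does the flush work and
 * requires interrupts to be hard-disabled already, so no PMU interrupt
 * can land while the segment buffer is in flux. */
static void __flush_and_rebolt(void)
{
	assert(irqs_hard_disabled);
	printf("flush and rebolt\n");
}

/* Exported entry point (the slb_flush_and_rebolt() role): establishes
 * the precondition itself, then resets the cache pointer. */
static void flush_and_rebolt(void)
{
	hard_irq_disable();
	__flush_and_rebolt();
	paca.slb_cache_ptr = 0;
}

/* A switch_slb()-like path: hard-disables interrupts anyway, so it calls
 * the helper directly and manages the cache pointer on its own. */
static void switch_context(void)
{
	hard_irq_disable();
	__flush_and_rebolt();
	paca.slb_cache_ptr = 0;
}

int main(void)
{
	flush_and_rebolt();
	switch_context();
	return (int)paca.slb_cache_ptr;	/* 0 on success */
}

The assert in the helper plays the role the WARN_ON(!irqs_disabled()) plays in the wrapper: the check stays at the external boundary, while internal callers are trusted to have already satisfied the precondition.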
