Diffstat (limited to 'arch/powerpc/mm/slb.c')

-rw-r--r--  arch/powerpc/mm/slb.c | 73 +++++++++++++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 43 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c81..6c164cec9d2c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,30 +43,37 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
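With 1TB segments the ESID mask depends on the segment size: a 256M segment keeps the bits above 2^28, a 1T segment the bits above 2^40. A standalone sketch of the mk_esid_data() logic above; the constant values are quoted from the 64-bit hash MMU headers of this era and should be treated as illustrative rather than authoritative:

#include <stdio.h>

#define MMU_SEGSIZE_256M   0
#define MMU_SEGSIZE_1T     1

#define ESID_MASK       0xfffffffff0000000UL   /* 256M: keep bits above 2^28 */
#define ESID_MASK_1T    0xffffff0000000000UL   /* 1T:   keep bits above 2^40 */
#define SLB_ESID_V      0x0000000008000000UL   /* valid bit */

static unsigned long esid_data(unsigned long ea, int ssize, unsigned long slot)
{
        unsigned long mask = (ssize == MMU_SEGSIZE_256M) ? ESID_MASK
                                                         : ESID_MASK_1T;
        return (ea & mask) | SLB_ESID_V | slot;
}

int main(void)
{
        unsigned long ea = 0xc000000012345678UL;  /* hypothetical kernel EA */

        /* The same EA maps into a much larger segment under 1T sizing */
        printf("256M segment: %#lx\n", esid_data(ea, MMU_SEGSIZE_256M, 2));
        printf("1T segment:   %#lx\n", esid_data(ea, MMU_SEGSIZE_1T, 2));
        return 0;
}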
@@ -74,7 +81,8 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -82,11 +90,11 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
 		     : "memory" );
 }
 
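The VSID doubleword handed to slbmte now carries the segment-size (B) field in its top bits, with the VSID shifted further up for 1T segments. A sketch of the same encoding as mk_vsid_data() above, with the VSID passed in directly; the shift values are quoted from the headers of this period, so treat them as illustrative:

#define MMU_SEGSIZE_256M        0
#define SLB_VSID_SHIFT          12      /* 256M: VSID sits above the flags */
#define SLB_VSID_SHIFT_1T       24      /* 1T: VSID sits 12 bits higher */
#define SLB_VSID_SSIZE_SHIFT    62      /* B (segment size) field, top bits */

static unsigned long vsid_data(unsigned long vsid, int ssize,
                               unsigned long flags)
{
        int shift = (ssize == MMU_SEGSIZE_256M) ? SLB_VSID_SHIFT
                                                : SLB_VSID_SHIFT_1T;

        /* VSID above the protection/LP flags, segment size in the top bits */
        return (vsid << shift) | flags |
                ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}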
@@ -95,7 +103,7 @@ void slb_flush_and_rebolt(void)
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -104,13 +112,15 @@ void slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
+		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -122,9 +132,9 @@ void slb_flush_and_rebolt(void)
 		/* Slot 2 - kernel stack */
 		"slbmte	%2,%3\n"
 		"isync"
-		:: "r"(mk_vsid_data(VMALLOC_START, vflags)),
-		   "r"(mk_esid_data(VMALLOC_START, 1)),
-		   "r"(mk_vsid_data(ksp_esid_data, lflags)),
+		:: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
+		   "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		   "r"(ksp_vsid_data),
 		   "r"(ksp_esid_data)
 		: "memory");
 }
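The rewritten bolted-stack test masks off the low 28 bits (which also drops SLB_ESID_V and the slot number) and compares the bare ESID against PAGE_OFFSET, so slot 2 stays invalid whenever the kernel stack already lives in the bolted first segment, whether that segment is 256M or 1T. A worked example; PAGE_OFFSET and ESID_MASK_1T are as on 64-bit powerpc of this era, and the stack address itself is hypothetical:

#include <stdio.h>

#define PAGE_OFFSET     0xc000000000000000UL
#define SLB_ESID_V      0x0000000008000000UL
#define ESID_MASK_1T    0xffffff0000000000UL

int main(void)
{
        /* hypothetical kernel stack inside the first 1T of the linear map */
        unsigned long kstack = 0xc000000007ff4000UL;
        unsigned long ksp_esid_data = (kstack & ESID_MASK_1T) | SLB_ESID_V | 2;

        /* ~0xfffffffUL clears the valid bit and slot as well as the offset,
         * so this compares the bare ESID against the first bolted segment */
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET)
                printf("stack is covered by slot 0; leave slot 2 invalid\n");
        else
                printf("stack needs its own bolted entry in slot 2\n");
        return 0;
}

Reading ksp_vsid_data back from the just-updated shadow slot, instead of recomputing it from the masked ksp_esid_data, keeps the slbmte operands consistent with whatever slb_shadow_update() stored.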
@@ -134,7 +144,7 @@ void slb_vmalloc_update(void)
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 	slb_flush_and_rebolt();
 }
 
@@ -142,7 +152,7 @@ void slb_vmalloc_update(void)
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset = get_paca()->slb_cache_ptr;
-	unsigned long esid_data = 0;
+	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -151,9 +161,12 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = ((unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT) | SLBIE_C;
-			asm volatile("slbie %0" : : "r" (esid_data));
+			slbie_data = (unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT; /* EA */
+			slbie_data |= user_segment_size(slbie_data)
+				<< SLBIE_SSIZE_SHIFT;
+			slbie_data |= SLBIE_C; /* C set for user addresses */
+			asm volatile("slbie %0" : : "r" (slbie_data));
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
@@ -162,7 +175,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/* Workaround POWER5 < DD2.1 issue */
 	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
-		asm volatile("slbie %0" : : "r" (esid_data));
+		asm volatile("slbie %0" : : "r" (slbie_data));
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
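Each slbie operand now encodes the segment size of the address being invalidated, since the slb_cache holds only 256M-granular ESIDs. A standalone sketch of the loop body above; user_segment_size() mirrors the helper this change relies on, and the constants are quoted from the headers of this period, so treat exact values as illustrative:

#include <stdio.h>

#define SID_SHIFT           28           /* 256M segment index */
#define SID_SHIFT_1T        40           /* 1T segment index */
#define MMU_SEGSIZE_256M    0
#define MMU_SEGSIZE_1T      1
#define SLBIE_SSIZE_SHIFT   25           /* segment-size field of the operand */
#define SLBIE_C             0x08000000UL /* class bit, set for user entries */

/* set once at boot from the CPU's supported segment sizes */
static int mmu_highuser_ssize = MMU_SEGSIZE_1T;

static int user_segment_size(unsigned long ea)
{
        /* only addresses at or above 1T can use a 1T segment */
        if (ea >= (1UL << SID_SHIFT_1T))
                return mmu_highuser_ssize;
        return MMU_SEGSIZE_256M;
}

int main(void)
{
        unsigned short cached_esid = 0x4; /* hypothetical slb_cache[] entry */
        unsigned long slbie_data;

        slbie_data = (unsigned long)cached_esid << SID_SHIFT;   /* EA */
        slbie_data |= (unsigned long)user_segment_size(slbie_data)
                        << SLBIE_SSIZE_SHIFT;
        slbie_data |= SLBIE_C;            /* C set for user addresses */

        printf("slbie operand: %#lx\n", slbie_data);
        return 0;
}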
@@ -245,9 +258,9 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
 
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
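The bolted layout that slb_initialize() establishes here, and that slb_flush_and_rebolt() preserves, comes down to three fixed slots. A descriptive sketch; the enum and its names are hypothetical, the code itself uses bare slot numbers:

/* Bolted SLB slots as used in arch/powerpc/mm/slb.c (names hypothetical) */
enum slb_bolted_slot {
        SLB_SLOT_LINEAR  = 0, /* PAGE_OFFSET, bolted in slb_initialize() */
        SLB_SLOT_VMALLOC = 1, /* VMALLOC_START, rewritten by slb_vmalloc_update() */
        SLB_SLOT_KSTACK  = 2, /* kernel stack, maintained by slb_flush_and_rebolt() */
};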