author     David Gibson <david@gibson.dropbear.id.au>  2005-12-09 00:45:17 -0500
committer  Paul Mackerras <paulus@samba.org>           2005-12-09 00:57:35 -0500
commit     23ed6cb9a237902cce6018a24d1993c346abddb4 (patch)
tree       e74b61d6da509fe85f5ec028a47c7e952bdae71a /arch/powerpc
parent     cbf52afdc0eb88492cf7808cc4b4f58a46f1b1ad (diff)
[PATCH] powerpc: Fix SLB flushing path in hugepage
On ppc64, when opening a new hugepage region, we need to make sure any old
normal-page SLBs for the area are flushed on all CPUs.  There was a bug in
this logic: after putting the new hugepage area masks into the thread
structure, we copied the new context information into the paca (read by the
SLB miss handler) on only one CPU, not on all of them.  This could cause
incorrect SLB entries to be loaded when a multithreaded program was running
simultaneously on several CPUs.

This patch corrects the error, copying the context information into the PACA
on all CPUs using the mm in question before flushing any existing SLB entries.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
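For orientation, the pattern the patch switches to looks roughly like the
sketch below. The helper names flush_segments_on_this_cpu() and
open_hpage_areas_flush() are illustrative only - the real functions are
flush_low_segments()/flush_high_segments() and
open_low_hpage_areas()/open_high_hpage_areas(), shown in the diff that
follows - and the slbie loop is elided.

struct slb_flush_info {
        struct mm_struct *mm;   /* mm whose hugepage areas just changed */
        u16 newareas;           /* mask of newly opened areas */
};

/* Runs on every CPU via on_each_cpu(). */
static void flush_segments_on_this_cpu(void *parm)
{
        struct slb_flush_info *fi = parm;

        /* CPUs not currently running the affected mm have nothing to do. */
        if (current->active_mm != fi->mm)
                return;

        /* Refresh this CPU's PACA copy of the context before invalidating
         * SLB entries, so any subsequent SLB miss already sees the new
         * hugepage area masks. */
        get_paca()->context = current->active_mm->context;

        /* ... slbie each segment covered by fi->newareas ... */
}

/* Caller side: broadcast the flush to every CPU and wait for completion. */
static void open_hpage_areas_flush(struct mm_struct *mm, u16 newareas)
{
        struct slb_flush_info fi = { .mm = mm, .newareas = newareas };

        mb();   /* context update must reach memory before the flush */
        on_each_cpu(flush_segments_on_this_cpu, &fi, 0, 1);
}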
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 97512b89e7b..54131b877da 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
 					  + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }