author    Nicholas Piggin <npiggin@gmail.com>  2018-09-14 11:30:52 -0400
committer Michael Ellerman <mpe@ellerman.id.au>  2018-09-19 08:01:46 -0400
commit    8fed04d0f6aedf99b3d811ba58d38bb7f938a47a (patch)
tree      c0119afa2bf361697e2a2690157854e1496cef1c
parent    5e46e29e6a977a71f6b5bed414b7bcdbff5a6a43 (diff)
powerpc/64s/hash: remove user SLB data from the paca
User SLB mapping data is copied into the PACA from the mm->context so it can
be accessed by the SLB miss handlers. After the C conversion, SLB miss
handlers now run with relocation on, and user SLB misses can take recursive
kernel SLB misses, so the user SLB mapping data can be removed from the paca
and accessed directly.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
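A minimal sketch of the shape of the change (an editorial illustration, not
part of the patch; the identifiers are taken from the hunks below):

/* Before: slice data mirrored into the per-CPU paca, resynced by
 * copy_mm_to_paca() whenever the context changed. */
unsigned char *psizes = get_paca()->mm_ctx_low_slices_psize;

/* After: the fault path reads the canonical copy in the mm itself,
 * which is safe now that SLB miss handlers run in C with relocation
 * on and can take recursive kernel SLB misses. */
unsigned char *psizes = mm->context.low_slices_psize;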
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h |  1
-rw-r--r--  arch/powerpc/include/asm/paca.h               | 13
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c             |  9
-rw-r--r--  arch/powerpc/kernel/paca.c                    | 22
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c               | 46
-rw-r--r--  arch/powerpc/mm/mmu_context.c                 |  3
-rw-r--r--  arch/powerpc/mm/slb.c                         | 20
-rw-r--r--  arch/powerpc/mm/slice.c                       | 29
8 files changed, 40 insertions(+), 103 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index e0e4ce8f77d6..d3064c7d1b1f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -501,6 +501,7 @@ struct slb_entry {
 };
 
 extern void slb_initialize(void);
+extern void core_flush_all_slbs(struct mm_struct *mm);
 extern void slb_flush_and_rebolt(void);
 void slb_flush_all_realmode(void);
 void __slb_restore_bolted_realmode(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 7b6e23af3808..8144d673541a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -143,18 +143,6 @@ struct paca_struct {
 	struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PPC_BOOK3S
-	mm_context_id_t mm_ctx_id;
-#ifdef CONFIG_PPC_MM_SLICES
-	unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
-	unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
-	unsigned long mm_ctx_slb_addr_limit;
-#else
-	u16 mm_ctx_user_psize;
-	u16 mm_ctx_sllp;
-#endif
-#endif
-
 	/*
 	 * then miscellaneous read-write fields
 	 */
@@ -258,7 +246,6 @@ struct paca_struct {
 #endif /* CONFIG_PPC_BOOK3S_64 */
 } ____cacheline_aligned;
 
-extern void copy_mm_to_paca(struct mm_struct *mm);
 extern struct paca_struct **paca_ptrs;
 extern void initialise_paca(struct paca_struct *new_paca, int cpu);
 extern void setup_paca(struct paca_struct *new_paca);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 89cf15566c4e..ce3ac40fd96e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -181,15 +181,6 @@ int main(void)
 	OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
 	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
 	OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
-	OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
-	OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
-	OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
-	OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
-	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
 
 #ifdef CONFIG_PPC_BOOK3E
 	OFFSET(PACAPGD, paca_struct, pgd);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0ee3e6d50f28..0cf84e30d1cd 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -258,25 +258,3 @@ void __init free_unused_pacas(void)
 	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
 			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
 }
-
-void copy_mm_to_paca(struct mm_struct *mm)
-{
-#ifdef CONFIG_PPC_BOOK3S
-	mm_context_t *context = &mm->context;
-
-	get_paca()->mm_ctx_id = context->id;
-#ifdef CONFIG_PPC_MM_SLICES
-	VM_BUG_ON(!mm->context.slb_addr_limit);
-	get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
-	memcpy(&get_paca()->mm_ctx_low_slices_psize,
-	       &context->low_slices_psize, sizeof(context->low_slices_psize));
-	memcpy(&get_paca()->mm_ctx_high_slices_psize,
-	       &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
-#else /* CONFIG_PPC_MM_SLICES */
-	get_paca()->mm_ctx_user_psize = context->user_psize;
-	get_paca()->mm_ctx_sllp = context->sllp;
-#endif
-#else /* !CONFIG_PPC_BOOK3S */
-	return;
-#endif
-}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f23a89d8e4ce..88c95dc8b141 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1088,16 +1088,16 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 }
 
 #ifdef CONFIG_PPC_MM_SLICES
-static unsigned int get_paca_psize(unsigned long addr)
+static unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned char *psizes;
 	unsigned long index, mask_index;
 
 	if (addr < SLICE_LOW_TOP) {
-		psizes = get_paca()->mm_ctx_low_slices_psize;
+		psizes = mm->context.low_slices_psize;
 		index = GET_LOW_SLICE_INDEX(addr);
 	} else {
-		psizes = get_paca()->mm_ctx_high_slices_psize;
+		psizes = mm->context.high_slices_psize;
 		index = GET_HIGH_SLICE_INDEX(addr);
 	}
 	mask_index = index & 0x1;
@@ -1105,9 +1105,9 @@ static unsigned int get_paca_psize(unsigned long addr)
 }
 
 #else
-unsigned int get_paca_psize(unsigned long addr)
+unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
 {
-	return get_paca()->mm_ctx_user_psize;
+	return mm->context.user_psize;
 }
 #endif
 
@@ -1118,15 +1118,11 @@ unsigned int get_paca_psize(unsigned long addr)
 #ifdef CONFIG_PPC_64K_PAGES
 void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
-	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
+	if (get_psize(mm, addr) == MMU_PAGE_4K)
 		return;
 	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
 	copro_flush_all_slbs(mm);
-	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
-
-		copy_mm_to_paca(mm);
-		slb_flush_and_rebolt();
-	}
+	core_flush_all_slbs(mm);
 }
 #endif /* CONFIG_PPC_64K_PAGES */
 
@@ -1191,22 +1187,6 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
 			trap, vsid, ssize, psize, lpsize, pte);
 }
 
-static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
-			     int psize, bool user_region)
-{
-	if (user_region) {
-		if (psize != get_paca_psize(ea)) {
-			copy_mm_to_paca(mm);
-			slb_flush_and_rebolt();
-		}
-	} else if (get_paca()->vmalloc_sllp !=
-		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-		get_paca()->vmalloc_sllp =
-			mmu_psize_defs[mmu_vmalloc_psize].sllp;
-		slb_vmalloc_update();
-	}
-}
-
 /* Result code is:
  *	0 - handled
  *	1 - normal page fault
@@ -1239,7 +1219,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		rc = 1;
 		goto bail;
 	}
-	psize = get_slice_psize(mm, ea);
+	psize = get_psize(mm, ea);
 	ssize = user_segment_size(ea);
 	vsid = get_user_vsid(&mm->context, ea, ssize);
 	break;
@@ -1327,9 +1307,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		WARN_ON(1);
 	}
 #endif
-	if (current->mm == mm)
-		check_paca_psize(ea, mm, psize, user_region);
-
 	goto bail;
 }
 
@@ -1364,15 +1341,14 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
1364 "to 4kB pages because of " 1341 "to 4kB pages because of "
1365 "non-cacheable mapping\n"); 1342 "non-cacheable mapping\n");
1366 psize = mmu_vmalloc_psize = MMU_PAGE_4K; 1343 psize = mmu_vmalloc_psize = MMU_PAGE_4K;
1344 slb_vmalloc_update();
1367 copro_flush_all_slbs(mm); 1345 copro_flush_all_slbs(mm);
1346 core_flush_all_slbs(mm);
1368 } 1347 }
1369 } 1348 }
1370 1349
1371#endif /* CONFIG_PPC_64K_PAGES */ 1350#endif /* CONFIG_PPC_64K_PAGES */
1372 1351
1373 if (current->mm == mm)
1374 check_paca_psize(ea, mm, psize, user_region);
1375
1376#ifdef CONFIG_PPC_64K_PAGES 1352#ifdef CONFIG_PPC_64K_PAGES
1377 if (psize == MMU_PAGE_64K) 1353 if (psize == MMU_PAGE_64K)
1378 rc = __hash_page_64K(ea, access, vsid, ptep, trap, 1354 rc = __hash_page_64K(ea, access, vsid, ptep, trap,
@@ -1460,7 +1436,7 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
 #ifdef CONFIG_PPC_MM_SLICES
 static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 {
-	int psize = get_slice_psize(mm, ea);
+	int psize = get_psize(mm, ea);
 
 	/* We only prefault standard pages for now */
 	if (unlikely(psize != mm->context.user_psize))
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index f84e14f23e50..28ae2835db3d 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -54,8 +54,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * MMU context id, which is then moved to SPRN_PID.
 	 *
 	 * For the hash MMU it is either the first load from slb_cache
-	 * in switch_slb(), and/or the store of paca->mm_ctx_id in
-	 * copy_mm_to_paca().
+	 * in switch_slb(), and/or load of MMU context id.
 	 *
 	 * On the other side, the barrier is in mm/tlb-radix.c for
 	 * radix which orders earlier stores to clear the PTEs vs
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6fec2ce3ccf4..1347ab86d32e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -347,8 +347,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		get_paca()->slb_cache_ptr = 0;
 	}
 
-	copy_mm_to_paca(mm);
-
 	/*
 	 * preload some userspace segments into the SLB.
 	 * Almost all 32 and 64bit PowerPC executables are linked at
@@ -375,6 +373,24 @@ void slb_set_size(u16 size)
 	mmu_slb_size = size;
 }
 
+static void cpu_flush_slb(void *parm)
+{
+	struct mm_struct *mm = parm;
+	unsigned long flags;
+
+	if (mm != current->active_mm)
+		return;
+
+	local_irq_save(flags);
+	slb_flush_and_rebolt();
+	local_irq_restore(flags);
+}
+
+void core_flush_all_slbs(struct mm_struct *mm)
+{
+	on_each_cpu(cpu_flush_slb, mm, 1);
+}
+
 void slb_initialize(void)
 {
 	unsigned long linear_llp, vmalloc_llp, io_llp;
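The new core_flush_all_slbs() replaces the open-coded paca resync done by
each caller. A hedged sketch of the intended call pattern, mirroring the
demote_segment_4k() hunk above (editorial illustration, not part of the
patch):

	/* After changing an mm's segment page sizes, flush coprocessor
	 * SLBs, then IPI every CPU; cpu_flush_slb() flushes and rebolts
	 * only on CPUs where mm is the active mm. */
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
	copro_flush_all_slbs(mm);
	core_flush_all_slbs(mm);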
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 205fe557ca10..606f424aac47 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -207,23 +207,6 @@ static bool slice_check_range_fits(struct mm_struct *mm,
 	return true;
 }
 
-static void slice_flush_segments(void *parm)
-{
-#ifdef CONFIG_PPC64
-	struct mm_struct *mm = parm;
-	unsigned long flags;
-
-	if (mm != current->active_mm)
-		return;
-
-	copy_mm_to_paca(current->active_mm);
-
-	local_irq_save(flags);
-	slb_flush_and_rebolt();
-	local_irq_restore(flags);
-#endif
-}
-
 static void slice_convert(struct mm_struct *mm,
 			  const struct slice_mask *mask, int psize)
 {
@@ -289,6 +272,9 @@ static void slice_convert(struct mm_struct *mm,
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
 	copro_flush_all_slbs(mm);
+#ifdef CONFIG_PPC64
+	core_flush_all_slbs(mm);
+#endif
 }
 
 /*
@@ -502,8 +488,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 		 * be already initialised beyond the old address limit.
 		 */
 		mm->context.slb_addr_limit = high_limit;
-
-		on_each_cpu(slice_flush_segments, mm, 1);
+#ifdef CONFIG_PPC64
+		core_flush_all_slbs(mm);
+#endif
 	}
 
 	/* Sanity checks */
@@ -665,8 +652,10 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	    (SLICE_NUM_HIGH &&
 	     !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
 		slice_convert(mm, &potential_mask, psize);
+#ifdef CONFIG_PPC64
 		if (psize > MMU_PAGE_BASE)
-			on_each_cpu(slice_flush_segments, mm, 1);
+			core_flush_all_slbs(mm);
+#endif
 	}
 	return newaddr;
 