aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2016-06-06 04:30:45 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-06-13 09:58:21 -0400
commita9809407f6b6b4b55df7b6cf5522e39476b7e5e6 (patch)
tree77fbe1921cbe1b71978b47d6eaf2742d1f46c73d /arch/s390/mm
parentfd5ada04030cb584251c381cb70daa41e984ae82 (diff)
s390/mm: fix vunmap vs finish_arch_post_lock_switch
The vunmap_pte_range() function calls ptep_get_and_clear() without any locking. ptep_get_and_clear() uses ptep_xchg_lazy()/ptep_flush_direct() for the page table update. ptep_flush_direct() requires that preemption is disabled, but without any locking this is not the case. If the kernel preempts the task while the attach_counter is increased, an endless loop in finish_arch_post_lock_switch() will occur the next time the task is scheduled. Add explicit preempt_disable()/preempt_enable() calls to the relevant functions in arch/s390/mm/pgtable.c. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- arch/s390/mm/pgtable.c | 20 ++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9f0ce0e6eeb4..67111ccbb5e0 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -70,7 +70,6 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
 #ifdef CONFIG_PGSTE
 	unsigned long old;
 
-	preempt_disable();
 	asm(
 		"	lg	%0,%2\n"
 		"0:	lgr	%1,%0\n"
@@ -93,7 +92,6 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 		: "=Q" (ptep[PTRS_PER_PTE])
 		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
 		: "cc", "memory");
-	preempt_enable();
 #endif
 }
 
@@ -230,9 +228,11 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 	pgste_t pgste;
 	pte_t old;
 
+	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_direct(mm, addr, ptep);
 	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	preempt_enable();
 	return old;
 }
 EXPORT_SYMBOL(ptep_xchg_direct);
@@ -243,9 +243,11 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	pgste_t pgste;
 	pte_t old;
 
+	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_lazy(mm, addr, ptep);
 	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	preempt_enable();
 	return old;
 }
 EXPORT_SYMBOL(ptep_xchg_lazy);
@@ -256,6 +258,7 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
 	pgste_t pgste;
 	pte_t old;
 
+	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_lazy(mm, addr, ptep);
 	if (mm_has_pgste(mm)) {
@@ -279,6 +282,7 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 	} else {
 		*ptep = pte;
 	}
+	preempt_enable();
 }
 EXPORT_SYMBOL(ptep_modify_prot_commit);
 
@@ -333,8 +337,10 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 {
 	pmd_t old;
 
+	preempt_disable();
 	old = pmdp_flush_direct(mm, addr, pmdp);
 	*pmdp = new;
+	preempt_enable();
 	return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_direct);
@@ -344,8 +350,10 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 {
 	pmd_t old;
 
+	preempt_disable();
 	old = pmdp_flush_lazy(mm, addr, pmdp);
 	*pmdp = new;
+	preempt_enable();
 	return old;
 }
 EXPORT_SYMBOL(pmdp_xchg_lazy);
@@ -398,20 +406,24 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pgste_t pgste;
 
 	/* the mm_has_pgste() check is done in set_pte_at() */
+	preempt_disable();
 	pgste = pgste_get_lock(ptep);
 	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 	pgste_set_key(ptep, pgste, entry, mm);
 	pgste = pgste_set_pte(ptep, pgste, entry);
 	pgste_set_unlock(ptep, pgste);
+	preempt_enable();
 }
 
 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pgste_t pgste;
 
+	preempt_disable();
 	pgste = pgste_get_lock(ptep);
 	pgste_val(pgste) |= PGSTE_IN_BIT;
 	pgste_set_unlock(ptep, pgste);
+	preempt_enable();
 }
 
 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
@@ -434,6 +446,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 	pte_t pte;
 
 	/* Zap unused and logically-zero pages */
+	preempt_disable();
 	pgste = pgste_get_lock(ptep);
 	pgstev = pgste_val(pgste);
 	pte = *ptep;
@@ -446,6 +459,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 	if (reset)
 		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 	pgste_set_unlock(ptep, pgste);
+	preempt_enable();
 }
 
 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -454,6 +468,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	pgste_t pgste;
 
 	/* Clear storage key */
+	preempt_disable();
 	pgste = pgste_get_lock(ptep);
 	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
 			      PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -461,6 +476,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
 	pgste_set_unlock(ptep, pgste);
+	preempt_enable();
 }
 
 /*