aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCyril Bur <cyrilbur@gmail.com>2015-10-07 20:04:26 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2015-10-08 17:01:38 -0400
commitfdf880a60835cd1dec2563463ac63ae3084e0ddc (patch)
tree46bc0900e03a565b432b658a09c890903c7b2a63
parent4108efb02daa09cbb5db048ada55a5b021b5183d (diff)
powerpc: Fix checkstop in native_hpte_clear() with lockdep
native_hpte_clear() is called in real mode from two places: - Early in boot during htab initialisation if firmware assisted dump is active. - Late in the kexec path. In both contexts there is no need to disable interrupts as they are already disabled. Furthermore, locking around the tlbie() is only required for pre POWER5 hardware. On POWER5 or newer hardware concurrent tlbie()s work as expected and on pre POWER5 hardware concurrent tlbie()s could result in deadlock. This code would only be executed at crashdump time, during which all bets are off, concurrent tlbie()s are unlikely and taking locks is unsafe therefore the best course of action is to simply do nothing. Concurrent tlbie()s are not possible in the first case as secondary CPUs have not come up yet. Signed-off-by: Cyril Bur <cyrilbur@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/include/asm/machdep.h9
-rw-r--r--arch/powerpc/mm/hash_native_64.c23
2 files changed, 18 insertions, 14 deletions
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cab6753f1be5..3f191f573d4f 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -61,8 +61,13 @@ struct machdep_calls {
61 unsigned long addr, 61 unsigned long addr,
62 unsigned char *hpte_slot_array, 62 unsigned char *hpte_slot_array,
63 int psize, int ssize, int local); 63 int psize, int ssize, int local);
64 /* special for kexec, to be called in real mode, linear mapping is 64 /*
65 * destroyed as well */ 65 * Special for kexec.
66 * To be called in real mode with interrupts disabled. No locks are
67 * taken as such, concurrent access on pre POWER5 hardware could result
68 * in a deadlock.
69 * The linear mapping is destroyed as well.
70 */
66 void (*hpte_clear_all)(void); 71 void (*hpte_clear_all)(void);
67 72
68 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 73 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 13befa35d8a8..c8822af10a58 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
582 * be when they isi), and we are the only one left. We rely on our kernel 582 * be when they isi), and we are the only one left. We rely on our kernel
583 * mapping being 0xC0's and the hardware ignoring those two real bits. 583 * mapping being 0xC0's and the hardware ignoring those two real bits.
584 * 584 *
585 * This must be called with interrupts disabled.
586 *
587 * Taking the native_tlbie_lock is unsafe here due to the possibility of
588 * lockdep being on. On pre POWER5 hardware, not taking the lock could
589 * cause deadlock. POWER5 and newer not taking the lock is fine. This only
590 * gets called during boot before secondary CPUs have come up and during
591 * crashdump and all bets are off anyway.
592 *
585 * TODO: add batching support when enabled. remember, no dynamic memory here, 593 * TODO: add batching support when enabled. remember, no dynamic memory here,
586 * athough there is the control page available... 594 * athough there is the control page available...
587 */ 595 */
588static void native_hpte_clear(void) 596static void native_hpte_clear(void)
589{ 597{
590 unsigned long vpn = 0; 598 unsigned long vpn = 0;
591 unsigned long slot, slots, flags; 599 unsigned long slot, slots;
592 struct hash_pte *hptep = htab_address; 600 struct hash_pte *hptep = htab_address;
593 unsigned long hpte_v; 601 unsigned long hpte_v;
594 unsigned long pteg_count; 602 unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
596 604
597 pteg_count = htab_hash_mask + 1; 605 pteg_count = htab_hash_mask + 1;
598 606
599 local_irq_save(flags);
600
601 /* we take the tlbie lock and hold it. Some hardware will
602 * deadlock if we try to tlbie from two processors at once.
603 */
604 raw_spin_lock(&native_tlbie_lock);
605
606 slots = pteg_count * HPTES_PER_GROUP; 607 slots = pteg_count * HPTES_PER_GROUP;
607 608
608 for (slot = 0; slot < slots; slot++, hptep++) { 609 for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
614 hpte_v = be64_to_cpu(hptep->v); 615 hpte_v = be64_to_cpu(hptep->v);
615 616
616 /* 617 /*
617 * Call __tlbie() here rather than tlbie() since we 618 * Call __tlbie() here rather than tlbie() since we can't take the
618 * already hold the native_tlbie_lock. 619 * native_tlbie_lock.
619 */ 620 */
620 if (hpte_v & HPTE_V_VALID) { 621 if (hpte_v & HPTE_V_VALID) {
621 hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); 622 hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
625 } 626 }
626 627
627 asm volatile("eieio; tlbsync; ptesync":::"memory"); 628 asm volatile("eieio; tlbsync; ptesync":::"memory");
628 raw_spin_unlock(&native_tlbie_lock);
629 local_irq_restore(flags);
630} 629}
631 630
632/* 631/*