aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arc/kernel
diff options
context:
space:
mode:
authorVineet Gupta <vgupta@synopsys.com>2016-02-23 01:25:16 -0500
committerVineet Gupta <vgupta@synopsys.com>2016-02-24 00:37:28 -0500
commitbb143f814ea488769ca2e79e0b376139cb5f134b (patch)
tree32857f21fd6b8a9a4ae8457142c34260628c535e /arch/arc/kernel
parent3e5177c1919bdc7651b5056f35409d0b4d728841 (diff)
ARCv2: SMP: Emulate IPI to self using software triggered interrupt
ARConnect/MCIP Inter-Core-Interrupt module can't send interrupt to local core. So use core intc capability to trigger software interrupt to self, using an unused IRQ #21. This showed up as csd deadlock with LTP trace_sched on a dual core system. This test acts as scheduler fuzzer, triggering all sorts of scheduling activity. Trouble starts with IPI to self, which doesn't get delivered (effectively lost due to H/w capability), but the msg intended to be sent remains enqueued in per-cpu @ipi_data. All subsequent IPIs to this core from other cores get elided due to the IPI coalescing optimization in ipi_send_msg_one() where a pending msg implies an IPI already sent and assumes other core is yet to ack it. After the elided IPI, other core simply goes into csd_lock_wait() but never comes out as this core never sees the interrupt. Fixes STAR 9001008624 Cc: Peter Zijlstra <peterz@infradead.org> Cc: <stable@vger.kernel.org> [4.2] Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--arch/arc/kernel/entry-arcv2.S11
-rw-r--r--arch/arc/kernel/mcip.c15
2 files changed, 21 insertions, 5 deletions
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index b17830294706..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
45VECTOR handle_interrupt ; (16) Timer0 45VECTOR handle_interrupt ; (16) Timer0
46VECTOR handle_interrupt ; unused (Timer1) 46VECTOR handle_interrupt ; unused (Timer1)
47VECTOR handle_interrupt ; unused (WDT) 47VECTOR handle_interrupt ; unused (WDT)
48VECTOR handle_interrupt ; (19) ICI (inter core interrupt) 48VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
49VECTOR handle_interrupt 49VECTOR handle_interrupt ; (20) perf Interrupt
50VECTOR handle_interrupt 50VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
51VECTOR handle_interrupt 51VECTOR handle_interrupt ; unused
52VECTOR handle_interrupt ; (23) End of fixed IRQs 52VECTOR handle_interrupt ; (23) unused
53# End of fixed IRQs
53 54
54.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 55.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
55 VECTOR handle_interrupt 56 VECTOR handle_interrupt
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 002c5fcf8947..9e1bd03b87a6 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,12 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <asm/irqflags-arcv2.h>
14#include <asm/mcip.h> 15#include <asm/mcip.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16 17
18#define SOFTIRQ_IRQ 21
19
17static char smp_cpuinfo_buf[128]; 20static char smp_cpuinfo_buf[128];
18static int idu_detected; 21static int idu_detected;
19 22
@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
22static void mcip_setup_per_cpu(int cpu) 25static void mcip_setup_per_cpu(int cpu)
23{ 26{
24 smp_ipi_irq_setup(cpu, IPI_IRQ); 27 smp_ipi_irq_setup(cpu, IPI_IRQ);
28 smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
25} 29}
26 30
27static void mcip_ipi_send(int cpu) 31static void mcip_ipi_send(int cpu)
@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
29 unsigned long flags; 33 unsigned long flags;
30 int ipi_was_pending; 34 int ipi_was_pending;
31 35
36 /* ARConnect can only send IPI to others */
37 if (unlikely(cpu == raw_smp_processor_id())) {
38 arc_softirq_trigger(SOFTIRQ_IRQ);
39 return;
40 }
41
32 /* 42 /*
33 * NOTE: We must spin here if the other cpu hasn't yet 43 * NOTE: We must spin here if the other cpu hasn't yet
34 * serviced a previous message. This can burn lots 44 * serviced a previous message. This can burn lots
@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
63 unsigned long flags; 73 unsigned long flags;
64 unsigned int __maybe_unused copy; 74 unsigned int __maybe_unused copy;
65 75
76 if (unlikely(irq == SOFTIRQ_IRQ)) {
77 arc_softirq_clear(irq);
78 return;
79 }
80
66 raw_spin_lock_irqsave(&mcip_lock, flags); 81 raw_spin_lock_irqsave(&mcip_lock, flags);
67 82
68 /* Who sent the IPI */ 83 /* Who sent the IPI */