author     Vineet Gupta <vgupta@synopsys.com>  2016-01-28 02:26:03 -0500
committer  Vineet Gupta <vgupta@synopsys.com>  2016-05-09 00:02:28 -0400
commit     569579401ae1c9b9f317f38261e32135b153e9b3 (patch)
tree       fd84b2771fc477193e2fb62cbf0d02bff340e9a8 /arch/arc/kernel
parent     db4c4426daedffefcfd890d04a6ec9ed93268878 (diff)
ARC: opencode arc_request_percpu_irq
- The idea is to remove the API usage since it has a subtle design flaw:
  it relies on being called on cpu0 first. This is true for some early
  per-cpu IRQs such as TIMER/IPI, but not for late probed per-cpu
  peripherals such as perf. Its usage in perf has already bitten us once:
  see c6317bc7c5ab ("ARCv2: perf: Ensure perf intr gets enabled on all
  cores") where we ended up open coding it anyway.

- The seeming duplication will go away once we start using a cpu notifier
  for timer setup.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--  arch/arc/kernel/irq.c    29
-rw-r--r--  arch/arc/kernel/smp.c    15
-rw-r--r--  arch/arc/kernel/time.c   14
3 files changed, 25 insertions, 33 deletions
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 88074b50456b..fb6dede9d05f 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -50,32 +50,3 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-/*
- * API called for requesting percpu interrupts - called by each CPU
- *  - For boot CPU, actually request the IRQ with genirq core + enables
- *  - For subsequent callers only enable called locally
- *
- * Relies on being called by boot cpu first (i.e. request called ahead) of
- * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
- * which are guaranteed to be setup on boot core first.
- * Late probed peripherals such as perf can't use this as there no guarantee
- * of being called on boot CPU first.
- */
-
-void arc_request_percpu_irq(int irq, int cpu,
-			    irqreturn_t (*isr)(int irq, void *dev),
-			    const char *irq_nm,
-			    void *percpu_dev)
-{
-	/* Boot cpu calls request, all call enable */
-	if (!cpu) {
-		int rc;
-
-		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
-		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
-	}
-
-	enable_percpu_irq(irq, 0);
-}
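The comment being removed above spells out the ordering assumption the commit message objects to: request_percpu_irq() must be issued once (from the boot CPU) before any CPU calls enable_percpu_irq(). As a minimal sketch of how a late probed per-cpu user such as perf has to handle this instead, the snippet below requests once from whichever CPU probe happens to run on and then broadcasts the enable; the names pmu_probe_irq, enable_pmu_irq and the "pmu" devname are illustrative and are not taken from this commit or from c6317bc7c5ab.

#include <linux/interrupt.h>
#include <linux/smp.h>

/* Runs on each CPU via on_each_cpu(); data points at the virq to enable */
static void enable_pmu_irq(void *data)
{
	enable_percpu_irq(*(int *)data, 0);
}

static int pmu_probe_irq(int irq, irq_handler_t isr, void __percpu *dev)
{
	int rc;

	/* one request for the whole system, from whichever CPU runs probe */
	rc = request_percpu_irq(irq, isr, "pmu", dev);
	if (rc)
		return rc;

	/* every CPU (including this one) must still enable the IRQ locally */
	on_each_cpu(enable_pmu_irq, &irq, 1);
	return 0;
}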
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 4cb3add77c75..ca83ebe15a64 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -346,6 +346,10 @@ irqreturn_t do_IPI(int irq, void *dev_id)
 
 /*
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
+ * function needs to call call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * request_percpu_irq() below will fail
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
@@ -353,7 +357,16 @@ int smp_ipi_irq_setup(int cpu, int irq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
 
-	arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
+	/* Boot cpu calls request, all call enable */
+	if (!cpu) {
+		int rc;
+
+		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		if (rc)
+			panic("Percpu IRQ request failed for %d\n", irq);
+	}
+
+	enable_percpu_irq(irq, 0);
 
 	return 0;
 }
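The Note added to the comment above places a requirement on the platform interrupt controller driver rather than on this file. A hedged sketch of where that call would sit, assuming a hypothetical irq-domain map callback plat_intc_irq_map, an assumed IPI hwirq number PLAT_IPI_HWIRQ and an assumed irq_chip plat_intc_chip (none of these names come from this commit):

#include <linux/irq.h>
#include <linux/irqdomain.h>

#define PLAT_IPI_HWIRQ	1		/* illustrative IPI hwirq number */

static struct irq_chip plat_intc_chip;	/* assumed platform irq_chip */

static int plat_intc_irq_map(struct irq_domain *d, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	if (hwirq == PLAT_IPI_HWIRQ) {
		/* mark per-cpu so request_percpu_irq() in smp_ipi_irq_setup() succeeds */
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &plat_intc_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &plat_intc_chip,
					 handle_level_irq);
	}

	return 0;
}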
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 7d9a736fc7e5..146da3cbcc99 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -251,14 +251,22 @@ void arc_local_timer_setup()
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 	int cpu = smp_processor_id();
+	int irq = TIMER0_IRQ;
 
 	evt->cpumask = cpumask_of(cpu);
 	clockevents_config_and_register(evt, arc_get_core_freq(),
 					0, ARC_TIMER_MAX);
 
-	/* setup the per-cpu timer IRQ handler - for all cpus */
-	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
-			       "Timer0 (per-cpu-tick)", evt);
+	if (!cpu) {
+		int rc;
+
+		rc = request_percpu_irq(irq, timer_irq_handler,
+					"Timer0 (per-cpu-tick)", evt);
+		if (rc)
+			panic("Percpu IRQ request failed for TIMER\n");
+	}
+
+	enable_percpu_irq(irq, 0);
 }
 
 /*
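The second point of the commit message anticipates folding this open-coded setup into a CPU notifier so that secondary CPUs pick up the timer automatically. As a rough illustration only, not the follow-up commit itself, using the register_cpu_notifier()/CPU_STARTING mechanism current at the time (arc_timer_cpu_notify and arc_timer_cpu_nb are made-up names):

#include <linux/cpu.h>
#include <linux/notifier.h>

/* CPU_STARTING notifiers run on the incoming CPU with IRQs off, so the
 * per-cpu clockevent registration and enable_percpu_irq() happen in the
 * right context without each caller open coding the request/enable dance */
static int arc_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
		arc_local_timer_setup();

	return NOTIFY_OK;
}

static struct notifier_block arc_timer_cpu_nb = {
	.notifier_call = arc_timer_cpu_notify,
};

The boot CPU would still call arc_local_timer_setup() for itself directly and register the notifier once, e.g. register_cpu_notifier(&arc_timer_cpu_nb), from the boot-time setup path.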