author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-10-23 09:42:30 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-10-23 09:42:30 -0400
commit		34471a9168c8bfd7f0d00989a7b0797ad27d585e (patch)
tree		847af3ec746c6357902cf57c7e12ba55a35eed30
parent		cefd3e71efca6f4ef7f06f1fc507771d76072741 (diff)
parent		28af690a284dfcb627bd69d0963db1c0f412cb8c (diff)
Merge branch 'ppi-irq-core-for-rmk' of git://github.com/mzyngier/arm-platforms into devel-stable
-rw-r--r--  arch/arm/Kconfig                                    |  25
-rw-r--r--  arch/arm/common/gic.c                               |  60
-rw-r--r--  arch/arm/include/asm/cputype.h                      |   6
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S            |   7
-rw-r--r--  arch/arm/include/asm/exception.h                    |  19
-rw-r--r--  arch/arm/include/asm/hardirq.h                      |   3
-rw-r--r--  arch/arm/include/asm/hardware/entry-macro-gic.S     |  19
-rw-r--r--  arch/arm/include/asm/hardware/gic.h                 |   1
-rw-r--r--  arch/arm/include/asm/localtimer.h                   |  19
-rw-r--r--  arch/arm/include/asm/smp.h                          |  16
-rw-r--r--  arch/arm/include/asm/smp_twd.h                      |   2
-rw-r--r--  arch/arm/include/asm/system.h                       |   7
-rw-r--r--  arch/arm/include/asm/topology.h                     |  33
-rw-r--r--  arch/arm/kernel/Makefile                            |   1
-rw-r--r--  arch/arm/kernel/irq.c                               |   5
-rw-r--r--  arch/arm/kernel/smp.c                               |  60
-rw-r--r--  arch/arm/kernel/smp_scu.c                           |   2
-rw-r--r--  arch/arm/kernel/smp_twd.c                           |  47
-rw-r--r--  arch/arm/kernel/topology.c                          | 148
-rw-r--r--  arch/arm/kernel/traps.c                             |   1
-rw-r--r--  arch/arm/mach-exynos4/include/mach/entry-macro.S    |   7
-rw-r--r--  arch/arm/mach-exynos4/mct.c                         |   7
-rw-r--r--  arch/arm/mach-exynos4/platsmp.c                     |  10
-rw-r--r--  arch/arm/mach-msm/board-msm8x60.c                   |  11
-rw-r--r--  arch/arm/mach-msm/include/mach/entry-macro-qgic.S   |  73
-rw-r--r--  arch/arm/mach-msm/platsmp.c                         |   6
-rw-r--r--  arch/arm/mach-msm/timer.c                           |  69
-rw-r--r--  arch/arm/mach-omap2/include/mach/entry-macro.S      |  14
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c                      |  10
-rw-r--r--  arch/arm/mach-pxa/irq.c                             |   2
-rw-r--r--  arch/arm/mach-realview/platsmp.c                    |  10
-rw-r--r--  arch/arm/mach-shmobile/entry-intc.S                 |   3
-rw-r--r--  arch/arm/mach-shmobile/include/mach/entry-macro.S   |   3
-rw-r--r--  arch/arm/mach-shmobile/platsmp.c                    |   6
-rw-r--r--  arch/arm/mach-tegra/platsmp.c                       |   8
-rw-r--r--  arch/arm/mach-ux500/platsmp.c                       |  10
-rw-r--r--  arch/arm/mach-vexpress/ct-ca9x4.c                   |   6
-rw-r--r--  arch/arm/mm/fault.c                                 |   1
-rw-r--r--  include/linux/interrupt.h                           |  38
-rw-r--r--  include/linux/irq.h                                 |  18
-rw-r--r--  include/linux/irqdesc.h                             |   1
-rw-r--r--  kernel/irq/chip.c                                   |  66
-rw-r--r--  kernel/irq/internals.h                              |  19
-rw-r--r--  kernel/irq/irqdesc.c                                |  32
-rw-r--r--  kernel/irq/manage.c                                 | 218
-rw-r--r--  kernel/irq/settings.h                               |   7
46 files changed, 829 insertions(+), 307 deletions(-)
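This merge wires ARM's private peripheral interrupts (PPIs) into the genirq per-CPU interrupt infrastructure (request_percpu_irq() and friends, added by the kernel/irq entries in the diffstat), converts the TWD local timer and the MSM timers over to it, introduces a logical-to-physical CPU map keyed off MPIDR, and adds MPIDR-based CPU topology for the scheduler. A minimal sketch of the driver-side pattern the series establishes — my_device, my_device_handle() and MY_PPI_IRQ are hypothetical stand-ins, and the flow mirrors the smp_twd.c conversion below:

	#include <linux/interrupt.h>
	#include <linux/percpu.h>

	static struct my_device __percpu **my_devs;	/* hypothetical device type */

	static irqreturn_t my_ppi_handler(int irq, void *dev_id)
	{
		/* dev_id is the calling CPU's slot of the percpu cookie */
		struct my_device *dev = *(struct my_device **)dev_id;

		return my_device_handle(dev) ? IRQ_HANDLED : IRQ_NONE;
	}

	static int __init my_init(void)
	{
		int err;

		my_devs = alloc_percpu(struct my_device *);
		if (!my_devs)
			return -ENOMEM;

		/* one request covers all CPUs; PPIs are marked IRQF_NOAUTOEN,
		 * so nothing fires until each CPU opts in below */
		err = request_percpu_irq(MY_PPI_IRQ, my_ppi_handler,
					 "my-ppi", my_devs);
		if (err)
			return err;

		/* enables only this CPU's instance; secondary CPUs make the
		 * same call from their own bring-up path (type 0 keeps the
		 * configured trigger) */
		enable_percpu_irq(MY_PPI_IRQ, 0);
		return 0;
	}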
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8d5ec8a9391..2686959319a0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1420,6 +1420,31 @@ config SMP_ON_UP
 
 	  If you don't know what to do here, say Y.
 
+config ARM_CPU_TOPOLOGY
+	bool "Support cpu topology definition"
+	depends on SMP && CPU_V7
+	default y
+	help
+	  Support ARM cpu topology definition. The MPIDR register defines
+	  affinity between processors which is then used to describe the cpu
+	  topology of an ARM System.
+
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on ARM_CPU_TOPOLOGY
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+	bool "SMT scheduler support"
+	depends on ARM_CPU_TOPOLOGY
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  MultiThreading at a cost of slightly increased overhead in some
+	  places. If unsure say N here.
+
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 734db99eaee7..016c1aeb847c 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -29,6 +29,9 @@
 #include <linux/cpu_pm.h>
 #include <linux/cpumask.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
 
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
@@ -181,7 +184,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 		return -EINVAL;
 
 	mask = 0xff << shift;
-	bit = 1 << (cpu + shift);
+	bit = 1 << (cpu_logical_map(cpu) + shift);
 
 	spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
@@ -260,9 +263,16 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 			  unsigned int irq_start)
 {
 	unsigned int gic_irqs, irq_limit, i;
+	u32 cpumask;
 	void __iomem *base = gic->dist_base;
-	u32 cpumask = 1 << smp_processor_id();
+	u32 cpu = 0;
+	u32 nrppis = 0, ppi_base = 0;
 
+#ifdef CONFIG_SMP
+	cpu = cpu_logical_map(smp_processor_id());
+#endif
+
+	cpumask = 1 << cpu;
 	cpumask |= cpumask << 8;
 	cpumask |= cpumask << 16;
 
@@ -280,6 +290,23 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 	gic->gic_irqs = gic_irqs;
 
 	/*
+	 * Nobody would be insane enough to use PPIs on a secondary
+	 * GIC, right?
+	 */
+	if (gic == &gic_data[0]) {
+		nrppis = (32 - irq_start) & 31;
+
+		/* The GIC only supports up to 16 PPIs. */
+		if (nrppis > 16)
+			BUG();
+
+		ppi_base = gic->irq_offset + 32 - nrppis;
+	}
+
+	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
+		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
+
+	/*
 	 * Set all global interrupts to be level triggered, active low.
 	 */
 	for (i = 32; i < gic_irqs; i += 16)
@@ -314,7 +341,17 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 	/*
 	 * Setup the Linux IRQ subsystem.
 	 */
-	for (i = irq_start; i < irq_limit; i++) {
+	for (i = 0; i < nrppis; i++) {
+		int ppi = i + ppi_base;
+
+		irq_set_percpu_devid(ppi);
+		irq_set_chip_and_handler(ppi, &gic_chip,
+					 handle_percpu_devid_irq);
+		irq_set_chip_data(ppi, gic);
+		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
+	}
+
+	for (i = irq_start + nrppis; i < irq_limit; i++) {
 		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
 		irq_set_chip_data(i, gic);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
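The flag change in this hunk is the behavioural core of the series: PPIs are now registered with irq_set_percpu_devid() and IRQF_NOAUTOEN instead of IRQF_PROBE, so requesting one no longer unmasks it anywhere. Each CPU has to opt in from its own context — a minimal sketch (ppi_irq is a placeholder for an interrupt in the new percpu range):

	static void my_ppi_online(unsigned int ppi_irq)
	{
		/* runs on the CPU that should receive the PPI, e.g. from
		 * its local timer setup path; type 0 leaves the configured
		 * trigger unchanged */
		enable_percpu_irq(ppi_irq, 0);
	}

	static void my_ppi_offline(unsigned int ppi_irq)
	{
		/* symmetric call on the way down (CPU hotplug) */
		disable_percpu_irq(ppi_irq);
	}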
@@ -557,20 +594,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
 	gic_cpu_init(&gic_data[gic_nr]);
 }
 
-void __cpuinit gic_enable_ppi(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	irq_set_status_flags(irq, IRQ_NOPROBE);
-	gic_unmask_irq(irq_get_irq_data(irq));
-	local_irq_restore(flags);
-}
-
 #ifdef CONFIG_SMP
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
-	unsigned long map = *cpus_addr(*mask);
+	int cpu;
+	unsigned long map = 0;
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= 1 << cpu_logical_map(cpu);
 
 	/*
 	 * Ensure that stores to Normal memory are visible to the
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cd4458f64171..cb47d28cbe1f 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,7 @@
 #define CPUID_CACHETYPE	1
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
+#define CPUID_MPIDR	5
 
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
 	return read_cpuid(CPUID_TCM);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(CPUID_MPIDR);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA. For
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 2f1e2098dfe7..88d61815f0c0 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -25,13 +25,6 @@
 	movne	r1, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
-	test_for_ltirq	r0, r2, r6, lr
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_local_timer
-#endif
 #endif
 9997:
 	.endm
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
new file mode 100644
index 000000000000..5abaf5bbd985
--- /dev/null
+++ b/arch/arm/include/asm/exception.h
@@ -0,0 +1,19 @@
+/*
+ * Annotations for marking C functions as exception handlers.
+ *
+ * These should only be used for C functions that are called from the low
+ * level exception entry code and not any intervening C code.
+ */
+#ifndef __ASM_ARM_EXCEPTION_H
+#define __ASM_ARM_EXCEPTION_H
+
+#include <linux/ftrace.h>
+
+#define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
+#define __exception_irq_entry	__exception
+#endif
+
+#endif /* __ASM_ARM_EXCEPTION_H */
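This new header simply rehomes the two annotations that previously lived in <asm/system.h> (removed further down), so low-level entry points can pull them in without the rest of system.h. A hedged usage sketch — handle_board_irq() is a made-up entry point, not part of this merge:

	#include <asm/exception.h>

	/*
	 * Called directly from the exception vectors; __exception_irq_entry
	 * expands to __irq_entry when CONFIG_FUNCTION_GRAPH_TRACER is set
	 * (so the tracer recognizes the IRQ entry), plain __exception
	 * otherwise.
	 */
	asmlinkage void __exception_irq_entry handle_board_irq(struct pt_regs *regs);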
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 89ad1805e579..ddf07a92a6c8 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -9,9 +9,6 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_LOCAL_TIMERS
-	unsigned int local_timer_irqs;
-#endif
 #ifdef CONFIG_SMP
 	unsigned int ipi_irqs[NR_IPI];
 #endif
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
index c115b82fe80a..74ebc803904d 100644
--- a/arch/arm/include/asm/hardware/entry-macro-gic.S
+++ b/arch/arm/include/asm/hardware/entry-macro-gic.S
@@ -22,15 +22,11 @@
  * interrupt controller spec.  To wit:
  *
  * Interrupts 0-15 are IPI
- * 16-28 are reserved
- * 29-31 are local.  We allow 30 to be used for the watchdog.
+ * 16-31 are local.  We allow 30 to be used for the watchdog.
  * 32-1020 are global
  * 1021-1022 are reserved
  * 1023 is "spurious" (no interrupt)
  *
- * For now, we ignore all local interrupts so only return an interrupt if it's
- * between 30 and 1020.  The test_for_ipi routine below will pick up on IPIs.
- *
  * A simple read from the controller will tell us the number of the highest
  * priority enabled interrupt.  We then just need to check whether it is in the
  * valid range for an IRQ (30-1020 inclusive).
@@ -43,7 +39,7 @@
 
 	ldr	\tmp, =1021
 	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr
@@ -62,14 +58,3 @@
 	strcc	\irqstat, [\base, #GIC_CPU_EOI]
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #29
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index c5627057b1c7..14867e12f205 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -40,7 +40,6 @@ void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-void gic_enable_ppi(unsigned int);
 
 struct gic_chip_data {
 	unsigned int irq_offset;
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128d..f5e1cec7e35c 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_LOCALTIMER_H
 #define __ASM_ARM_LOCALTIMER_H
 
+#include <linux/interrupt.h>
+
 struct clock_event_device;
 
 /*
@@ -17,27 +19,20 @@ struct clock_event_device;
  */
 void percpu_timer_setup(void);
 
-/*
- * Called from assembly, this is the local timer IRQ handler
- */
-asmlinkage void do_local_timer(struct pt_regs *);
-
-
 #ifdef CONFIG_LOCAL_TIMERS
 
 #ifdef CONFIG_HAVE_ARM_TWD
 
 #include "smp_twd.h"
 
-#define local_timer_ack()	twd_timer_ack()
+#define local_timer_stop(c)	twd_timer_stop((c))
 
 #else
 
 /*
- * Platform provides this to acknowledge a local timer IRQ.
- * Returns true if the local timer IRQ is to be processed.
+ * Stop the local timer
  */
-int local_timer_ack(void);
+void local_timer_stop(struct clock_event_device *);
 
 #endif
 
@@ -52,6 +47,10 @@ static inline int local_timer_setup(struct clock_event_device *evt)
 {
 	return -ENXIO;
 }
+
+static inline void local_timer_stop(struct clock_event_device *evt)
+{
+}
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index e42d96a45d3e..1e5717afc4ac 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -33,6 +33,11 @@ extern void show_ipi_list(struct seq_file *, int);
 asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
 
 /*
+ * Called from C code, this handles an IPI.
+ */
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
  * Setup the set of possible CPUs (via set_cpu_possible)
  */
 extern void smp_init_cpus(void);
@@ -66,6 +71,12 @@ extern void platform_secondary_init(unsigned int cpu);
 extern void platform_smp_prepare_cpus(unsigned int);
 
 /*
+ * Logical CPU mapping.
+ */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+
+/*
  * Initial data for bringing up a secondary CPU.
  */
 struct secondary_data {
@@ -88,9 +99,4 @@ extern void platform_cpu_enable(unsigned int cpu);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-/*
- * show local interrupt info
- */
-extern void show_local_irqs(struct seq_file *, int);
-
 #endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index fed9981fba08..ef9ffba97ad8 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -22,7 +22,7 @@ struct clock_event_device;
 
 extern void __iomem *twd_base;
 
-int twd_timer_ack(void);
 void twd_timer_setup(struct clock_event_device *);
+void twd_timer_stop(struct clock_event_device *);
 
 #endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 832888d0c20c..ed6b0499a106 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -62,13 +62,6 @@
 
 #include <asm/outercache.h>
 
-#define __exception	__attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#define __exception_irq_entry	__irq_entry
-#else
-#define __exception_irq_entry	__exception
-#endif
-
 struct thread_info;
 struct task_struct;
 
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index accbd7cad9b5..a7e457ed27c3 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -1,6 +1,39 @@
 #ifndef _ASM_ARM_TOPOLOGY_H
 #define _ASM_ARM_TOPOLOGY_H
 
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_arm {
+	int thread_id;
+	int core_id;
+	int socket_id;
+	cpumask_t thread_sibling;
+	cpumask_t core_sibling;
+};
+
+extern struct cputopo_arm cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable()	(cpu_topology[0].socket_id != -1)
+#define smt_capable()	(cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
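Everything generic code needs is exposed through the four topology_* accessors and the two capability macros above. A small consumer sketch, assuming the hypothetical helper name my_print_core_siblings():

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <asm/topology.h>

	static void my_print_core_siblings(unsigned int cpu)
	{
		int sibling;

		/* CPUs that share this CPU's socket-level MPIDR affinity */
		for_each_cpu(sibling, topology_core_cpumask(cpu))
			pr_info("CPU%u: core sibling of CPU%d\n", cpu, sibling);
	}

The scheduler's CONFIG_SCHED_MC/CONFIG_SCHED_SMT paths consume the same masks via cpu_coregroup_mask(), defined in the new topology.c below.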
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8fa83f54c967..7cac26c5f502 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
 obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
+obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
   obj-y		+= io.o
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index de3dcab8610b..7cb29261249a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,8 +35,8 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
-#include <linux/ftrace.h>
 
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
@@ -59,9 +59,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
 #endif
-#ifdef CONFIG_LOCAL_TIMERS
-	show_local_irqs(p, prec);
-#endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d88ff0230e82..a96c08cd6125 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,7 +16,6 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
-#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -31,6 +30,8 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -39,6 +40,7 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
+#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -259,6 +261,20 @@ void __ref cpu_die(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+	int i;
+	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+	cpu_logical_map(0) = cpu;
+	for (i = 1; i < NR_CPUS; ++i)
+		cpu_logical_map(i) = i == cpu ? 0 : i;
+
+	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
 /*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
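A worked example of the mapping being set up here: with NR_CPUS = 4 and a boot CPU whose MPIDR affinity-0 field reads 2, the loop leaves cpu_logical_map() as { 2, 1, 0, 3 } — logical CPU 0 is always the CPU we booted on, physical CPU 0 inherits the slot it vacated, and everyone else stays identity-mapped. This is the map that gic_raise_softirq() and gic_set_affinity() (earlier in this diff) translate through before touching GIC registers.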
@@ -268,6 +284,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
 	cpu_info->loops_per_jiffy = loops_per_jiffy;
+
+	store_cpu_topology(cpuid);
 }
 
 /*
@@ -358,6 +376,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int ncores = num_possible_cpus();
 
+	init_cpu_topology();
+
 	smp_store_cpu_info(smp_processor_id());
 
 	/*
@@ -437,10 +457,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	for (i = 0; i < NR_IPI; i++)
 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-#ifdef CONFIG_LOCAL_TIMERS
-	sum += __get_irq_stat(cpu, local_timer_irqs);
-#endif
-
 	return sum;
 }
 
@@ -457,33 +473,6 @@ static void ipi_timer(void)
 	irq_exit();
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	int cpu = smp_processor_id();
-
-	if (local_timer_ack()) {
-		__inc_irq_stat(cpu, local_timer_irqs);
-		ipi_timer();
-	}
-
-	set_irq_regs(old_regs);
-}
-
-void show_local_irqs(struct seq_file *p, int prec)
-{
-	unsigned int cpu;
-
-	seq_printf(p, "%*s: ", prec, "LOC");
-
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
-
-	seq_printf(p, " Local timer interrupts\n");
-}
-#endif
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
@@ -534,7 +523,7 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	local_timer_stop(evt);
 }
 #endif
 
@@ -567,6 +556,11 @@ static void ipi_cpu_stop(unsigned int cpu)
  */
 asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 {
+	handle_IPI(ipinr, regs);
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 79ed5e7f204a..5b6d536cbfe3 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -33,7 +33,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
 /*
  * Enable the SCU
  */
-void __init scu_enable(void __iomem *scu_base)
+void scu_enable(void __iomem *scu_base)
 {
 	u32 scu_ctrl;
 
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 01c186222f3b..a8a6682d6b52 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 
 #include <asm/smp_twd.h>
+#include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */
@@ -26,6 +27,8 @@ void __iomem *twd_base;
 
 static unsigned long twd_timer_rate;
 
+static struct clock_event_device __percpu **twd_evt;
+
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
 {
@@ -80,6 +83,12 @@ int twd_timer_ack(void)
 	return 0;
 }
 
+void twd_timer_stop(struct clock_event_device *clk)
+{
+	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
 static void __cpuinit twd_calibrate_rate(void)
 {
 	unsigned long count;
@@ -119,11 +128,43 @@ static void __cpuinit twd_calibrate_rate(void)
 	}
 }
 
+static irqreturn_t twd_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	if (twd_timer_ack()) {
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
 void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
+	struct clock_event_device **this_cpu_clk;
+
+	if (!twd_evt) {
+		int err;
+
+		twd_evt = alloc_percpu(struct clock_event_device *);
+		if (!twd_evt) {
+			pr_err("twd: can't allocate memory\n");
+			return;
+		}
+
+		err = request_percpu_irq(clk->irq, twd_handler,
+					 "twd", twd_evt);
+		if (err) {
+			pr_err("twd: can't register interrupt %d (%d)\n",
+			       clk->irq, err);
+			return;
+		}
+	}
+
 	twd_calibrate_rate();
 
 	clk->name = "local_timer";
@@ -137,8 +178,10 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
+	this_cpu_clk = __this_cpu_ptr(twd_evt);
+	*this_cpu_clk = clk;
+
 	clockevents_register_device(clk);
 
-	/* Make sure our local interrupt controller has this enabled */
-	gic_enable_ppi(clk->irq);
+	enable_percpu_irq(clk->irq, 0);
 }
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
new file mode 100644
index 000000000000..1040c00405d0
--- /dev/null
+++ b/arch/arm/kernel/topology.c
@@ -0,0 +1,148 @@
+/*
+ * arch/arm/kernel/topology.c
+ *
+ * Copyright (C) 2011 Linaro Limited.
+ * Written by: Vincent Guittot
+ *
+ * based on arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+/*
+ * These masks reflect the current use of the affinity levels.
+ * The affinity level can be up to 16 bits according to ARM ARM
+ */
+
+#define MPIDR_LEVEL0_MASK 0x3
+#define MPIDR_LEVEL0_SHIFT 0
+
+#define MPIDR_LEVEL1_MASK 0xF
+#define MPIDR_LEVEL1_SHIFT 8
+
+#define MPIDR_LEVEL2_MASK 0xFF
+#define MPIDR_LEVEL2_SHIFT 16
+
+struct cputopo_arm cpu_topology[NR_CPUS];
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_topology[cpu].core_sibling;
+}
+
+/*
+ * store_cpu_topology is called at boot when only one cpu is running
+ * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
+ * which prevents simultaneous write access to cpu_topology array
+ */
+void store_cpu_topology(unsigned int cpuid)
+{
+	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	unsigned int mpidr;
+	unsigned int cpu;
+
+	/* If the cpu topology has been already set, just return */
+	if (cpuid_topo->core_id != -1)
+		return;
+
+	mpidr = read_cpuid_mpidr();
+
+	/* create cpu topology mapping */
+	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
+		/*
+		 * This is a multiprocessor system
+		 * multiprocessor format & multiprocessor mode field are set
+		 */
+
+		if (mpidr & MPIDR_MT_BITMASK) {
+			/* core performance interdependency */
+			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
+				& MPIDR_LEVEL2_MASK;
+		} else {
+			/* largely independent cores */
+			cpuid_topo->thread_id = -1;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+		}
+	} else {
+		/*
+		 * This is an uniprocessor system
+		 * we are in multiprocessor format but uniprocessor system
+		 * or in the old uniprocessor format
+		 */
+		cpuid_topo->thread_id = -1;
+		cpuid_topo->core_id = 0;
+		cpuid_topo->socket_id = -1;
+	}
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+			if (cpu != cpuid)
+				cpumask_set_cpu(cpu,
+					&cpuid_topo->core_sibling);
+
+			if (cpuid_topo->core_id == cpu_topo->core_id) {
+				cpumask_set_cpu(cpuid,
+					&cpu_topo->thread_sibling);
+				if (cpu != cpuid)
+					cpumask_set_cpu(cpu,
+						&cpuid_topo->thread_sibling);
+			}
+		}
+	}
+	smp_wmb();
+
+	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+		cpuid, cpu_topology[cpuid].thread_id,
+		cpu_topology[cpuid].core_id,
+		cpu_topology[cpuid].socket_id, mpidr);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+void init_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	/* init core mask */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+		cpu_topo->thread_id = -1;
+		cpu_topo->core_id = -1;
+		cpu_topo->socket_id = -1;
+		cpumask_clear(&cpu_topo->core_sibling);
+		cpumask_clear(&cpu_topo->thread_sibling);
+	}
+	smp_wmb();
+}
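A worked example of the decode above, for a hypothetical non-MT part reporting MPIDR = 0x80000102: bits [31:30] = 0x2, so the multiprocessor-format check passes; bit 24 (MT) is clear, so thread_id = -1, core_id = 0x102 & 0x3 = 2, and socket_id = (0x102 >> 8) & 0xF = 1. Any two CPUs whose MPIDRs agree in that level-1 field then land in each other's core_sibling masks in the loop that follows the decode.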
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc9f9da782cb..210382555af1 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -27,6 +27,7 @@
 
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
diff --git a/arch/arm/mach-exynos4/include/mach/entry-macro.S b/arch/arm/mach-exynos4/include/mach/entry-macro.S
index d7a1e281ce7a..006a4f4c65c6 100644
--- a/arch/arm/mach-exynos4/include/mach/entry-macro.S
+++ b/arch/arm/mach-exynos4/include/mach/entry-macro.S
@@ -55,7 +55,7 @@
 
 	bic	\irqnr, \irqstat, #0x1c00
 
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr
@@ -76,8 +76,3 @@
 	strcc	\irqstat, [\base, #GIC_CPU_EOI]
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-	/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm
diff --git a/arch/arm/mach-exynos4/mct.c b/arch/arm/mach-exynos4/mct.c
index 1ae059b7ad7b..85a1bb79f11c 100644
--- a/arch/arm/mach-exynos4/mct.c
+++ b/arch/arm/mach-exynos4/mct.c
@@ -380,9 +380,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
 
 	if (cpu == 0) {
 		mct_tick0_event_irq.dev_id = &mct_tick[cpu];
+		evt->irq = IRQ_MCT_L0;
 		setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
 	} else {
 		mct_tick1_event_irq.dev_id = &mct_tick[cpu];
+		evt->irq = IRQ_MCT_L1;
 		setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
 		irq_set_affinity(IRQ_MCT_L1, cpumask_of(1));
 	}
@@ -394,9 +396,10 @@ void __cpuinit local_timer_setup(struct clock_event_device *evt)
 	exynos4_mct_tick_init(evt);
 }
 
-int local_timer_ack(void)
+void local_timer_stop(struct clock_event_device *evt)
 {
-	return 0;
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	disable_irq(evt->irq);
 }
 
 #endif /* CONFIG_LOCAL_TIMERS */
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index 7c2282c6ba81..a08c536923f9 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -191,12 +191,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "EXYNOS4: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 9221f54778be..106170fb1844 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -53,8 +53,6 @@ static void __init msm8x60_map_io(void)
 
 static void __init msm8x60_init_irq(void)
 {
-	unsigned int i;
-
 	gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
 		 (void *)MSM_QGIC_CPU_BASE);
 
@@ -66,15 +64,6 @@ static void __init msm8x60_init_irq(void)
 	 */
 	if (!machine_is_msm8x60_sim())
 		writel(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET);
-
-	/* FIXME: Not installing AVS_SVICINT and AVS_SVICINTSWDONE yet
-	 * as they are configured as level, which does not play nice with
-	 * handle_percpu_irq.
-	 */
-	for (i = GIC_PPI_START; i < GIC_SPI_START; i++) {
-		if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE)
-			irq_set_handler(i, handle_percpu_irq);
-	}
 }
 
 static void __init msm8x60_init(void)
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
index 12467157afb9..717076f3ca73 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
@@ -8,81 +8,10 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <mach/hardware.h>
-#include <asm/hardware/gic.h>
+#include <asm/hardware/entry-macro-gic.S>
 
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =gic_cpu_base_addr
-	ldr	\base, [\base]
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
-
-	/*
-	 * The interrupt numbering scheme is defined in the
-	 * interrupt controller spec. To wit:
-	 *
-	 * Migrated the code from ARM MP port to be more consistent
-	 * with interrupt processing , the following still holds true
-	 * however, all interrupts are treated the same regardless of
-	 * if they are local IPI or PPI
-	 *
-	 * Interrupts 0-15 are IPI
-	 * 16-31 are PPI
-	 *   (16-18 are the timers)
-	 * 32-1020 are global
-	 * 1021-1022 are reserved
-	 * 1023 is "spurious" (no interrupt)
-	 *
-	 * A simple read from the controller will tell us the number of the
-	 * highest priority enabled interrupt.  We then just need to check
-	 * whether it is in the valid range for an IRQ (0-1020 inclusive).
-	 *
-	 * Base ARM code assumes that the local (private) peripheral interrupts
-	 * are not valid, we treat them differently, in that the privates are
-	 * handled like normal shared interrupts with the exception that only
-	 * one processor can register the interrupt and the handler must be
-	 * the same for all processors.
-	 */
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	ldr	\irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = srcCPU,
-						      9-0 = int # */
-
-	bic	\irqnr, \irqstat, #0x1c00	@mask src
-	cmp	\irqnr, #15
-	ldr	\tmp, =1021
-	cmpcc	\irqnr, \irqnr
-	cmpne	\irqnr, \tmp
-	cmpcs	\irqnr, \irqnr
-
-	.endm
-
-	/* We assume that irqstat (the raw value of the IRQ acknowledge
-	 * register) is preserved from the macro above.
-	 * If there is an IPI, we immediately signal end of interrupt on the
-	 * controller, since this requires the original irqstat value which
-	 * we won't easily be able to recreate later.
-	 */
-	.macro test_for_ipi, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #16
-	strcc	\irqstat, [\base, #GIC_CPU_EOI]
-	cmpcs	\irqnr, \irqnr
-	.endm
-
-	/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #16
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 1a1af9e56250..727659520912 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -156,6 +156,12 @@ void __init smp_init_cpus(void)
 {
 	unsigned int i, ncores = get_core_count();
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; i++)
 		set_cpu_possible(i, true);
 
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 63621f152c98..afeeca52fc66 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -71,12 +71,16 @@ enum timer_location {
 struct msm_clock {
 	struct clock_event_device   clockevent;
 	struct clocksource          clocksource;
-	struct irqaction            irq;
+	unsigned int		    irq;
 	void __iomem                *regbase;
 	uint32_t                    freq;
 	uint32_t                    shift;
 	void __iomem                *global_counter;
 	void __iomem                *local_counter;
+	union {
+		struct clock_event_device		*evt;
+		struct clock_event_device __percpu	**percpu_evt;
+	};
 };
 
 enum {
@@ -87,13 +91,10 @@
 
 
 static struct msm_clock msm_clocks[];
-static struct clock_event_device *local_clock_event;
 
 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = dev_id;
-	if (smp_processor_id() != 0)
-		evt = local_clock_event;
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 	if (evt->event_handler == NULL)
 		return IRQ_HANDLED;
 	evt->event_handler(evt);
@@ -171,13 +172,7 @@ static struct msm_clock msm_clocks[] = {
 		.mask           = CLOCKSOURCE_MASK(32),
 		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 	},
-	.irq = {
-		.name    = "gp_timer",
-		.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
-		.handler = msm_timer_interrupt,
-		.dev_id  = &msm_clocks[0].clockevent,
-		.irq     = INT_GP_TIMER_EXP
-	},
+	.irq = INT_GP_TIMER_EXP,
 	.freq = GPT_HZ,
 	},
 	[MSM_CLOCK_DGT] = {
@@ -196,13 +191,7 @@ static struct msm_clock msm_clocks[] = {
 		.mask           = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
 		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 	},
-	.irq = {
-		.name    = "dg_timer",
-		.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
-		.handler = msm_timer_interrupt,
-		.dev_id  = &msm_clocks[1].clockevent,
-		.irq     = INT_DEBUG_TIMER_EXP
-	},
+	.irq = INT_DEBUG_TIMER_EXP,
 	.freq = DGT_HZ >> MSM_DGT_SHIFT,
 	.shift = MSM_DGT_SHIFT,
 	}
@@ -261,10 +250,30 @@ static void __init msm_timer_init(void)
 		printk(KERN_ERR "msm_timer_init: clocksource_register "
 		       "failed for %s\n", cs->name);
 
-		res = setup_irq(clock->irq.irq, &clock->irq);
+		ce->irq = clock->irq;
+		if (cpu_is_msm8x60() || cpu_is_msm8960()) {
+			clock->percpu_evt = alloc_percpu(struct clock_event_device *);
+			if (!clock->percpu_evt) {
+				pr_err("msm_timer_init: memory allocation "
+				       "failed for %s\n", ce->name);
+				continue;
+			}
+
+			*__this_cpu_ptr(clock->percpu_evt) = ce;
+			res = request_percpu_irq(ce->irq, msm_timer_interrupt,
+						 ce->name, clock->percpu_evt);
+			if (!res)
+				enable_percpu_irq(ce->irq, 0);
+		} else {
+			clock->evt = ce;
+			res = request_irq(ce->irq, msm_timer_interrupt,
+					  IRQF_TIMER | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+					  ce->name, &clock->evt);
+		}
+
 		if (res)
-			printk(KERN_ERR "msm_timer_init: setup_irq "
-			       "failed for %s\n", cs->name);
+			pr_err("msm_timer_init: request_irq failed for %s\n",
+			       ce->name);
 
 		clockevents_register_device(ce);
 	}
@@ -273,6 +282,7 @@ static void __init msm_timer_init(void)
 #ifdef CONFIG_SMP
 int __cpuinit local_timer_setup(struct clock_event_device *evt)
 {
+	static bool local_timer_inited;
 	struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
 
 	/* Use existing clock_event for cpu 0 */
@@ -281,12 +291,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
-	if (!local_clock_event) {
+	if (!local_timer_inited) {
 		writel(0, clock->regbase + TIMER_ENABLE);
 		writel(0, clock->regbase + TIMER_CLEAR);
 		writel(~0, clock->regbase + TIMER_MATCH_VAL);
+		local_timer_inited = true;
 	}
-	evt->irq = clock->irq.irq;
+	evt->irq = clock->irq;
 	evt->name = "local_timer";
 	evt->features = CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = clock->clockevent.rating;
@@ -298,17 +309,17 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 		clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
 	evt->min_delta_ns = clockevent_delta2ns(4, evt);
 
-	local_clock_event = evt;
-
-	gic_enable_ppi(clock->irq.irq);
+	*__this_cpu_ptr(clock->percpu_evt) = evt;
+	enable_percpu_irq(evt->irq, 0);
 
 	clockevents_register_device(evt);
 	return 0;
 }
 
-inline int local_timer_ack(void)
+void local_timer_stop(struct clock_event_device *evt)
 {
-	return 1;
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	disable_percpu_irq(evt->irq);
 }
 
 #endif
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index ceb8b7e593d7..feb90a10945a 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -78,7 +78,7 @@
 4401:	ldr	\irqstat, [\base, #GIC_CPU_INTACK]
 	ldr	\tmp, =1021
 	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr
@@ -101,18 +101,6 @@
 	it	cs
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-	/* As above, this assumes that irqstat and base are preserved */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #29
-	itt	eq
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm
 #endif	/* CONFIG_SMP */
 
 #else	/* MULTI_OMAP2 */
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ce65e9329c7b..889464dc7b2d 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,12 +109,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_get_core_count(scu_base);
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "OMAP4: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index b09e848eb6c6..ca6075717824 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -19,6 +19,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 
+#include <asm/exception.h>
+
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 4ae943bafa92..e83c654a58d0 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -52,12 +52,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "Realview: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-shmobile/entry-intc.S b/arch/arm/mach-shmobile/entry-intc.S
index cac0a7ae2084..1a1c00ca39a2 100644
--- a/arch/arm/mach-shmobile/entry-intc.S
+++ b/arch/arm/mach-shmobile/entry-intc.S
@@ -51,7 +51,4 @@
 	.macro	test_for_ipi, irqnr, irqstat, base, tmp
 	.endm
 
-	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm
-
 	arch_irq_handler shmobile_handle_irq_intc
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
index d791f10eeac7..8d4a416d4285 100644
--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
@@ -27,8 +27,5 @@
 	.macro	test_for_ipi, irqnr, irqstat, base, tmp
 	.endm
 
-	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 66f980625a33..e4e485fa2532 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -56,6 +56,12 @@ void __init smp_init_cpus(void)
 	unsigned int ncores = shmobile_smp_get_core_count();
 	unsigned int i;
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; i++)
 		set_cpu_possible(i, true);
 
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 0886cbccddee..7d2b5d03c1df 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -114,10 +114,10 @@ void __init smp_init_cpus(void)
 {
 	unsigned int i, ncores = scu_get_core_count(scu_base);
 
-	if (ncores > NR_CPUS) {
-		printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
-			ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index a33df5f4c27a..eb5199102cfa 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -156,12 +156,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "U8500: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index bfd32f52c2db..2b1e836a76ed 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -221,6 +221,12 @@ static void ct_ca9x4_init_cpu_map(void)
 {
 	int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; ++i)
 		set_cpu_possible(i, true);
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3b5ea68acbb8..aa33949fef60 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a103732b7588..664544ff77d5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -95,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags:	flags (see IRQF_* above)
  * @name:	name of the device
  * @dev_id:	cookie to identify the device
+ * @percpu_dev_id:	cookie to identify the device
  * @next:	pointer to the next irqaction for shared interrupts
  * @irq:	interrupt number
  * @dir:	pointer to the proc/irq/NN/name entry
@@ -104,17 +105,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_mask:	bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
 	unsigned long flags;
 	void *dev_id;
+	void __percpu *percpu_dev_id;
 	struct irqaction *next;
 	int irq;
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
 	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -136,6 +138,10 @@ extern int __must_check
 request_any_context_irq(unsigned int irq, irq_handler_t handler,
 			unsigned long flags, const char *name, void *dev_id);
 
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
@@ -164,10 +170,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev_id);
 }
 
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id)
+{
+	return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
 
 struct device;
 
@@ -207,7 +221,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 
 /* The following three functions are for the core kernel use only. */
 #ifdef CONFIG_GENERIC_HARDIRQS
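[Annotation] The declarations above are the whole driver-facing surface of the new API: request/free take a __percpu cookie instead of a plain dev_id, there is no flags argument, and enable/disable act only on the calling CPU. A minimal sketch contrasting the two request flavours; struct sketch_dev, sketch_handler and the IRQ parameters are hypothetical, not part of this patch:

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct sketch_dev { int counter; };

static struct sketch_dev shared_dev;			/* one for the system */
static DEFINE_PER_CPU(struct sketch_dev, percpu_devs);	/* one per CPU */

static irqreturn_t sketch_handler(int irq, void *dev_id)
{
	struct sketch_dev *dev = dev_id;	/* per-CPU instance on a percpu line */

	dev->counter++;
	return IRQ_HANDLED;
}

static int sketch_request(unsigned int spi_irq, unsigned int ppi_irq)
{
	int err;

	/* Classic line: one shared cookie, explicit IRQF_* flags. */
	err = request_irq(spi_irq, sketch_handler, 0, "sketch", &shared_dev);
	if (err)
		return err;

	/*
	 * Percpu-devid line: the cookie is the percpu base address. The
	 * line stays masked until each CPU calls enable_percpu_irq()
	 * on itself.
	 */
	return request_percpu_irq(ppi_irq, sketch_handler, "sketch",
				  &percpu_devs);
}
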
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 59517300a315..59e49c80cc2c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
  * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
  * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
  * IRQ_NESTED_THREAD		- Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -88,12 +89,13 @@ enum {
 	IRQ_MOVE_PCNTXT		= (1 << 14),
 	IRQ_NESTED_THREAD	= (1 << 15),
 	IRQ_NOTHREAD		= (1 << 16),
+	IRQ_PER_CPU_DEVID	= (1 << 17),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -336,12 +338,14 @@ struct irq_chip {
  * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
  * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
  *				when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
 	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),
 	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),
 	IRQCHIP_ONOFFLINE_ENABLED	= (1 << 3),
+	IRQCHIP_SKIP_SET_WAKE		= (1 << 4),
 };
 
 /* This include will go away once we isolated irq_desc usage to core code */
@@ -365,6 +369,8 @@ enum {
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
@@ -392,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
@@ -420,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
 }
 
+extern int irq_set_percpu_devid(unsigned int irq);
+
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
@@ -481,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
 	irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
 }
 
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+	irq_set_status_flags(irq,
+			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
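[Annotation] irq_set_percpu_devid() (implemented in irqdesc.c below) and the flag helper above are the controller-side half of the API: they mark a descriptor as carrying a per-cpu dev_id so that only the percpu request path can claim it. A hedged sketch of how an interrupt controller's init code might wire up a bank of such lines; the chip and IRQ range are hypothetical:

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical: mark [first, first + nr) as percpu-devid lines. */
static void __init sketch_init_percpu_lines(struct irq_chip *chip,
					    unsigned int first,
					    unsigned int nr)
{
	unsigned int irq;

	for (irq = first; irq < first + nr; irq++) {
		/* Allocates desc->percpu_enabled and sets the flags above. */
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
	}
}
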
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 150134ac709a..6b69c2c9dff1 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -53,6 +53,7 @@ struct irq_desc {
 	unsigned long last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int irqs_unhandled;
 	raw_spinlock_t lock;
+	struct cpumask *percpu_enabled;
 #ifdef CONFIG_SMP
 	const struct cpumask *affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d5a3009da71a..f7c543a801d9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -26,7 +26,7 @@
 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
 int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
 int irq_set_handler_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 int irq_set_chip_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_enable)
+		desc->irq_data.chip->irq_enable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_disable)
+		desc->irq_data.chip->irq_disable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
+	cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)
@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq:	the interrupt number
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+	irqreturn_t res;
+
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
 void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 
 	if (!desc)
 		return;
@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return;
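[Annotation] The kernel-doc above states the key contract: where handle_percpu_irq() passes the one shared dev_id, the devid variant dereferences action->percpu_dev_id for the interrupted CPU, so every invocation sees private state with no locking. A sketch of what that looks like from the handler's side; the per-cpu structure and names are hypothetical:

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct sketch_tick { unsigned long count; };

static DEFINE_PER_CPU(struct sketch_tick, sketch_ticks);

/* Registered with request_percpu_irq(irq, ..., &sketch_ticks). */
static irqreturn_t sketch_tick_handler(int irq, void *dev_id)
{
	/*
	 * dev_id is __this_cpu_ptr(&sketch_ticks): the interrupted CPU's
	 * own instance, so the increment needs no lock.
	 */
	struct sketch_tick *tick = dev_id;

	tick->count++;
	return IRQ_HANDLED;
}
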
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6546431447d7..a73dd6c7372d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 
@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+#define _IRQ_DESC_CHECK		(1 << 0)
+#define _IRQ_DESC_PERCPU	(1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check);
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
 
 static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, true);
+	return __irq_get_desc_lock(irq, flags, true, check);
 }
 
@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
131} 140}
132 141
133static inline struct irq_desc * 142static inline struct irq_desc *
134irq_get_desc_lock(unsigned int irq, unsigned long *flags) 143irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
135{ 144{
136 return __irq_get_desc_lock(irq, flags, false); 145 return __irq_get_desc_lock(irq, flags, false, check);
137} 146}
138 147
139static inline void 148static inline void
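[Annotation] The new check argument turns descriptor lookup into a type filter: IRQ_GET_DESC_CHECK_GLOBAL rejects percpu-devid descriptors, IRQ_GET_DESC_CHECK_PERCPU rejects everything else, and 0 skips the check. A condensed sketch of the predicate that __irq_get_desc_lock() applies (see irqdesc.c below); this helper is illustrative, not part of the patch:

/* Returns true when a descriptor of the given kind passes the filter. */
static bool sketch_desc_passes(bool is_percpu_devid, unsigned int check)
{
	if (!(check & _IRQ_DESC_CHECK))
		return true;			/* check == 0: accept anything */
	if (check & _IRQ_DESC_PERCPU)
		return is_percpu_devid;		/* ..._CHECK_PERCPU */
	return !is_percpu_devid;		/* ..._CHECK_GLOBAL */
}

This is why enable_irq() on a percpu-devid line now fails silently (NULL descriptor), while enable_percpu_irq() refuses ordinary lines.
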
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 039b889ea053..1550e8447a16 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
 }
 
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (desc) {
+		if (check & _IRQ_DESC_CHECK) {
+			if ((check & _IRQ_DESC_PERCPU) &&
+			    !irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+
+			if (!(check & _IRQ_DESC_PERCPU) &&
+			    irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+		}
+
 		if (bus)
 			chip_bus_lock(desc);
 		raw_spin_lock_irqsave(&desc->lock, *flags);
@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 		chip_bus_sync_unlock(desc);
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->percpu_enabled)
+		return -EINVAL;
+
+	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+	if (!desc->percpu_enabled)
+		return -ENOMEM;
+
+	irq_set_percpu_devid_flags(irq);
+	return 0;
+}
+
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq:	irq number to initialize
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9b956fa20308..67ce837ae52c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 static int __disable_irq_nosync(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 void enable_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return;
@@ -467,6 +467,9 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (desc->irq_data.chip->irq_set_wake)
 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 
@@ -488,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -529,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 	int canrequest = 0;
 
 	if (!desc)
@@ -1118,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
@@ -1126,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
- /*
+/*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
  */
@@ -1224,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
  */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
-	__free_irq(irq, act->dev_id);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		__free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 
@@ -1246,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!desc)
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return;
 
 #ifdef CONFIG_SMP
@@ -1324,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (!irq_settings_can_request(desc))
+	if (!irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 
 	if (!handler) {
@@ -1409,3 +1418,194 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return !ret ? IRQC_IS_HARDIRQ : ret;
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	type &= IRQ_TYPE_SENSE_MASK;
+	if (type != IRQ_TYPE_NONE) {
+		int ret;
+
+		ret = __irq_set_trigger(desc, irq, type);
+
+		if (ret) {
+			WARN(1, "failed to set type for IRQ%d\n", irq);
+			goto out;
+		}
+	}
+
+	irq_percpu_enable(desc, cpu);
+out:
+	irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	irq_percpu_disable(desc, cpu);
+	irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+	if (!desc)
+		return NULL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	action = desc->action;
+	if (!action || action->percpu_dev_id != dev_id) {
+		WARN(1, "Trying to free already-free IRQ %d\n", irq);
+		goto bad;
+	}
+
+	if (!cpumask_empty(desc->percpu_enabled)) {
+		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+		     irq, cpumask_first(desc->percpu_enabled));
+		goto bad;
+	}
+
+	/* Found it - now remove it from the list of entries: */
+	desc->action = NULL;
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	module_put(desc->owner);
+	return action;
+
+bad:
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && irq_settings_is_per_cpu_devid(desc))
+		__free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	chip_bus_lock(desc);
+	kfree(__free_percpu_irq(irq, dev_id));
+	chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int retval;
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		       const char *devname, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	int retval;
+
+	if (!dev_id)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU;
+	action->name = devname;
+	action->percpu_dev_id = dev_id;
+
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(desc);
+
+	if (retval)
+		kfree(action);
+
+	return retval;
+}
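[Annotation] Taken together, the intended lifecycle is: one request_percpu_irq() for the line, enable_percpu_irq()/disable_percpu_irq() executed by each CPU for itself, then one free_percpu_irq() only after every CPU has disabled (otherwise __free_percpu_irq() warns, as above). A hedged end-to-end sketch with hypothetical names; on_each_cpu() is used here to run the per-CPU calls in place:

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, sketch_counts);

static irqreturn_t sketch_handler(int irq, void *dev_id)
{
	(*(unsigned long *)dev_id)++;	/* this CPU's counter */
	return IRQ_HANDLED;
}

static void sketch_enable_self(void *info)
{
	/* Runs on the target CPU; enable_percpu_irq() is CPU-local. */
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
}

static void sketch_disable_self(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static int sketch_start(unsigned int irq)
{
	int err = request_percpu_irq(irq, sketch_handler, "sketch",
				     &sketch_counts);
	if (err)
		return err;

	on_each_cpu(sketch_enable_self, &irq, 1);
	return 0;
}

static void sketch_stop(unsigned int irq)
{
	/* All CPUs must disable before the single free. */
	on_each_cpu(sketch_disable_self, &irq, 1);
	free_percpu_irq(irq, &sketch_counts);
}
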
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index f1667833d444..1162f1030f18 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -13,6 +13,7 @@ enum {
 	_IRQ_MOVE_PCNTXT	= IRQ_MOVE_PCNTXT,
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
+	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -24,6 +25,7 @@ enum {
 #define IRQ_NOTHREAD		GOT_YOU_MORON
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 	return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
 	desc->status_use_accessors |= _IRQ_PER_CPU;