Diffstat (limited to 'arch/arm/mach-vexpress')
 arch/arm/mach-vexpress/Makefile            |   1
 arch/arm/mach-vexpress/ct-ca9x4.c          |   2
 arch/arm/mach-vexpress/hotplug.c           | 128
 arch/arm/mach-vexpress/include/mach/smp.h  |   5
 arch/arm/mach-vexpress/platsmp.c           |  74
 5 files changed, 159 insertions(+), 51 deletions(-)
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 1b71b77ade22..2c0ac7de2814 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -5,4 +5,5 @@
 obj-y					:= v2m.o
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4)	+= ct-ca9x4.o
 obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
 obj-$(CONFIG_LOCAL_TIMERS)		+= localtimer.o
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 26a02eb57571..f4455e3ed6a4 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -53,7 +53,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
 	twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
 	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
new file mode 100644
index 000000000000..ea4cbfb90a66
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -0,0 +1,128 @@
+/*
+ * linux/arch/arm/mach-realview/hotplug.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+	unsigned int v;
+
+	flush_cache_all();
+	asm volatile(
+	"mcr	p15, 0, %1, c7, c5, 0\n"
+	"	mcr	p15, 0, %1, c7, c10, 4\n"
+	/*
+	 * Turn off coherency
+	 */
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	bic	%0, %0, %3\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	"	mrc	p15, 0, %0, c1, c0, 0\n"
+	"	bic	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	  : "=&r" (v)
+	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+	  : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile(
+	"mrc	p15, 0, %0, c1, c0, 0\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	orr	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	  : "=&r" (v)
+	  : "Ir" (CR_C), "Ir" (0x40)
+	  : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+	/*
+	 * there is no power-control hardware on this platform, so all
+	 * we can do is put the core into WFI; this is safe as the calling
+	 * code will have already disabled interrupts
+	 */
+	for (;;) {
+		/*
+		 * here's the WFI
+		 */
+		asm(".word	0xe320f003\n"
+		    :
+		    :
+		    : "memory", "cc");
+
+		if (pen_release == cpu) {
+			/*
+			 * OK, proper wakeup, we're done
+			 */
+			break;
+		}
+
+		/*
+		 * Getting here, means that we have come out of WFI without
+		 * having been woken up - this shouldn't happen
+		 *
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
+		 */
+		(*spurious)++;
+	}
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+	return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+	int spurious = 0;
+
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu, &spurious);
+
+	/*
+	 * bring this CPU back into the world of cache
+	 * coherency, and then restore interrupts
+	 */
+	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shutdown (it is still too special
+	 * e.g. clock tick interrupts)
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
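Note on the WFI literal in platform_do_lowpower(): 0xe320f003 is the ARM (A32) encoding of the WFI instruction, spelled out as a .word presumably so the file still assembles when the toolchain is not targeting ARMv7. A minimal equivalent using the mnemonic (assuming an ARMv7-aware assembler) would be:

	/* same instruction as ".word 0xe320f003" above; needs an
	 * assembler that accepts ARMv7 mnemonics */
	asm volatile("wfi" : : : "memory", "cc");

Either form simply idles the core until an interrupt or event arrives; the surrounding loop then re-checks pen_release to filter out spurious wakeups.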
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
index 5a6da4fd247e..4c05e4a9713a 100644
--- a/arch/arm/mach-vexpress/include/mach/smp.h
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -2,13 +2,12 @@
 #define __MACH_SMP_H
 
 #include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
 
 /*
  * We use IRQ1 as the IPI
  */
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
 {
-	gic_raise_softirq(mask, 1);
+	gic_raise_softirq(mask, ipi);
 }
 #endif
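The only functional change here is that the IPI number is now passed in by the caller rather than hard-coded to 1 inside the header; gic_raise_softirq() itself is untouched. A hypothetical call site, mirroring the boot_secondary() change later in this patch, would be:

	/* wake the CPUs in "mask" with software interrupt (IPI) number 1 */
	smp_cross_call(cpumask_of(cpu), 1);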
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index 670970699ba9..8ce9fef29555 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -17,7 +17,6 @@
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
-#include <asm/localtimer.h>
 #include <asm/smp_scu.h>
 #include <asm/unified.h>
 
@@ -35,6 +34,19 @@ extern void vexpress_secondary_startup(void);
  */
 volatile int __cpuinitdata pen_release = -1;
 
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not.  This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+	pen_release = val;
+	smp_wmb();
+	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
 static void __iomem *scu_base_addr(void)
 {
 	return MMIO_P2V(A9_MPCORE_SCU);
@@ -44,8 +56,6 @@ static DEFINE_SPINLOCK(boot_lock);
 
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
-	trace_hardirqs_off();
-
 	/*
 	 * if any interrupts are already enabled for the primary
 	 * core (e.g. timer irq), then they will not have been enabled
@@ -57,8 +67,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
 	 * let the primary processor know we're out of the
 	 * pen, then head off into the C entry point
 	 */
-	pen_release = -1;
-	smp_wmb();
+	write_pen_release(-1);
 
 	/*
 	 * Synchronise with the boot thread.
@@ -83,16 +92,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * since we haven't sent them a soft interrupt, they shouldn't
 	 * be there.
 	 */
-	pen_release = cpu;
-	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
-	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+	write_pen_release(cpu);
 
 	/*
 	 * Send the secondary CPU a soft interrupt, thereby causing
 	 * the boot monitor to read the system wide flags register,
 	 * and branch to the address found there.
 	 */
-	smp_cross_call(cpumask_of(cpu));
+	smp_cross_call(cpumask_of(cpu), 1);
 
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
@@ -124,13 +131,6 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores == 0) {
-		printk(KERN_ERR
-			"vexpress: strange CM count of 0? Default to 1\n");
-
-		ncores = 1;
-	}
-
 	if (ncores > NR_CPUS) {
 		printk(KERN_WARNING
 			"vexpress: no. of cores (%d) greater than configured "
@@ -143,20 +143,10 @@ void __init smp_init_cpus(void)
 		set_cpu_possible(i, true);
 }
 
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned int ncores = num_possible_cpus();
-	unsigned int cpu = smp_processor_id();
 	int i;
 
-	smp_store_cpu_info(cpu);
-
-	/*
-	 * are we trying to boot more cores than exist?
-	 */
-	if (max_cpus > ncores)
-		max_cpus = ncores;
-
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time.
@@ -164,27 +154,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (i = 0; i < max_cpus; i++)
 		set_cpu_present(i, true);
 
+	scu_enable(scu_base_addr());
+
 	/*
-	 * Initialise the SCU if there are more than one CPU and let
-	 * them know where to start.
+	 * Write the address of secondary startup into the
+	 * system-wide flags register. The boot monitor waits
+	 * until it receives a soft interrupt, and then the
+	 * secondary CPU branches to this address.
 	 */
-	if (max_cpus > 1) {
-		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		scu_enable(scu_base_addr());
-
-		/*
-		 * Write the address of secondary startup into the
-		 * system-wide flags register. The boot monitor waits
-		 * until it receives a soft interrupt, and then the
-		 * secondary CPU branches to this address.
-		 */
-		writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
-		writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
-			MMIO_P2V(V2M_SYS_FLAGSSET));
-	}
+	writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
+	writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+		MMIO_P2V(V2M_SYS_FLAGSSET));
 }
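Taken together, write_pen_release(), boot_secondary() and platform_secondary_init() implement a "holding pen": the booting CPU publishes the number of the core it wants to release and kicks it with a soft interrupt, and the released core writes -1 back once it is running kernel code. The sketch below is a rough userspace model of that handshake only, not kernel code: pthreads and volatile stores stand in for the IPI, WFI and the explicit cache maintenance, and all names are illustrative.

	#include <pthread.h>
	#include <stdio.h>

	static volatile int pen = -1;		/* models pen_release */

	static void *secondary(void *arg)	/* models the held secondary core */
	{
		int cpu = *(int *)arg;

		while (pen != cpu)		/* models WFI + re-check of pen_release */
			;
		printf("cpu%d: out of the pen\n", cpu);
		pen = -1;			/* models write_pen_release(-1) */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int cpu = 1;

		pthread_create(&t, NULL, secondary, &cpu);
		pen = cpu;			/* models write_pen_release(cpu) + IPI */
		while (pen != -1)		/* boot_secondary() waits for the ack */
			;
		pthread_join(t, NULL);
		return 0;
	}

Build with something like cc -pthread pen.c. In the real code plain stores are not enough precisely because the woken core may not yet be coherent, which is why write_pen_release() adds smp_wmb(), a D-cache flush and an outer-cache clean.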