path: root/arch/arm/mach-realview
author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:31:35 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:03 -0500
commit		4ec3eb13634529c0bc7466658d84d0bbe3244aea (patch)
tree		b491daac2ccfc7b8ca88e171a43f66888463568a /arch/arm/mach-realview
parent		24056f525051a9e186af28904b396320e18bf9a0 (diff)
parent		15095bb0fe779c0403091bda7adce5fb3bb9ca35 (diff)
Merge branch 'smp' into misc
Conflicts:
	arch/arm/kernel/entry-armv.S
	arch/arm/mm/ioremap.c
Diffstat (limited to 'arch/arm/mach-realview')
-rw-r--r--	arch/arm/mach-realview/hotplug.c		 44
-rw-r--r--	arch/arm/mach-realview/include/mach/smp.h	  5
-rw-r--r--	arch/arm/mach-realview/platsmp.c		114
3 files changed, 53 insertions, 110 deletions
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c
index f95521a5e5ce..a87523d095e6 100644
--- a/arch/arm/mach-realview/hotplug.c
+++ b/arch/arm/mach-realview/hotplug.c
@@ -11,14 +11,11 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
-#include <linux/completion.h>
 
 #include <asm/cacheflush.h>
 
 extern volatile int pen_release;
 
-static DECLARE_COMPLETION(cpu_killed);
-
 static inline void cpu_enter_lowpower(void)
 {
 	unsigned int v;
@@ -34,10 +31,10 @@ static inline void cpu_enter_lowpower(void)
34 " bic %0, %0, #0x20\n" 31 " bic %0, %0, #0x20\n"
35 " mcr p15, 0, %0, c1, c0, 1\n" 32 " mcr p15, 0, %0, c1, c0, 1\n"
36 " mrc p15, 0, %0, c1, c0, 0\n" 33 " mrc p15, 0, %0, c1, c0, 0\n"
37 " bic %0, %0, #0x04\n" 34 " bic %0, %0, %2\n"
38 " mcr p15, 0, %0, c1, c0, 0\n" 35 " mcr p15, 0, %0, c1, c0, 0\n"
39 : "=&r" (v) 36 : "=&r" (v)
40 : "r" (0) 37 : "r" (0), "Ir" (CR_C)
41 : "cc"); 38 : "cc");
42} 39}
43 40
@@ -46,17 +43,17 @@ static inline void cpu_leave_lowpower(void)
 	unsigned int v;
 
 	asm volatile(	"mrc	p15, 0, %0, c1, c0, 0\n"
-	"	orr	%0, %0, #0x04\n"
+	"	orr	%0, %0, %1\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	"	mrc	p15, 0, %0, c1, c0, 1\n"
 	"	orr	%0, %0, #0x20\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	  : "=&r" (v)
-	  :
+	  : "Ir" (CR_C)
 	  : "cc");
 }
 
-static inline void platform_do_lowpower(unsigned int cpu)
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
 	/*
 	 * there is no power-control hardware on this platform, so all
@@ -80,22 +77,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
 		}
 
 		/*
-		 * getting here, means that we have come out of WFI without
+		 * Getting here, means that we have come out of WFI without
 		 * having been woken up - this shouldn't happen
 		 *
-		 * The trouble is, letting people know about this is not really
-		 * possible, since we are currently running incoherently, and
-		 * therefore cannot safely call printk() or anything else
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
 		 */
-#ifdef DEBUG
-		printk("CPU%u: spurious wakeup call\n", cpu);
-#endif
+		(*spurious)++;
 	}
 }
 
 int platform_cpu_kill(unsigned int cpu)
 {
-	return wait_for_completion_timeout(&cpu_killed, 5000);
+	return 1;
 }
 
 /*
@@ -105,30 +99,22 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
-#ifdef DEBUG
-	unsigned int this_cpu = hard_smp_processor_id();
-
-	if (cpu != this_cpu) {
-		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
-		       this_cpu, cpu);
-		BUG();
-	}
-#endif
-
-	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
-	complete(&cpu_killed);
+	int spurious = 0;
 
 	/*
 	 * we're ready for shutdown now, so do it
 	 */
 	cpu_enter_lowpower();
-	platform_do_lowpower(cpu);
+	platform_do_lowpower(cpu, &spurious);
 
 	/*
 	 * bring this CPU back into the world of cache
 	 * coherency, and then restore interrupts
 	 */
 	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
 }
 
 int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h
index d3cd265cb058..c8221b38ee7c 100644
--- a/arch/arm/mach-realview/include/mach/smp.h
+++ b/arch/arm/mach-realview/include/mach/smp.h
@@ -2,14 +2,13 @@
 #define ASMARM_ARCH_SMP_H
 
 #include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
 
 /*
  * We use IRQ1 as the IPI
 */
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
 {
-	gic_raise_softirq(mask, 1);
+	gic_raise_softirq(mask, ipi);
 }
 
 #endif
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 009265818d55..bb8d6c4e4315 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -19,7 +19,6 @@
 #include <asm/cacheflush.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
-#include <asm/localtimer.h>
 #include <asm/unified.h>
 
 #include <mach/board-eb.h>
@@ -37,6 +36,19 @@ extern void realview_secondary_startup(void);
  */
 volatile int __cpuinitdata pen_release = -1;
 
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+	pen_release = val;
+	smp_wmb();
+	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
 static void __iomem *scu_base_addr(void)
 {
 	if (machine_is_realview_eb_mp())
@@ -50,20 +62,10 @@ static void __iomem *scu_base_addr(void)
 	return (void __iomem *)0;
 }
 
-static inline unsigned int get_core_count(void)
-{
-	void __iomem *scu_base = scu_base_addr();
-	if (scu_base)
-		return scu_get_core_count(scu_base);
-	return 1;
-}
-
 static DEFINE_SPINLOCK(boot_lock);
 
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
-	trace_hardirqs_off();
-
 	/*
 	 * if any interrupts are already enabled for the primary
 	 * core (e.g. timer irq), then they will not have been enabled
@@ -75,8 +77,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
 	 * let the primary processor know we're out of the
 	 * pen, then head off into the C entry point
 	 */
-	pen_release = -1;
-	smp_wmb();
+	write_pen_release(-1);
 
 	/*
 	 * Synchronise with the boot thread.
@@ -103,20 +104,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Note that "pen_release" is the hardware CPU ID, whereas
 	 * "cpu" is Linux's internal ID.
 	 */
-	pen_release = cpu;
-	flush_cache_all();
+	write_pen_release(cpu);
 
 	/*
-	 * XXX
-	 *
-	 * This is a later addition to the booting protocol: the
-	 * bootMonitor now puts secondary cores into WFI, so
-	 * poke_milo() no longer gets the cores moving; we need
-	 * to send a soft interrupt to wake the secondary core.
-	 * Use smp_cross_call() for this, since there's little
-	 * point duplicating the code here
+	 * Send the secondary CPU a soft interrupt, thereby causing
+	 * the boot monitor to read the system wide flags register,
+	 * and branch to the address found there.
 	 */
-	smp_cross_call(cpumask_of(cpu));
+	smp_cross_call(cpumask_of(cpu), 1);
 
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
@@ -136,48 +131,18 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return pen_release != -1 ? -ENOSYS : 0;
 }
 
-static void __init poke_milo(void)
-{
-	/* nobody is to be released from the pen yet */
-	pen_release = -1;
-
-	/*
-	 * Write the address of secondary startup into the system-wide flags
-	 * register. The BootMonitor waits for this register to become
-	 * non-zero.
-	 */
-	__raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
-		     __io_address(REALVIEW_SYS_FLAGSSET));
-
-	mb();
-}
-
 /*
  * Initialise the CPU possible map early - this describes the CPUs
  * which may be present or become present in the system.
  */
 void __init smp_init_cpus(void)
 {
-	unsigned int i, ncores = get_core_count();
+	void __iomem *scu_base = scu_base_addr();
+	unsigned int i, ncores;
 
-	for (i = 0; i < ncores; i++)
-		set_cpu_possible(i, true);
-}
-
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
-	unsigned int ncores = get_core_count();
-	unsigned int cpu = smp_processor_id();
-	int i;
+	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores == 0) {
-		printk(KERN_ERR
-		       "Realview: strange CM count of 0? Default to 1\n");
-
-		ncores = 1;
-	}
-
 	if (ncores > NR_CPUS) {
 		printk(KERN_WARNING
 		       "Realview: no. of cores (%d) greater than configured "
@@ -186,13 +151,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		ncores = NR_CPUS;
 	}
 
-	smp_store_cpu_info(cpu);
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+}
 
-	/*
-	 * are we trying to boot more cores than exist?
-	 */
-	if (max_cpus > ncores)
-		max_cpus = ncores;
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
 
 	/*
 	 * Initialise the present map, which describes the set of CPUs
@@ -201,21 +166,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (i = 0; i < max_cpus; i++)
 		set_cpu_present(i, true);
 
+	scu_enable(scu_base_addr());
+
 	/*
-	 * Initialise the SCU if there are more than one CPU and let
-	 * them know where to start. Note that, on modern versions of
-	 * MILO, the "poke" doesn't actually do anything until each
-	 * individual core is sent a soft interrupt to get it out of
-	 * WFI
+	 * Write the address of secondary startup into the
+	 * system-wide flags register. The BootMonitor waits
+	 * until it receives a soft interrupt, and then the
+	 * secondary CPU branches to this address.
 	 */
-	if (max_cpus > 1) {
-		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		scu_enable(scu_base_addr());
-		poke_milo();
-	}
+	__raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
+			__io_address(REALVIEW_SYS_FLAGSSET));
 }
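
For readers unfamiliar with the "holding pen" boot protocol that write_pen_release() serves, the sketch below restates the handshake as free-standing C. It is illustrative only and is not taken from this diff: send_soft_interrupt() is a made-up stand-in for smp_cross_call(), the unbounded busy-waits replace the jiffies timeout in the real boot_secondary(), and the cache maintenance that write_pen_release() performs (smp_wmb(), D-cache flush, outer-cache clean) is reduced to comments.

/* Illustrative sketch of the pen_release handshake -- not the kernel code above. */

volatile int pen_release = -1;		/* hardware CPU id allowed to leave the pen */

static void send_soft_interrupt(int hw_cpu)
{
	/* stand-in for smp_cross_call(): wake hw_cpu out of the boot monitor's WFI */
}

/* Boot CPU side, roughly what boot_secondary() does. */
void release_secondary(int hw_cpu)
{
	pen_release = hw_cpu;		/* the real code uses write_pen_release(), which
					   also cleans the cache line so a CPU that is not
					   yet coherent still sees the new value */
	send_soft_interrupt(hw_cpu);

	while (pen_release != -1)
		;			/* the real code bounds this wait with a timeout */
}

/* Secondary CPU side, roughly the path into platform_secondary_init(). */
void secondary_entry(int my_hw_cpu)
{
	while (pen_release != my_hw_cpu)
		;			/* spin in the pen until named by the boot CPU */

	pen_release = -1;		/* write_pen_release(-1): tell the boot CPU
					   we are out of the pen */
}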