author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:31:35 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:03 -0500
commit		4ec3eb13634529c0bc7466658d84d0bbe3244aea (patch)
tree		b491daac2ccfc7b8ca88e171a43f66888463568a /arch/arm/mach-tegra
parent		24056f525051a9e186af28904b396320e18bf9a0 (diff)
parent		15095bb0fe779c0403091bda7adce5fb3bb9ca35 (diff)
Merge branch 'smp' into misc
Conflicts:
arch/arm/kernel/entry-armv.S
arch/arm/mm/ioremap.c
Diffstat (limited to 'arch/arm/mach-tegra')
-rw-r--r--	arch/arm/mach-tegra/hotplug.c			44
-rw-r--r--	arch/arm/mach-tegra/include/mach/smp.h		12
-rw-r--r--	arch/arm/mach-tegra/platsmp.c			33
3 files changed, 25 insertions, 64 deletions
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index 8e7f115aa21e..a5cb1ce76ff2 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -11,12 +11,9 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
-#include <linux/completion.h>
 
 #include <asm/cacheflush.h>
 
-static DECLARE_COMPLETION(cpu_killed);
-
 static inline void cpu_enter_lowpower(void)
 {
 	unsigned int v;
@@ -29,13 +26,13 @@ static inline void cpu_enter_lowpower(void)
 	 * Turn off coherency
 	 */
 	" mrc p15, 0, %0, c1, c0, 1\n"
-	" bic %0, %0, #0x20\n"
+	" bic %0, %0, %2\n"
 	" mcr p15, 0, %0, c1, c0, 1\n"
 	" mrc p15, 0, %0, c1, c0, 0\n"
 	" bic %0, %0, #0x04\n"
 	" mcr p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
-	  : "r" (0)
+	  : "r" (0), "Ir" (CR_C)
 	  : "cc");
 }
 
@@ -45,17 +42,17 @@ static inline void cpu_leave_lowpower(void)
 
 	asm volatile(
 	"mrc p15, 0, %0, c1, c0, 0\n"
-	" orr %0, %0, #0x04\n"
+	" orr %0, %0, %1\n"
 	" mcr p15, 0, %0, c1, c0, 0\n"
 	" mrc p15, 0, %0, c1, c0, 1\n"
 	" orr %0, %0, #0x20\n"
 	" mcr p15, 0, %0, c1, c0, 1\n"
 	  : "=&r" (v)
-	  :
+	  : "Ir" (CR_C)
 	  : "cc");
 }
 
-static inline void platform_do_lowpower(unsigned int cpu)
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
 	/*
 	 * there is no power-control hardware on this platform, so all
@@ -79,22 +76,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
 	/*}*/
 
 		/*
-		 * getting here, means that we have come out of WFI without
+		 * Getting here, means that we have come out of WFI without
 		 * having been woken up - this shouldn't happen
 		 *
-		 * The trouble is, letting people know about this is not really
-		 * possible, since we are currently running incoherently, and
-		 * therefore cannot safely call printk() or anything else
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
 		 */
-#ifdef DEBUG
-		printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
-#endif
+		(*spurious)++;
 	}
 }
 
 int platform_cpu_kill(unsigned int cpu)
 {
-	return wait_for_completion_timeout(&cpu_killed, 5000);
+	return 1;
 }
 
 /*
@@ -104,30 +98,22 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
-#ifdef DEBUG
-	unsigned int this_cpu = hard_smp_processor_id();
-
-	if (cpu != this_cpu) {
-		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
-			this_cpu, cpu);
-		BUG();
-	}
-#endif
-
-	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
-	complete(&cpu_killed);
+	int spurious = 0;
 
 	/*
 	 * we're ready for shutdown now, so do it
 	 */
 	cpu_enter_lowpower();
-	platform_do_lowpower(cpu);
+	platform_do_lowpower(cpu, &spurious);
 
 	/*
 	 * bring this CPU back into the world of cache
 	 * coherency, and then restore interrupts
 	 */
 	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
 }
 
 int platform_cpu_disable(unsigned int cpu)
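
The hotplug.c hunks replace the DEBUG-only printk inside the incoherent WFI loop with a counter that is reported only after cache coherency is restored. A minimal standalone sketch of that pattern (plain C, not kernel code; wfi_returned() and the call sites are hypothetical stand-ins):

/*
 * Sketch: while the CPU runs incoherently it is unsafe to print, so
 * spurious wakeups are merely counted; the count is reported once the
 * CPU is back in the coherent world.
 */
#include <stdio.h>

static int wfi_returned(void)
{
	static int calls;
	return ++calls <= 2;	/* pretend we see two spurious wakeups */
}

static void do_lowpower(unsigned int cpu, int *spurious)
{
	(void)cpu;
	while (wfi_returned())
		(*spurious)++;	/* just note it - no printing in here */
}

static void cpu_die(unsigned int cpu)
{
	int spurious = 0;

	do_lowpower(cpu, &spurious);	/* "incoherent" section: stay quiet */

	if (spurious)			/* safe to report again */
		printf("CPU%u: %d spurious wakeup calls\n", cpu, spurious);
}

int main(void)
{
	cpu_die(1);
	return 0;
}
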
diff --git a/arch/arm/mach-tegra/include/mach/smp.h b/arch/arm/mach-tegra/include/mach/smp.h
index e4a34a35a544..c8221b38ee7c 100644
--- a/arch/arm/mach-tegra/include/mach/smp.h
+++ b/arch/arm/mach-tegra/include/mach/smp.h
@@ -2,21 +2,13 @@
 #define ASMARM_ARCH_SMP_H
 
 #include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
 
 /*
  * We use IRQ1 as the IPI
  */
-static inline void smp_cross_call(const struct cpumask *mask)
-{
-	gic_raise_softirq(mask, 1);
-}
-
-/*
- * Do nothing on MPcore.
- */
-static inline void smp_cross_call_done(cpumask_t callmap)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
 {
+	gic_raise_softirq(mask, ipi);
 }
 
 #endif
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 1c0fd92cab39..c729cd72cc3b 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -22,7 +22,6 @@
 #include <asm/cacheflush.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
-#include <asm/localtimer.h>
 #include <asm/smp_scu.h>
 
 #include <mach/iomap.h>
@@ -41,8 +40,6 @@ static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
 
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
-	trace_hardirqs_off();
-
 	/*
 	 * if any interrupts are already enabled for the primary
 	 * core (e.g. timer irq), then they will not have been enabled
@@ -117,24 +114,20 @@ void __init smp_init_cpus(void)
 {
 	unsigned int i, ncores = scu_get_core_count(scu_base);
 
+	if (ncores > NR_CPUS) {
+		printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
+			ncores, NR_CPUS);
+		ncores = NR_CPUS;
+	}
+
 	for (i = 0; i < ncores; i++)
 		cpu_set(i, cpu_possible_map);
 }
 
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned int ncores = scu_get_core_count(scu_base);
-	unsigned int cpu = smp_processor_id();
 	int i;
 
-	smp_store_cpu_info(cpu);
-
-	/*
-	 * are we trying to boot more cores than exist?
-	 */
-	if (max_cpus > ncores)
-		max_cpus = ncores;
-
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time.
@@ -142,15 +135,5 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (i = 0; i < max_cpus; i++)
 		set_cpu_present(i, true);
 
-	/*
-	 * Initialise the SCU if there are more than one CPU and let
-	 * them know where to start. Note that, on modern versions of
-	 * MILO, the "poke" doesn't actually do anything until each
-	 * individual core is sent a soft interrupt to get it out of
-	 * WFI
-	 */
-	if (max_cpus > 1) {
-		percpu_timer_setup();
-		scu_enable(scu_base);
-	}
+	scu_enable(scu_base);
 }
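
The platsmp.c hunks move the core-count clamp from smp_prepare_cpus() into smp_init_cpus(), so the possible-CPU map never grows beyond what the kernel was configured for. A rough standalone illustration of that clamp (plain C, not kernel code; NR_CPUS_CFG and the bitmap stand in for the kernel's NR_CPUS and cpu_possible_map):

#include <stdio.h>

#define NR_CPUS_CFG 2	/* stand-in for the configured NR_CPUS */

int main(void)
{
	unsigned int i, ncores = 4;	/* pretend the SCU reports four cores */
	unsigned long possible_map = 0;	/* stand-in for cpu_possible_map */

	if (ncores > NR_CPUS_CFG) {
		fprintf(stderr,
			"Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
			ncores, NR_CPUS_CFG);
		ncores = NR_CPUS_CFG;
	}

	for (i = 0; i < ncores; i++)
		possible_map |= 1UL << i;	/* analogue of cpu_set(i, cpu_possible_map) */

	printf("possible map: 0x%lx\n", possible_map);
	return 0;
}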