-rw-r--r--   arch/arm64/include/asm/cpu_ops.h   | 16
-rw-r--r--   arch/arm64/include/asm/smp.h       |  3
-rw-r--r--   arch/arm64/kernel/head.S           | 12
-rw-r--r--   arch/arm64/kernel/psci.c           | 18
-rw-r--r--   arch/arm64/kernel/smp.c            | 65
-rw-r--r--   arch/arm64/kernel/smp_spin_table.c | 75
-rw-r--r--   arch/arm64/kernel/vmlinux.lds.S    |  1
7 files changed, 117 insertions(+), 73 deletions(-)
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 3c60c8d1d928..67bc4fd83798 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -21,10 +21,26 @@
 
 struct device_node;
 
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name:	Name of the property as it appears in a devicetree cpu node's
+ *		enable-method property.
+ * @cpu_init:	Reads any data necessary for a specific enable-method from the
+ *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ *		mechanism for doing so, tests whether it is possible to boot
+ *		the given CPU.
+ * @cpu_boot:	Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ *		synchronisation. Called from the cpu being booted.
+ */
 struct cpu_operations {
 	const char	*name;
 	int		(*cpu_init)(struct device_node *, unsigned int);
 	int		(*cpu_prepare)(unsigned int);
+	int		(*cpu_boot)(unsigned int);
+	void		(*cpu_postboot)(void);
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
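This structure is the entire contract between the core SMP code and an enable method. As a rough illustration of how a back-end plugs into it — a hypothetical sketch, not part of this patch, with the my_method_* names invented for the example — the wiring looks like this:

#include <linux/of.h>

#include <asm/cpu_ops.h>

static int __init my_method_cpu_init(struct device_node *dn, unsigned int cpu)
{
	/* Read whatever this enable method needs from the cpu node. */
	return 0;
}

static int __init my_method_cpu_prepare(unsigned int cpu)
{
	/* One-off check that this CPU can actually be brought up. */
	return 0;
}

static int my_method_cpu_boot(unsigned int cpu)
{
	/* Kick the CPU towards the kernel's secondary entry point. */
	return 0;
}

const struct cpu_operations my_method_ops = {
	.name		= "my-method",
	.cpu_init	= my_method_cpu_init,
	.cpu_prepare	= my_method_cpu_prepare,
	.cpu_boot	= my_method_cpu_boot,
	/* .cpu_postboot is optional; of the two methods below, only spin-table needs one. */
};

The core code picks the back-end whose .name matches a cpu node's enable-method property and then invokes the hooks in order: init and prepare on the boot CPU, boot at hot-plug time, and postboot on the freshly booted CPU itself.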
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 7e34295f78e3..d64187ce69a2 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,8 +60,7 @@ struct secondary_data {
 	void *stack;
 };
 extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7090c126797c..bf7efdf2d7e7 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -225,7 +225,6 @@ ENTRY(__boot_cpu_mode)
 	.quad	PAGE_OFFSET
 
 #ifdef CONFIG_SMP
-	.pushsection	.smp.pen.text, "ax"
 	.align	3
 1:	.quad	.
 	.quad	secondary_holding_pen_release
@@ -250,7 +249,16 @@ pen:	ldr	x4, [x3]
 	wfe
 	b	pen
 ENDPROC(secondary_holding_pen)
-	.popsection
+
+	/*
+	 * Secondary entry point that jumps straight into the kernel. Only to
+	 * be used where CPUs are brought online dynamically by the kernel.
+	 */
+ENTRY(secondary_entry)
+	bl	__calc_phys_offset		// x2=phys offset
+	bl	el2_setup			// Drop to EL1
+	b	secondary_startup
+ENDPROC(secondary_entry)
 
 ENTRY(secondary_startup)
 	/*
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ccec2ca67755..fb56b6158344 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -239,26 +239,28 @@ static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
 
 static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 {
-	int err;
-
 	if (!psci_ops.cpu_on) {
 		pr_err("no cpu_on method, not booting CPU%d\n", cpu);
 		return -ENODEV;
 	}
 
-	err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
-	if (err) {
-		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
-		return err;
-	}
-
 	return 0;
 }
 
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+	if (err)
+		pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+
+	return err;
+}
+
 const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
+	.cpu_boot	= cpu_psci_cpu_boot,
 };
 
 #endif
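With this split, cpu_psci_cpu_prepare() only checks once that the firmware exposes a CPU_ON method, while cpu_psci_cpu_boot() performs the actual power-on at hot-plug time. The entry point is passed as __pa(secondary_entry) because the target CPU comes out of firmware with its MMU off and can only use a physical address. A minimal sketch of how a caller consumes the result — illustrative only, the real call site is the boot_secondary() change in smp.c below:

/* Sketch only: cpu_boot returns 0 on success or a negative errno. */
static int try_boot_cpu(unsigned int cpu)
{
	int err = cpu_ops[cpu]->cpu_boot(cpu);	/* e.g. cpu_psci_cpu_boot() */

	if (err)
		pr_err("CPU%u: bring-up failed (%d)\n", cpu, err);

	return err;
}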
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8965fb7dee89..6806bc40b630 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -55,7 +55,6 @@
  * where to place its SVC stack
  */
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 enum ipi_msg_type {
 	IPI_RESCHEDULE,
@@ -64,61 +63,16 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
-	void *start = (void *)&secondary_holding_pen_release;
-	unsigned long size = sizeof(secondary_holding_pen_release);
-
-	secondary_holding_pen_release = val;
-	__flush_dcache_area(start, size);
-}
-
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	unsigned long timeout;
-
-	/*
-	 * Set synchronisation state between this boot processor
-	 * and the secondary one
-	 */
-	raw_spin_lock(&boot_lock);
-
-	/*
-	 * Update the pen release flag.
-	 */
-	write_pen_release(cpu_logical_map(cpu));
+	if (cpu_ops[cpu]->cpu_boot)
+		return cpu_ops[cpu]->cpu_boot(cpu);
 
-	/*
-	 * Send an event, causing the secondaries to read pen_release.
-	 */
-	sev();
-
-	timeout = jiffies + (1 * HZ);
-	while (time_before(jiffies, timeout)) {
-		if (secondary_holding_pen_release == INVALID_HWID)
-			break;
-		udelay(10);
-	}
-
-	/*
-	 * Now the secondary core is starting up let it run its
-	 * calibrations, then wait for it to finish
-	 */
-	raw_spin_unlock(&boot_lock);
-
-	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+	return -EOPNOTSUPP;
 }
 
 static DECLARE_COMPLETION(cpu_running);
@@ -188,17 +142,8 @@ asmlinkage void secondary_start_kernel(void)
 	preempt_disable();
 	trace_hardirqs_off();
 
-	/*
-	 * Let the primary processor know we're out of the
-	 * pen, then head off into the C entry point
-	 */
-	write_pen_release(INVALID_HWID);
-
-	/*
-	 * Synchronise with the boot thread.
-	 */
-	raw_spin_lock(&boot_lock);
-	raw_spin_unlock(&boot_lock);
+	if (cpu_ops[cpu]->cpu_postboot)
+		cpu_ops[cpu]->cpu_postboot();
 
 	/*
 	 * OK, now it's safe to let the boot CPU continue. Wait for
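boot_secondary() is now a thin dispatcher: it knows nothing about pens or release addresses, it simply calls the enable method's cpu_boot hook and reports -EOPNOTSUPP if none was registered. For orientation, the surrounding bring-up flow looks roughly like the following condensed, illustrative sketch (paraphrasing the existing __cpu_up() path; the helper name and timeout here are not part of this patch):

/* Condensed sketch of the bring-up sequence around boot_secondary(). */
static int bring_up_cpu(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Hand the secondary its initial stack via secondary_data. */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	ret = boot_secondary(cpu, idle);	/* -> cpu_ops[cpu]->cpu_boot(cpu) */
	if (ret == 0)
		/* The secondary signals cpu_running from secondary_start_kernel(). */
		wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000));
	else
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);

	secondary_data.stack = NULL;
	return ret;
}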
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index a8b76e4eccff..27f08367a6e7 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,14 +16,37 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 static phys_addr_t cpu_release_addr[NR_CPUS];
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+	void *start = (void *)&secondary_holding_pen_release;
+	unsigned long size = sizeof(secondary_holding_pen_release);
+
+	secondary_holding_pen_release = val;
+	__flush_dcache_area(start, size);
+}
+
 
 static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
 {
@@ -60,8 +83,60 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	return 0;
 }
 
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+	unsigned long timeout;
+
+	/*
+	 * Set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	raw_spin_lock(&boot_lock);
+
+	/*
+	 * Update the pen release flag.
+	 */
+	write_pen_release(cpu_logical_map(cpu));
+
+	/*
+	 * Send an event, causing the secondaries to read pen_release.
+	 */
+	sev();
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (secondary_holding_pen_release == INVALID_HWID)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	raw_spin_unlock(&boot_lock);
+
+	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+void smp_spin_table_cpu_postboot(void)
+{
+	/*
+	 * Let the primary processor know we're out of the pen.
+	 */
+	write_pen_release(INVALID_HWID);
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
+}
+
 const struct cpu_operations smp_spin_table_ops = {
 	.name		= "spin-table",
 	.cpu_init	= smp_spin_table_cpu_init,
 	.cpu_prepare	= smp_spin_table_cpu_prepare,
+	.cpu_boot	= smp_spin_table_cpu_boot,
+	.cpu_postboot	= smp_spin_table_cpu_postboot,
 };
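The pen handshake moved here is a two-sided protocol: smp_spin_table_cpu_boot() publishes the target CPU's MPIDR in secondary_holding_pen_release (flushed with __flush_dcache_area(), since the waiting CPU may not yet be coherent) and issues sev(), while the secondary spins in the pen: loop in head.S until it sees its own MPIDR, enters the kernel, and clears the flag from smp_spin_table_cpu_postboot(). The secondary's side, rendered in C purely to make the protocol explicit (the real code is the assembly in head.S; the function name here is illustrative):

/* Illustrative only: what the head.S pen loop does, expressed in C. */
static void holding_pen_wait(u64 my_hwid)
{
	/* Spin until the boot CPU publishes our MPIDR... */
	while (secondary_holding_pen_release != my_hwid)
		wfe();			/* ...sleeping until its sev() wakes us */

	/*
	 * Then proceed into secondary_startup()/secondary_start_kernel(),
	 * which calls smp_spin_table_cpu_postboot() to write back
	 * INVALID_HWID and release the boot CPU's poll loop above.
	 */
}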
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f8ab9d8e2ea3..991ffddf49df 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,7 +54,6 @@ SECTIONS
 	}
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
-			*(.smp.pen.text)
 		__exception_text_start = .;
 		*(.exception.text)
 		__exception_text_end = .;