Diffstat (limited to 'arch/tile/kernel/smpboot.c')
 arch/tile/kernel/smpboot.c | 293 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 293 insertions(+), 0 deletions(-)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
new file mode 100644
index 000000000000..aa3aafdb4b93
--- /dev/null
+++ b/arch/tile/kernel/smpboot.c
@@ -0,0 +1,293 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/*
 * This assembly function is provided in entry.S.
 * When called, it loops on a nap instruction forever.
 * FIXME: should be in a header somewhere.
 */
extern void smp_nap(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;
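
/*
 * Lifecycle note (from this file): smp_prepare_cpus() points this at
 * online_secondary() so that the start-cpu message brings a tile online;
 * smp_cpus_done() later re-points it at panic_start_cpu(), turning any
 * post-boot start message into a hard error.
 */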

/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
        int cpu = smp_processor_id();
        set_cpu_online(cpu, 1);
        set_cpu_present(cpu, 1);
        __get_cpu_var(cpu_state) = CPU_ONLINE;

        init_messaging();
}

static void start_secondary(void);
void online_secondary(void);	/* defined below; entered via start-cpu message */

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        long rc;
        int cpu, cpu_count;
        int boot_cpu = smp_processor_id();

        current_thread_info()->cpu = boot_cpu;

        /*
         * Pin this task to the boot CPU while we bring up the others,
         * just to make sure we don't uselessly migrate as they come up.
         */
        rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
        if (rc != 0)
                printk(KERN_WARNING
                       "Couldn't set init affinity to boot cpu (%ld)\n", rc);

        /* Print information about disabled and dataplane cpus. */
        print_disabled_cpus();

        /*
         * Tell the messaging subsystem how to respond to the
         * startup message.  We use a level of indirection to avoid
         * confusing the linker with the fact that the messaging
         * subsystem is calling __init code.
         */
        start_cpu_function_addr = (unsigned long) &online_secondary;

        /* Set up thread context for all new processors. */
        cpu_count = 1;
        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                struct task_struct *idle;

                if (cpu == boot_cpu)
                        continue;

                if (!cpu_possible(cpu)) {
                        /*
                         * Make this processor do nothing on boot.
                         * Note that we don't give the boot_pc function
                         * a stack, so it has to be assembly code.
                         */
                        per_cpu(boot_sp, cpu) = 0;
                        per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
                        continue;
                }

                /* Create a new idle thread to run start_secondary() */
                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);
                idle->thread.pc = (unsigned long) start_secondary;

                /* Make this thread the boot thread for this processor */
                per_cpu(boot_sp, cpu) = task_ksp0(idle);
                per_cpu(boot_pc, cpu) = idle->thread.pc;

                ++cpu_count;
        }
        BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

        /* Fire up the other tiles, if any */
        init_cpu_present(cpu_possible_mask);
        if (cpumask_weight(cpu_present_mask) > 1) {
                mb(); /* make sure all data is visible to new processors */
                hv_start_all_tiles();
        }
}
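
/*
 * Bring-up handoff implied above (a sketch, assuming the hypervisor
 * semantics of hv_start_all_tiles()): each tile begins executing at
 * per_cpu(boot_pc) with per_cpu(boot_sp) as its stack.  Non-possible
 * cpus were pointed at smp_nap (no stack needed), while real
 * secondaries enter start_secondary() on their idle thread's stack.
 */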

static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
        long rc = sched_setaffinity(current->pid, &init_affinity);
        if (rc != 0)
                printk(KERN_WARNING "couldn't reset init affinity (%ld)\n",
                       rc);
        return 0;
}
late_initcall(reset_init_affinity);
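
/*
 * Pairing note: init_affinity is populated in smp_cpus_done() below,
 * which temporarily pins init to a single cpu during bringup; the
 * late_initcall above then widens init back to the saved online set.
 */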

struct cpumask cpu_started __cpuinitdata;

/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void __cpuinit start_secondary(void)
{
        int cpuid = smp_processor_id();

        /* Set our thread pointer appropriately. */
        set_my_cpu_offset(__per_cpu_offset[cpuid]);

        preempt_disable();

        /*
         * In large machines even this will slow us down, since we
         * will be contending for the printk spinlock.
         */
        /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

        /* Initialize the current asid for our first page table. */
        __get_cpu_var(current_asid) = min_asid;

        /* Set up this thread as another owner of the init_mm */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

        /* Enable IRQs. */
        init_per_tile_IRQs();

        /* Allow hypervisor messages to be received */
        init_messaging();
        local_irq_enable();

        /* Indicate that we're ready to come up. */
        /* Must not do this before we're ready to receive messages */
        if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
                printk(KERN_WARNING "CPU#%d already started!\n", cpuid);
                for (;;)
                        local_irq_enable();
        }

        smp_nap();
}
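
/*
 * Handshake with __cpu_up() below: after setting its bit in
 * cpu_started, the secondary parks in smp_nap().  The boot cpu then
 * sends MSG_TAG_START_CPU, and the messaging code jumps through
 * start_cpu_function_addr, i.e. into online_secondary(), on this tile.
 */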

void setup_mpls(void);	/* from kernel/setup.c */
void store_permanent_mappings(void);

/*
 * Bring a secondary processor online.
 */
void __cpuinit online_secondary(void)
{
        /*
         * low-memory mappings have been cleared, flush them from
         * the local TLBs too.
         */
        local_flush_tlb();

        BUG_ON(in_interrupt());

        /* This must be done before setting cpu_online_mask */
        wmb();

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines the number
         * of IPI recipients and the time when the determination is
         * made for which cpus receive the IPI.  Holding the lock
         * keeps this cpu from being included in a currently
         * in-progress smp_call_function().
         */
        ipi_call_lock();
        set_cpu_online(smp_processor_id(), 1);
        ipi_call_unlock();
        __get_cpu_var(cpu_state) = CPU_ONLINE;

        /* Set up MPLs for this processor */
        setup_mpls();

        /* Set up tile-timer clock-event device on this cpu */
        setup_tile_timer();

        preempt_enable();

        store_permanent_mappings();

        cpu_idle();
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        /* Wait up to 5s total for all CPUs to come online. */
        static int timeout;
        for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
                if (timeout >= 50000) {
                        printk(KERN_INFO "skipping unresponsive cpu%d\n", cpu);
                        local_irq_enable();
                        return -EIO;
                }
                udelay(100);
        }

        local_irq_enable();
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Unleash the CPU! */
        send_IPI_single(cpu, MSG_TAG_START_CPU);
        while (!cpumask_test_cpu(cpu, cpu_online_mask))
                cpu_relax();
        return 0;
}
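
/*
 * Note on the 5s figure above: `timeout' is static and never reset,
 * so the 50000 * udelay(100) = 5 second budget is shared across all
 * cpus brought up, not restarted per cpu.
 */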

static void panic_start_cpu(void)
{
        panic("Received a MSG_START_CPU IPI after boot finished.");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu, next, rc;

        /* Reset the response to a (now illegal) MSG_START_CPU IPI. */
        start_cpu_function_addr = (unsigned long) &panic_start_cpu;

        cpumask_copy(&init_affinity, cpu_online_mask);

        /*
         * Pin ourselves to a single cpu in the initial affinity set
         * so that kernel mappings for the rootfs are not in the dataplane,
         * if set, and to avoid unnecessary migration during bringup.
         * Use the last cpu just in case the whole chip has been
         * isolated from the scheduler, to keep init away from likely
         * more useful user code.  This also ensures that work scheduled
         * via schedule_delayed_work() in the init routines will land
         * on this cpu.
         */
        /* Walk to the last cpu in init_affinity. */
        for (cpu = cpumask_first(&init_affinity);
             (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
             cpu = next)
                ;
        rc = sched_setaffinity(current->pid, cpumask_of(cpu));
        if (rc != 0)
                printk(KERN_WARNING
                       "Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}