Diffstat (limited to 'arch/sh/kernel/smp.c')

 arch/sh/kernel/smp.c | 160 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 140 insertions(+), 20 deletions(-)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 002cc612deef..509b36b45115 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
  *
  * SMP support for the SuperH processors.
  *
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  * Copyright (C) 2006 - 2007 Akio Idehara
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -31,7 +31,20 @@
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
-static inline void __init smp_store_cpu_info(unsigned int cpu)
+struct plat_smp_ops *mp_ops = NULL;
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (mp_ops)
+		printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+	mp_ops = ops;
+}
+
+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
 
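For context (not part of this patch): a platform selects an implementation by passing an ops table to register_smp_ops() from its early setup code. A minimal sketch of that wiring follows, with hypothetical myplat_* names, showing only the members this diff actually dereferences (prepare_cpus, start_cpu, send_ipi, cpu_disable); the include is an assumption, since the struct definition lives outside this file.

#include <linux/init.h>
#include <asm/smp-ops.h>	/* assumed home of struct plat_smp_ops / register_smp_ops() */

static void myplat_prepare_cpus(unsigned int max_cpus)
{
	/* platform-specific: probe and park the secondary CPUs */
}

static void myplat_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* platform-specific: release 'cpu' to begin executing at entry_point
	 * (the generic code passes _stext, see __cpu_up() below) */
}

static void myplat_send_ipi(unsigned int cpu, unsigned int message)
{
	/* platform-specific: raise the interrupt for SMP_MSG_* */
}

static struct plat_smp_ops myplat_smp_ops = {
	.prepare_cpus	= myplat_prepare_cpus,
	.start_cpu	= myplat_start_cpu,
	.send_ipi	= myplat_send_ipi,
	.cpu_disable	= native_cpu_disable,	/* generic helper added by this patch */
};

/* hypothetical hook; where registration happens is platform-specific */
void __init myplat_setup_smp(void)
{
	register_smp_ops(&myplat_smp_ops);
}

The native_cpu_die() and native_play_dead() helpers added below are presumably meant to be slotted into the same table on platforms without special offlining needs. Registering twice is tolerated but warned about; the last register_smp_ops() call wins.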
@@ -46,14 +59,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = cpu;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
 
 #ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(&cpu_possible_map);
 #endif
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -62,37 +75,137 @@ void __devinit smp_prepare_boot_cpu(void)
 
 	set_cpu_online(cpu, true);
 	set_cpu_possible(cpu, true);
+
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
+			return;
+		}
+
+		msleep(100);
+	}
+
+	pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+	return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	irq_ctx_exit(raw_smp_processor_id());
+	mb();
+
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
 }
 
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mp_ops->cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+	return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+
+void native_play_dead(void)
+{
+	BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
-	unsigned int cpu;
+	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = &init_mm;
 
 	enable_mmu();
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
-	BUG_ON(current->mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	per_cpu_trap_init();
 
 	preempt_disable();
 
-	notify_cpu_starting(smp_processor_id());
+	notify_cpu_starting(cpu);
 
 	local_irq_enable();
 
-	cpu = smp_processor_id();
-
 	/* Enable local timers */
 	local_timer_setup(cpu);
 	calibrate_delay();
 
 	smp_store_cpu_info(cpu);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
 	cpu_idle();
 }
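These are the paths behind the standard CPU hotplug interface: with CONFIG_HOTPLUG_CPU enabled, writing 0 to /sys/devices/system/cpu/cpu1/online eventually reaches __cpu_disable() above, after which native_cpu_die() polls cpu_state for up to roughly a second (10 x 100ms) waiting for the dying CPU to reach CPU_DEAD.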
@@ -111,12 +224,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	struct task_struct *tsk;
 	unsigned long timeout;
 
-	tsk = fork_idle(cpu);
-	if (IS_ERR(tsk)) {
-		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
-		return PTR_ERR(tsk);
+	tsk = cpu_data[cpu].idle;
+	if (!tsk) {
+		tsk = fork_idle(cpu);
+		if (IS_ERR(tsk)) {
+			pr_err("Failed forking idle task for cpu %d\n", cpu);
+			return PTR_ERR(tsk);
+		}
+
+		cpu_data[cpu].idle = tsk;
 	}
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
 	/* Fill in data in head.S for secondary cpus */
 	stack_start.sp = tsk->thread.sp;
 	stack_start.thread_info = tsk->stack;
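Caching the idle task in cpu_data[cpu].idle means fork_idle() runs only on the first bring-up; a CPU that is later offlined and onlined again reuses its original idle thread rather than forking a fresh one each time.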
@@ -127,7 +247,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	    (unsigned long)&stack_start + sizeof(stack_start));
 	wmb();
 
-	plat_start_cpu(cpu, (unsigned long)_stext);
+	mp_ops->start_cpu(cpu, (unsigned long)_stext);
 
 	timeout = jiffies + HZ;
 	while (time_before(jiffies, timeout)) {
@@ -135,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			break;
 
 		udelay(10);
+		barrier();
 	}
 
 	if (cpu_online(cpu))
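The added barrier() is a compiler barrier: it forces the cpu_online(cpu) test to be re-evaluated on every pass instead of letting the compiler hoist the load out of the wait loop.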
@@ -159,7 +280,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void smp_send_reschedule(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -172,12 +293,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -185,7 +306,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_TIMER);
+		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
 }
 
 static void ipi_timer(void)
@@ -249,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();