Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r--  arch/sparc/kernel/smp_32.c  121
1 file changed, 85 insertions(+), 36 deletions(-)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..21b125341bf7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -37,8 +37,6 @@
 #include "irq.h"
 
 volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
-unsigned char boot_cpu_id = 0;
-unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
@@ -53,6 +51,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 void __cpuinit smp_store_cpu_info(int id)
 {
 	int cpu_node;
+	int mid;
 
 	cpu_data(id).udelay_val = loops_per_jiffy;
 
@@ -60,10 +59,13 @@ void __cpuinit smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 	cpu_data(id).prom_node = cpu_node;
-	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+	mid = cpu_get_hwmid(cpu_node);
 
-	if (cpu_data(id).mid < 0)
-		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+	if (mid < 0) {
+		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+		mid = 0;
+	}
+	cpu_data(id).mid = mid;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
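
The hunk above downgrades a fatal probe failure: a CPU node without a MID property now logs a notice and falls back to MID 0 instead of panicking during early SMP bring-up. Note that the printk inherits the old panic format string verbatim, so it prints the node with "0x%08d" (a decimal conversion behind a hex prefix) and lacks a trailing newline. A cleaned-up call, not part of this commit, might read:

	printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n",
	       id, cpu_node);
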
@@ -112,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 }
 
 void cpu_panic(void)
@@ -125,13 +127,58 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
 
 void smp_send_reschedule(int cpu)
 {
-	/* See sparc64 */
+	/*
+	 * CPU model dependent way of implementing IPI generation targeting
+	 * a single CPU. The trap handler needs only to do trap entry/return
+	 * to call schedule.
+	 */
+	BTFIXUP_CALL(smp_ipi_resched)(cpu);
 }
 
 void smp_send_stop(void)
 {
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+	/* trigger one IPI single call on one CPU */
+	BTFIXUP_CALL(smp_ipi_single)(cpu);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	int cpu;
+
+	/* trigger IPI mask call on each CPU */
+	for_each_cpu(cpu, mask)
+		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+}
+
+void smp_resched_interrupt(void)
+{
+	irq_enter();
+	scheduler_ipi();
+	local_cpu_data().irq_resched_count++;
+	irq_exit();
+	/* re-schedule routine called by interrupt return code. */
+}
+
+void smp_call_function_single_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
+}
+
+void smp_call_function_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
+}
+
 void smp_flush_cache_all(void)
 {
 	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
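
The new IPI entry points are indirected through sparc32's BTFIXUP mechanism, so each machine type patches in its own IPI-raising primitive at boot. The registration side is not part of this file; as a rough sketch, a platform would wire up its handlers along these lines (the sun4m_* names are illustrative assumptions, not taken from this diff):

	/* Hypothetical platform hookup via btfixup; handler names assumed. */
	BTFIXUPSET_CALL(smp_ipi_resched, sun4m_ipi_resched, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_single, sun4m_ipi_single, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_mask_one, sun4m_ipi_mask_one, BTFIXUPCALL_NORM);
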
@@ -147,9 +194,10 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
 		local_flush_cache_mm(mm);
 	}
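
This hunk, and the analogous ones below, convert the open-coded cpumask handling to the pointer-based cpumask_* accessor API: instead of copying the mask with a struct assignment and manipulating it through the old cpu_clear()/cpus_empty() helpers, the code copies via cpumask_copy() and works strictly through pointers, which is the kernel-wide direction for cpumask handling. Condensed from the hunks themselves, the transformation is:

	cpumask_t cpu_mask;

	/* old style: struct assignment plus the legacy helpers */
	cpu_mask = *mm_cpumask(mm);
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		/* cross-call the other CPUs */;

	/* new style: the same steps via pointer-based accessors */
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		/* cross-call the other CPUs */;
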
@@ -158,9 +206,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask)) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask)) {
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
 			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
 				cpumask_copy(mm_cpumask(mm),
@@ -176,9 +225,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
 		local_flush_cache_range(vma, start, end);
 	}
@@ -190,9 +240,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
 		local_flush_tlb_range(vma, start, end);
 	}
@@ -203,9 +254,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
 		local_flush_cache_page(vma, page);
 	}
@@ -216,19 +268,15 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
 		local_flush_tlb_page(vma, page);
 	}
 }
 
-void smp_reschedule_irq(void)
-{
-	set_need_resched();
-}
-
 void smp_flush_page_to_ram(unsigned long page)
 {
 	/* Current theory is that those who call this are the one's
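
With reschedule now delivered as a genuine IPI (smp_resched_interrupt() above calls scheduler_ipi() inside an irq_enter()/irq_exit() pair), the old smp_reschedule_irq(), which merely set the need-resched flag via set_need_resched(), becomes dead code and is dropped by this hunk. The resulting flow, sketched under the assumption that the platform's trap handler dispatches the resched trap to the new C routine (the trap-entry side is not part of this diff):

	smp_send_reschedule(cpu)
	  -> BTFIXUP_CALL(smp_ipi_resched)(cpu)    raise the soft IRQ on the target CPU
	     ... target CPU takes the trap ...
	  -> smp_resched_interrupt()               invoked from the trap handler
	       irq_enter();
	       scheduler_ipi();                    let the scheduler react
	       irq_exit();
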
@@ -245,9 +293,10 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = *mm_cpumask(mm);
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask))
+	cpumask_t cpu_mask;
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+	if (!cpumask_empty(&cpu_mask))
 		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
 	local_flush_sig_insns(mm, insn_addr);
 }
@@ -325,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 }
 
 /* Set this up early so that things like the scheduler can init
@@ -398,10 +447,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		printk("UNKNOWN!\n");
 		BUG();
 		break;
-	};
+	}
 
 	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
 		while (!cpu_online(cpu))
 			mb();
 	}