author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>   2011-05-26 19:24:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2011-05-26 20:12:32 -0400
commit     937e26c0d1843c92750dac9bca1c972d33e73306 (patch)
tree       aad9ae8c1f736a3acd56fbcc954cb1af7a50ea6e /arch/m32r
parent     ba7328b2d83090c2440b8d0baa6ccfc2ddf1bda6 (diff)
m32r: convert cpumask api
We plan to remove the old cpus_xx() cpumask APIs later. We also plan to
change the mm_cpumask() implementation to allocate only nr_cpu_ids bits,
after which dereferencing *mm_cpumask() becomes a dangerous operation.
This patch converts the m32r code to the pointer-based cpumask API.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/m32r')
-rw-r--r--   arch/m32r/include/asm/smp.h |  4
-rw-r--r--   arch/m32r/kernel/smp.c      | 51
-rw-r--r--   arch/m32r/kernel/smpboot.c  | 48
3 files changed, 51 insertions, 52 deletions
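The conversion throughout this patch is mechanical: value-based cpus_*() helpers that operate on a cpumask_t become pointer-based cpumask_*() helpers, and direct reads and writes of cpu_online_map become cpu_online_mask / set_cpu_online() accessors. The following userspace sketch only mimics the shape of the new calls; the cpumask_t layout and helpers below are stand-ins, not the kernel's implementation, and NR_CPUS is an assumed value.

#include <stdio.h>
#include <string.h>

#define NR_CPUS 64                          /* assumed small config for the sketch */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define MASK_LONGS ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_t;

/* pointer-based helpers shaped like the kernel's cpumask_*() API */
static void cpumask_clear(cpumask_t *m)        { memset(m->bits, 0, sizeof(m->bits)); }
static void cpumask_copy(cpumask_t *d, const cpumask_t *s) { *d = *s; }
static void cpumask_set_cpu(int cpu, cpumask_t *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}
static void cpumask_clear_cpu(int cpu, cpumask_t *m)
{
	m->bits[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}
static int cpumask_test_cpu(int cpu, const cpumask_t *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}
static int cpumask_weight(const cpumask_t *m)
{
	int i, w = 0;
	for (i = 0; i < NR_CPUS; i++)
		w += cpumask_test_cpu(i, m);
	return w;
}
static int cpumask_empty(const cpumask_t *m)   { return cpumask_weight(m) == 0; }

int main(void)
{
	cpumask_t online, tmp;

	cpumask_clear(&online);
	cpumask_set_cpu(0, &online);           /* was: cpu_set(0, cpu_online_map) */
	cpumask_set_cpu(1, &online);

	cpumask_copy(&tmp, &online);           /* was: tmp = cpu_online_map       */
	cpumask_clear_cpu(0, &tmp);            /* was: cpu_clear(0, tmp)          */

	printf("online weight = %d, tmp empty = %d, cpu1 set = %d\n",
	       cpumask_weight(&online), cpumask_empty(&tmp),
	       cpumask_test_cpu(1, &tmp));
	return 0;
}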
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index 8accc1bb0263..cf7829a61551 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
 
 static __inline__ unsigned int num_booting_cpus(void)
 {
-	return cpus_weight(cpu_callout_map);
+	return cpumask_weight(&cpu_callout_map);
 }
 
 extern void smp_send_timer(void);
-extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
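The prototype change above is the theme of the header update: passing cpumask_t by value copies the whole NR_CPUS-bit structure onto the stack on every call, while the new signature passes only a const pointer. A userspace sketch of the size difference; CONFIG_NR_CPUS, the cpumask layout, and both function names below are illustrative assumptions, not the m32r code.

#include <stdio.h>

#define NR_CPUS 4096                          /* assumed config value */
#define BITS_PER_LONG (8 * sizeof(long))

/* simplified stand-in for the kernel's struct cpumask */
typedef struct { unsigned long bits[NR_CPUS / BITS_PER_LONG]; } cpumask_t;

/* old style: the whole bitmap is copied for every call */
static unsigned long send_ipi_byval(cpumask_t mask)         { return mask.bits[0]; }
/* new style: only a pointer crosses the call boundary */
static unsigned long send_ipi_byptr(const cpumask_t *mask)  { return mask->bits[0]; }

int main(void)
{
	cpumask_t m = { { 0 } };
	m.bits[0] = 1UL;
	printf("sizeof(cpumask_t) = %zu bytes copied per by-value call\n",
	       sizeof(cpumask_t));
	printf("by-value: %lu, by-pointer: %lu\n",
	       send_ipi_byval(m), send_ipi_byptr(&m));
	return 0;
}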
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index fc10b39893d4..f758100b8976 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -87,7 +87,6 @@ void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
 static void send_IPI_mask(const struct cpumask *, int, int);
-unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 /* Rescheduling request Routines                                             */
@@ -162,10 +161,10 @@ void smp_flush_cache_all(void)
 	unsigned long *mask;
 
 	preempt_disable();
-	cpumask = cpu_online_map;
-	cpu_clear(smp_processor_id(), cpumask);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
-	mask=cpus_addr(cpumask);
+	mask=cpumask_bits(&cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
@@ -263,8 +262,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = *mm_cpumask(mm);
-	cpu_clear(cpu_id, cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(cpu_id, &cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
 		local_irq_save(flags);
@@ -275,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
 
 	preempt_enable();
@@ -333,8 +332,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = *mm_cpumask(mm);
-	cpu_clear(cpu_id, cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(cpu_id, &cpu_mask);
 
 #ifdef DEBUG_SMP
 	if (!mm)
@@ -348,7 +347,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		__flush_tlb_page(va);
 		local_irq_restore(flags);
 	}
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, vma, va);
 
 	preempt_enable();
@@ -395,14 +394,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	BUG_ON(cpus_empty(cpumask));
+	BUG_ON(cpumask_empty(&cpumask));
 
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
 	BUG_ON(!mm);
 
 	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (cpus_empty(cpumask))
+	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+	if (cpumask_empty(&cpumask))
 		return;
 
 	/*
@@ -416,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_vma = vma;
 	flush_va = va;
-	mask=cpus_addr(cpumask);
+	mask=cpumask_bits(&cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
@@ -425,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 */
 	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
-	while (!cpus_empty(flush_cpumask)) {
+	while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
 		mb();
 	}
@@ -460,7 +459,7 @@ void smp_invalidate_interrupt(void)
 	int cpu_id = smp_processor_id();
 	unsigned long *mmc = &flush_mm->context[cpu_id];
 
-	if (!cpu_isset(cpu_id, flush_cpumask))
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
 		return;
 
 	if (flush_va == FLUSH_ALL) {
@@ -478,7 +477,7 @@ void smp_invalidate_interrupt(void)
 			__flush_tlb_page(va);
 		}
 	}
-	cpu_clear(cpu_id, flush_cpumask);
+	cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -530,7 +529,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(cpu_id, cpu_online_map);
+	set_cpu_online(cpu_id, false);
 
 	/*
 	 * PSW IE = 1;
@@ -725,8 +724,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
 {
 	cpumask_t cpumask;
 
-	cpumask = cpu_online_map;
-	cpu_clear(smp_processor_id(), cpumask);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
 	send_IPI_mask(&cpumask, ipi_num, try);
 }
@@ -763,13 +762,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
 	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
-	physid_mask = CPU_MASK_NONE;
+	cpumask_clear(&physid_mask);
 	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
-			cpu_set(phys_id, physid_mask);
+			cpumask_set_cpu(phys_id, &physid_mask);
 	}
 
-	send_IPI_mask_phys(physid_mask, ipi_num, try);
+	send_IPI_mask_phys(&physid_mask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -792,14 +791,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
-unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
+unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
	int try)
 {
 	spinlock_t *ipilock;
 	volatile unsigned long *ipicr_addr;
 	unsigned long ipicr_val;
 	unsigned long my_physid_mask;
-	unsigned long mask = cpus_addr(physid_mask)[0];
+	unsigned long mask = cpumask_bits(physid_mask)[0];
 
 
 	if (mask & ~physids_coerce(phys_cpu_present_map))
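Several hunks above replace cpus_addr() with cpumask_bits(); both expose the bitmap's underlying array of unsigned longs, and the m32r code then works on the first word (all IDs fit in one word here) with atomic_set_mask(). A minimal userspace sketch of that first-word pattern; cpumask_bits() below is a stand-in with assumed semantics, and a GCC __atomic builtin stands in for the kernel's atomic_set_mask().

#include <stdio.h>

#define NR_CPUS 32
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define MASK_LONGS ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_t;

/* like the kernel's cpumask_bits(): pointer to the raw bitmap words */
#define cpumask_bits(m) ((m)->bits)

static unsigned long flush_cpumask;            /* word the IPI handlers later clear */

static void announce_flush(const cpumask_t *mask)
{
	/* take the first word of the bitmap, as the m32r code does ... */
	unsigned long w = cpumask_bits(mask)[0];
	/* ... and OR it in atomically (stand-in for atomic_set_mask()) */
	__atomic_fetch_or(&flush_cpumask, w, __ATOMIC_SEQ_CST);
}

int main(void)
{
	cpumask_t m = { { 0 } };
	m.bits[0] = (1UL << 1) | (1UL << 3);   /* CPUs 1 and 3 */
	announce_flush(&m);
	printf("flush_cpumask = %#lx\n", flush_cpumask);
	return 0;
}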
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index e034844cfc0d..cfdbe5d15002 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	bsp_phys_id = hard_smp_processor_id();
 	physid_set(bsp_phys_id, phys_cpu_present_map);
-	cpu_set(0, cpu_online_map);	/* BSP's cpu_id == 0 */
-	cpu_set(0, cpu_callout_map);
-	cpu_set(0, cpu_callin_map);
+	set_cpu_online(0, true);	/* BSP's cpu_id == 0 */
+	cpumask_set_cpu(0, &cpu_callout_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 
 	/*
 	 * Initialize the logical to physical CPU number mapping
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
 	send_status = 0;
 	boot_status = 0;
 
-	cpu_set(phys_id, cpu_bootout_map);
+	cpumask_set_cpu(phys_id, &cpu_bootout_map);
 
 	/* Send Startup IPI */
-	send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
+	send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
 
 	Dprintk("Waiting for send to finish...\n");
 	timeout = 0;
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
 	do {
 		Dprintk("+");
 		udelay(1000);
-		send_status = !cpu_isset(phys_id, cpu_bootin_map);
+		send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
 	} while (send_status && (timeout++ < 100));
 
 	Dprintk("After Startup.\n");
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
 	 * allow APs to start initializing.
 	 */
 	Dprintk("Before Callout %d.\n", cpu_id);
-	cpu_set(cpu_id, cpu_callout_map);
+	cpumask_set_cpu(cpu_id, &cpu_callout_map);
 	Dprintk("After Callout %d.\n", cpu_id);
 
 	/*
 	 * Wait 5s total for a response
 	 */
 	for (timeout = 0; timeout < 5000; timeout++) {
-		if (cpu_isset(cpu_id, cpu_callin_map))
+		if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
			break;	/* It has booted */
 		udelay(1000);
 	}
 
-	if (cpu_isset(cpu_id, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		Dprintk("OK.\n");
 	} else {
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
 
 	if (send_status || boot_status) {
 		unmap_cpu_to_physid(cpu_id, phys_id);
-		cpu_clear(cpu_id, cpu_callout_map);
-		cpu_clear(cpu_id, cpu_callin_map);
-		cpu_clear(cpu_id, cpu_initialized);
+		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+		cpumask_clear_cpu(cpu_id, &cpu_initialized);
 		cpucount--;
 	}
 }
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
 {
 	int timeout;
 
-	cpu_set(cpu_id, smp_commenced_mask);
+	cpumask_set_cpu(cpu_id, &smp_commenced_mask);
 
 	/*
 	 * Wait 5s total for a response
 	 */
 	for (timeout = 0; timeout < 5000; timeout++) {
-		if (cpu_isset(cpu_id, cpu_online_map))
+		if (cpu_online(cpu_id))
			break;
 		udelay(1000);
 	}
-	if (!cpu_isset(cpu_id, cpu_online_map))
+	if (!cpu_online(cpu_id))
 		BUG();
 
 	return 0;
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	unsigned long bogosum = 0;
 
 	for (timeout = 0; timeout < 5000; timeout++) {
-		if (cpus_equal(cpu_callin_map, cpu_online_map))
+		if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
			break;
 		udelay(1000);
 	}
-	if (!cpus_equal(cpu_callin_map, cpu_online_map))
+	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
 		BUG();
 
 	for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 */
 	Dprintk("Before bogomips.\n");
 	if (cpucount) {
-		for_each_cpu_mask(cpu_id, cpu_online_map)
+		for_each_cpu(cpu_id,cpu_online_mask)
			bogosum += cpu_data[cpu_id].loops_per_jiffy;
 
 		printk(KERN_INFO "Total of %d processors activated " \
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
 	cpu_init();
 	preempt_disable();
 	smp_callin();
-	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
 		cpu_relax();
 
 	smp_online();
@@ -463,7 +463,7 @@ static void __init smp_callin(void)
 	int cpu_id = smp_processor_id();
 	unsigned long timeout;
 
-	if (cpu_isset(cpu_id, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
 		printk("huh, phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpu_id);
 		BUG();
@@ -474,7 +474,7 @@ static void __init smp_callin(void)
 	timeout = jiffies + (2 * HZ);
 	while (time_before(jiffies, timeout)) {
 		/* Has the boot CPU finished it's STARTUP sequence ? */
-		if (cpu_isset(cpu_id, cpu_callout_map))
+		if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
			break;
 		cpu_relax();
 	}
@@ -486,7 +486,7 @@ static void __init smp_callin(void)
 	}
 
 	/* Allow the master to continue. */
-	cpu_set(cpu_id, cpu_callin_map);
+	cpumask_set_cpu(cpu_id, &cpu_callin_map);
 }
 
 static void __init smp_online(void)
@@ -503,7 +503,7 @@ static void __init smp_online(void)
 	/* Save our processor parameters */
 	smp_store_cpu_info(cpu_id);
 
-	cpu_set(cpu_id, cpu_online_map);
+	set_cpu_online(cpu_id, true);
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
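One conversion in smpboot.c worth noting: cpumask_of_cpu() produced a cpumask_t by value, while cpumask_of() yields a const pointer to a single-bit mask, which matches the new pointer-based send_IPI_mask_phys() prototype. Below is a userspace stand-in for that helper; the kernel's real cpumask_of() is built on a precomputed bitmap table, so the implementation here (and the send_boot_ipi() consumer) is purely an assumption for illustration.

#include <stdio.h>

#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define MASK_LONGS ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_t;

/* stand-in for cpumask_of(): const pointer to a mask with one bit set */
static const cpumask_t *cpumask_of(int cpu)
{
	static cpumask_t one;                  /* not thread-safe; sketch only */
	one = (cpumask_t){ { 0 } };
	one.bits[cpu / BITS_PER_LONG] = 1UL << (cpu % BITS_PER_LONG);
	return &one;
}

/* new-style consumer: takes the physical-ID mask by const pointer */
static void send_boot_ipi(const cpumask_t *physid_mask)
{
	printf("IPI word 0: %#lx\n", physid_mask->bits[0]);
}

int main(void)
{
	send_boot_ipi(cpumask_of(3));          /* boot physical CPU 3 */
	return 0;
}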