author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2011-05-23 04:24:36 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>		2011-05-23 04:24:31 -0400
commit		0f1959f50646612b247d624bdbf8b0c8816f2a93 (patch)
tree		450c9765dc1fef51a35c1a389cdf26ed5ed89ee1 /arch
parent		7dd8fe1f910f9644167ef91ddab44107d0d668c5 (diff)
[S390] convert old cpumask API into new one
Adapt the code to the new cpumask API.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
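
For readers not familiar with the conversion: the old cpumask API operated on cpumask_t values directly (cpu_set(), cpus_clear(), cpumask_of_cpu(), cpus_empty(), ...), while the new API operates on struct cpumask pointers, with accessor helpers for the global masks. The following kernel-style sketch of the mapping applied throughout this patch is purely illustrative; the function and the printouts are not part of the patch:

#include <linux/cpumask.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch (not part of the patch): each statement uses the
 * new cpumask call, with the old call it replaces in the comment.
 */
static void cpumask_conversion_example(unsigned int cpu)
{
	cpumask_t mask;
	unsigned int i;

	cpumask_clear(&mask);			/* was: cpus_clear(mask) */
	cpumask_copy(&mask, cpumask_of(cpu));	/* was: mask = cpumask_of_cpu(cpu) */
	cpumask_set_cpu(cpu, &mask);		/* was: cpu_set(cpu, mask) */

	if (cpumask_test_cpu(cpu, &mask))	/* was: cpu_isset(cpu, mask) */
		pr_info("cpu %u is set\n", cpu);

	for_each_cpu(i, &mask)			/* was: for_each_cpu_mask(i, mask) */
		pr_info("member: %u\n", i);

	if (cpumask_empty(&mask))		/* was: cpus_empty(mask) */
		pr_info("mask is empty\n");
}

Writes to the global maps go through set_cpu_present()/set_cpu_online() instead of touching cpu_present_map/cpu_online_map directly, and the read-only views cpu_present_mask, cpu_online_mask and cpu_possible_mask are const struct cpumask pointers.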
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/include/asm/tlbflush.h	2
-rw-r--r--	arch/s390/kernel/smp.c			24
-rw-r--r--	arch/s390/kernel/time.c			4
-rw-r--r--	arch/s390/kernel/topology.c		16
4 files changed, 23 insertions, 23 deletions
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 4fdcefc1a98d..b7a4f2eb0057 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 	/*
 	 * If the process only ran on the local cpu, do a local flush.
 	 */
-	local_cpumask = cpumask_of_cpu(smp_processor_id());
+	cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
 	if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
 		__tlb_flush_local();
 	else
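
One slightly non-mechanical pattern shows up above (and again in topology.c): cpumask_of_cpu() returned a cpumask_t by value, whereas cpumask_of() returns a const struct cpumask *, so filling the on-stack mask now takes an explicit cpumask_copy(). A small sketch of just that call shape (the helper name is made up for illustration; the caller is expected to have preemption disabled, as __tlb_flush_full() does):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical helper, only to show the replacement used in the hunk above. */
static void fill_local_cpumask(cpumask_t *local_cpumask)
{
	/* old: *local_cpumask = cpumask_of_cpu(smp_processor_id()); */
	cpumask_copy(local_cpumask, cpumask_of(smp_processor_id()));
}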
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2310f07aaadb..fed71dcaa0d6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -335,7 +335,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
 		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		if (!cpu_stopped(logical_cpu))
 			continue;
-		cpu_set(logical_cpu, cpu_present_map);
+		set_cpu_present(logical_cpu, true);
 		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
 		logical_cpu = cpumask_next(logical_cpu, &avail);
 		if (logical_cpu >= nr_cpu_ids)
@@ -367,7 +367,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
 		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
-		cpu_set(logical_cpu, cpu_present_map);
+		set_cpu_present(logical_cpu, true);
 		if (cpu >= info->configured)
 			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
 		else
@@ -385,7 +385,7 @@ static int __smp_rescan_cpus(void)
 {
 	cpumask_t avail;
 
-	cpus_xor(avail, cpu_possible_map, cpu_present_map);
+	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
 	if (smp_use_sigp_detection)
 		return smp_rescan_cpus_sigp(avail);
 	else
@@ -467,7 +467,7 @@ int __cpuinit start_secondary(void *cpuvoid)
 	notify_cpu_starting(smp_processor_id());
 	/* Mark this cpu as online */
 	ipi_call_lock();
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
@@ -644,7 +644,7 @@ int __cpu_disable(void)
 	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/* Disable pfault pseudo page faults on this cpu. */
 	pfault_fini();
@@ -738,8 +738,8 @@ void __init smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != 0);
 
 	current_thread_info()->cpu = 0;
-	cpu_set(0, cpu_present_map);
-	cpu_set(0, cpu_online_map);
+	set_cpu_present(0, true);
+	set_cpu_online(0, true);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1016,21 +1016,21 @@ int __ref smp_rescan_cpus(void)
 
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
-	newcpus = cpu_present_map;
+	cpumask_copy(&newcpus, cpu_present_mask);
 	rc = __smp_rescan_cpus();
 	if (rc)
 		goto out;
-	cpus_andnot(newcpus, cpu_present_map, newcpus);
-	for_each_cpu_mask(cpu, newcpus) {
+	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
+	for_each_cpu(cpu, &newcpus) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)
-			cpu_clear(cpu, cpu_present_map);
+			set_cpu_present(cpu, false);
 	}
 	rc = 0;
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
-	if (!cpus_empty(newcpus))
+	if (!cpumask_empty(&newcpus))
 		topology_schedule_update();
 	return rc;
 }
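
The smp.c hunks follow two recurring patterns: assignments to and from the cpu_present_map/cpu_online_map globals become cpumask_copy() from cpu_present_mask plus the set_cpu_present()/set_cpu_online() accessors, and the set operations and iterators (cpumask_xor(), cpumask_andnot(), for_each_cpu(), cpumask_empty()) take pointers. A condensed sketch of the rescan logic above, with a hypothetical function name and the real work elided:

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical condensed version of the rescan pattern shown above. */
static void report_new_cpus(void)
{
	cpumask_t newcpus;
	int cpu;

	/* was: newcpus = cpu_present_map; */
	cpumask_copy(&newcpus, cpu_present_mask);

	/* ... a rescan would mark further CPUs with set_cpu_present(cpu, true) ... */

	/* was: cpus_andnot(newcpus, cpu_present_map, newcpus); */
	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);

	/* was: for_each_cpu_mask(cpu, newcpus) */
	for_each_cpu(cpu, &newcpus)
		pr_info("newly present cpu %d\n", cpu);

	/* was: if (!cpus_empty(newcpus)) */
	if (!cpumask_empty(&newcpus))
		pr_info("a topology update would be scheduled here\n");
}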
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 87be655557aa..a59557f1fb5f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -810,7 +810,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
 	etr_sync.etr_port = port;
 	get_online_cpus();
 	atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
-	rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+	rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
 	put_online_cpus();
 	return rc;
 }
@@ -1579,7 +1579,7 @@ static void stp_work_fn(struct work_struct *work)
 	memset(&stp_sync, 0, sizeof(stp_sync));
 	get_online_cpus();
 	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-	stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+	stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
 	put_online_cpus();
 
 	if (!check_sync_clock())
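
In time.c the only change is the third argument of stop_machine(): it takes a const struct cpumask *, so instead of the address of the old cpu_online_map global the code now passes cpu_online_mask directly. A minimal sketch of that call shape (the callback and wrapper are hypothetical, standing in for etr_sync_clock()/stp_sync_clock()):

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* Hypothetical no-op callback, run on every online CPU by stop_machine(). */
static int dummy_sync(void *data)
{
	return 0;
}

static int run_sync_on_online_cpus(void)
{
	/* was: stop_machine(dummy_sync, NULL, &cpu_online_map); */
	return stop_machine(dummy_sync, NULL, cpu_online_mask);
}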
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 94b06c31fc8a..2eafb8c7a746 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -52,20 +52,20 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
 
-	cpus_clear(mask);
+	cpumask_clear(&mask);
 	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
 		cpumask_copy(&mask, cpumask_of(cpu));
 		return mask;
 	}
 	while (info) {
-		if (cpu_isset(cpu, info->mask)) {
+		if (cpumask_test_cpu(cpu, &info->mask)) {
 			mask = info->mask;
 			break;
 		}
 		info = info->next;
 	}
-	if (cpus_empty(mask))
-		mask = cpumask_of_cpu(cpu);
+	if (cpumask_empty(&mask))
+		cpumask_copy(&mask, cpumask_of(cpu));
 	return mask;
 }
 
@@ -85,10 +85,10 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
 		if (cpu_logical_map(lcpu) != rcpu)
 			continue;
 #ifdef CONFIG_SCHED_BOOK
-		cpu_set(lcpu, book->mask);
+		cpumask_set_cpu(lcpu, &book->mask);
 		cpu_book_id[lcpu] = book->id;
 #endif
-		cpu_set(lcpu, core->mask);
+		cpumask_set_cpu(lcpu, &core->mask);
 		cpu_core_id[lcpu] = core->id;
 		smp_cpu_polarization[lcpu] = tl_cpu->pp;
 	}
@@ -101,13 +101,13 @@ static void clear_masks(void)
 
 	info = &core_info;
 	while (info) {
-		cpus_clear(info->mask);
+		cpumask_clear(&info->mask);
 		info = info->next;
 	}
#ifdef CONFIG_SCHED_BOOK
 	info = &book_info;
 	while (info) {
-		cpus_clear(info->mask);
+		cpumask_clear(&info->mask);
 		info = info->next;
 	}
#endif