Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 8 ++++----
-rw-r--r--  arch/x86/kernel/irq.c                 | 4 ++--
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c         | 6 +++---
3 files changed, 9 insertions, 9 deletions
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index e658f21681c8..d9d0bd2faaf4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void)
 
 	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
 
-	__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+	cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
 	for_each_online_cpu(cpu) {
 		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
 			continue;
-		__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
-		__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
+		cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+		cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
 	}
 }
 
@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void)
 
 	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
 
-	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
+	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
 	register_hotcpu_notifier(&x2apic_cpu_notifier);
 	return 1;
 }
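Note: the x2apic_cluster.c hunks above simply swap the deprecated __cpu_set() helper for cpumask_set_cpu(); both mark a CPU in a cpumask. The following is a minimal illustrative sketch, not code from this commit (the function and mask names are invented), showing the replacement call on a struct cpumask pointer such as the per-cpu cpus_in_cluster mask:

#include <linux/cpumask.h>

/* Hypothetical helper: add @cpu to @cluster_mask using the current cpumask API. */
static void example_join_cluster(struct cpumask *cluster_mask, unsigned int cpu)
{
        /* old style: __cpu_set(cpu, cluster_mask); */
        cpumask_set_cpu(cpu, cluster_mask);
}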
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 67b1cbe0093a..e5952c225532 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void)
 
 	this_cpu = smp_processor_id();
 	cpumask_copy(&online_new, cpu_online_mask);
-	cpu_clear(this_cpu, online_new);
+	cpumask_clear_cpu(this_cpu, &online_new);
 
 	this_count = 0;
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void)
 
 		data = irq_desc_get_irq_data(desc);
 		cpumask_copy(&affinity_new, data->affinity);
-		cpu_clear(this_cpu, affinity_new);
+		cpumask_clear_cpu(this_cpu, &affinity_new);
 
 		/* Do not count inactive or per-cpu irqs. */
 		if (!irq_has_action(irq) || irqd_is_per_cpu(data))
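Note: the irq.c hunks make the same kind of substitution for clearing a bit: cpumask_clear_cpu() takes a pointer to the mask, which is why the patch adds the explicit '&' in front of online_new and affinity_new. A small self-contained sketch of the same pattern, illustrative only and not kernel code (the helper name is invented, and the caller is assumed to have preemption disabled):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical helper: copy the online mask and drop the current CPU. */
static void example_exclude_self(struct cpumask *online_new)
{
        int this_cpu = smp_processor_id();      /* caller must disable preemption */

        cpumask_copy(online_new, cpu_online_mask);
        /* old style: cpu_clear(this_cpu, *online_new); */
        cpumask_clear_cpu(this_cpu, online_new);
}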
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 994798548b1a..3b6ec42718e4 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
 	struct reset_args reset_args;
 
 	reset_args.sender = sender;
-	cpus_clear(*mask);
+	cpumask_clear(mask);
 	/* find a single cpu for each uvhub in this distribution mask */
 	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
 	/* each bit is a pnode relative to the partition base pnode */
@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
 			continue;
 		apnode = pnode + bcp->partition_base_pnode;
 		cpu = pnode_to_first_cpu(apnode, smaster);
-		cpu_set(cpu, *mask);
+		cpumask_set_cpu(cpu, mask);
 	}
 
 	/* IPI all cpus; preemption is already disabled */
@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	/* don't actually do a shootdown of the local cpu */
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
-	if (cpu_isset(cpu, *cpumask))
+	if (cpumask_test_cpu(cpu, cpumask))
 		stat->s_ntargself++;
 
 	bau_desc = bcp->descriptor_base;
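Note: the tlb_uv.c hunks cover the remaining conversions: cpus_clear() becomes cpumask_clear(), cpu_set() becomes cpumask_set_cpu(), and cpu_isset() becomes cpumask_test_cpu(), each now taking a struct cpumask pointer rather than a dereferenced mask. Below is a compact illustrative sketch combining these calls on a dynamically allocated mask; the helper name and logic are hypothetical and are not code from the driver:

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical helper: exercise the converted cpumask calls on a fresh mask. */
static int example_build_flush_mask(const struct cpumask *src, unsigned int self)
{
        cpumask_var_t flush_mask;
        int self_targeted;

        if (!alloc_cpumask_var(&flush_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(flush_mask);                      /* old: cpus_clear(*flush_mask) */
        cpumask_set_cpu(self, flush_mask);              /* old: cpu_set(self, *flush_mask) */
        self_targeted = cpumask_test_cpu(self, src);    /* old: cpu_isset(self, *src) */

        /* flush_mask = src minus the local cpu (overwrites the bit set above) */
        cpumask_andnot(flush_mask, src, cpumask_of(self));

        free_cpumask_var(flush_mask);
        return self_targeted;
}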