Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c      |  4 ++--
-rw-r--r--  arch/x86/kernel/cpuid.c              |  2 +-
-rw-r--r--  arch/x86/kernel/ldt.c                |  2 +-
-rw-r--r--  arch/x86/kernel/nmi_32.c             |  2 +-
-rw-r--r--  arch/x86/kernel/nmi_64.c             |  2 +-
-rw-r--r--  arch/x86/kernel/smp.c                |  2 +-
-rw-r--r--  arch/x86/kernel/vsyscall_64.c        |  2 +-
-rw-r--r--  arch/x86/kvm/vmx.c                   |  2 +-
-rw-r--r--  arch/x86/kvm/x86.c                   |  2 +-
-rw-r--r--  arch/x86/lib/msr-on-cpu.c            |  8 ++++----
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  |  2 +-
-rw-r--r--  arch/x86/xen/smp.c                   |  2 +-
12 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6a1e278d9323..290652cefddb 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -222,7 +222,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
         atomic_set(&data.gate,0);
 
         /* Start the ball rolling on other CPUs */
-        if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+        if (smp_call_function(ipi_handler, &data, 0) != 0)
                 panic("mtrr: timed out waiting for other CPUs\n");
 
         local_irq_save(flags);
@@ -822,7 +822,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-        smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+        smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index daff52a62248..336dd43c9158 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -95,7 +95,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
         for (; count; count -= 16) {
                 cmd.eax = pos;
                 cmd.ecx = pos >> 32;
-                smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+                smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
                 if (copy_to_user(tmp, &cmd, 16))
                         return -EFAULT;
                 tmp += 16;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0224c3637c73..cb0a6398c64b 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
                 load_LDT(pc);
                 mask = cpumask_of_cpu(smp_processor_id());
                 if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-                        smp_call_function(flush_ldt, NULL, 1, 1);
+                        smp_call_function(flush_ldt, NULL, 1);
                 preempt_enable();
 #else
                 load_LDT(pc);
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 84160f74eeb0..5562dab0bd20 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -87,7 +87,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
         if (nmi_watchdog == NMI_LOCAL_APIC)
-                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
         for_each_possible_cpu(cpu)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 5a29ded994fa..2f1e4f503c9e 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -96,7 +96,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
         if (nmi_watchdog == NMI_LOCAL_APIC)
-                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
         for (cpu = 0; cpu < NR_CPUS; cpu++)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 575aa3d7248a..56546e8a13ac 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -164,7 +164,7 @@ static void native_smp_send_stop(void)
         if (reboot_force)
                 return;
 
-        smp_call_function(stop_this_cpu, NULL, 0, 0);
+        smp_call_function(stop_this_cpu, NULL, 0);
         local_irq_save(flags);
         disable_local_APIC();
         local_irq_restore(flags);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 61efa2f7d564..0a03d57f9b3b 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -278,7 +278,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
         long cpu = (long)arg;
         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
         return NOTIFY_DONE;
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 540e95179074..5534fe59b5fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 {
         if (vmx->vcpu.cpu == -1)
                 return;
-        smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
+        smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
         vmx->launched = 0;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63a77caa59f1..0faa2546b1cd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
          * So need not to call smp_call_function_single() in that case.
          */
         if (vcpu->guest_mode && vcpu->cpu != cpu)
-                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
         put_cpu();
 }
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 57d043fa893e..d5a2b39f882b 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 
         rv.msr_no = msr_no;
         if (safe) {
-                smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+                smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
                 err = rv.err;
         } else {
-                smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+                smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
         }
         *l = rv.l;
         *h = rv.h;
@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
         rv.l = l;
         rv.h = h;
         if (safe) {
-                smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+                smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
                 err = rv.err;
         } else {
-                smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+                smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
         }
 
         return err;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index cb34407a9930..04f596eab749 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1113,7 +1113,7 @@ int safe_smp_processor_id(void)
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+        smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index b3786e749b8e..a1651d029ea8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -331,7 +331,7 @@ static void stop_self(void *v)
 
 void xen_smp_send_stop(void)
 {
-        smp_call_function(stop_self, NULL, 0, 0);
+        smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)
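
Every hunk above makes the same change: the obsolete nonatomic/retry argument is dropped from smp_call_function() and smp_call_function_single(), leaving only the wait flag. A minimal sketch of the resulting call shape, assuming the post-change in-kernel API; report_cpu() and example() are hypothetical helpers, not part of this patch:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical IPI handler, not part of the patch: report which CPU it ran on. */
static void report_cpu(void *info)
{
        pr_info("cross-call ran on CPU %d\n", smp_processor_id());
}

static void example(void)
{
        /* Run on all other CPUs and wait for completion (wait = 1). */
        smp_call_function(report_cpu, NULL, 1);

        /* Run on CPU 0 only, without waiting for it to finish (wait = 0). */
        smp_call_function_single(0, report_cpu, NULL, 0);
}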