Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          23
-rw-r--r--  arch/x86/kernel/acpi/sleep.c              30
-rw-r--r--  arch/x86/kernel/apic_32.c                  4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c        6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/non-fatal.c     2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c            4
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c     4
-rw-r--r--  arch/x86/kernel/cpuid.c                    2
-rw-r--r--  arch/x86/kernel/entry_64.S                 3
-rw-r--r--  arch/x86/kernel/io_apic_32.c               2
-rw-r--r--  arch/x86/kernel/io_apic_64.c               2
-rw-r--r--  arch/x86/kernel/irqinit_64.c               4
-rw-r--r--  arch/x86/kernel/ldt.c                      2
-rw-r--r--  arch/x86/kernel/microcode.c                4
-rw-r--r--  arch/x86/kernel/nmi.c                      6
-rw-r--r--  arch/x86/kernel/process.c                  2
-rw-r--r--  arch/x86/kernel/quirks.c                   2
-rw-r--r--  arch/x86/kernel/smp.c                    158
-rw-r--r--  arch/x86/kernel/smpboot.c                  4
-rw-r--r--  arch/x86/kernel/smpcommon.c               56
-rw-r--r--  arch/x86/kernel/stacktrace.c               2
-rw-r--r--  arch/x86/kernel/tlb_32.c                   2
-rw-r--r--  arch/x86/kernel/tlb_64.c                   2
-rw-r--r--  arch/x86/kernel/tsc.c                      2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c              4
-rw-r--r--  arch/x86/kvm/vmx.c                         4
-rw-r--r--  arch/x86/kvm/x86.c                         2
-rw-r--r--  arch/x86/lib/msr-on-cpu.c                  8
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c       98
-rw-r--r--  arch/x86/mm/pageattr.c                     4
-rw-r--r--  arch/x86/oprofile/nmi_int.c               10
-rw-r--r--  arch/x86/pci/amd_bus.c                     2
-rw-r--r--  arch/x86/xen/enlighten.c                   4
-rw-r--r--  arch/x86/xen/mmu.c                         2
-rw-r--r--  arch/x86/xen/smp.c                       135
-rw-r--r--  arch/x86/xen/xen-ops.h                     9
36 files changed, 182 insertions(+), 428 deletions(-)
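
Note on the theme of the hunks below: x86 switches from its private IPI plumbing to the kernel's generic SMP helpers (the new USE_GENERIC_SMP_HELPERS select). Callers drop the unused 'nonatomic' argument from smp_call_function(), smp_call_function_single() and on_each_cpu(), and a new CALL_FUNCTION_SINGLE vector is wired up. A minimal before/after sketch of the caller-side conversion (my_func is a made-up callback, not part of this patch):

	/* before: smp_call_function(func, info, nonatomic, wait) */
	smp_call_function(my_func, NULL, 0, 1);
	on_each_cpu(my_func, NULL, 0, 1);

	/* after: the dead 'nonatomic' flag is gone */
	smp_call_function(my_func, NULL, 1);
	on_each_cpu(my_func, NULL, 1);
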
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6958d6bcaf70..96e0c2ebc388 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -170,6 +170,7 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
+	select USE_GENERIC_SMP_HELPERS
 	default y
 
 config X86_32_SMP
@@ -447,7 +448,6 @@ config PARAVIRT_DEBUG
 config MEMTEST
 	bool "Memtest"
 	depends on X86_64
-	default y
 	help
 	  This option adds a kernel parameter 'memtest', which allows memtest
 	  to be set.
@@ -455,7 +455,7 @@ config MEMTEST
 	  memtest=1, mean do 1 test pattern;
 	  ...
 	  memtest=4, mean do 4 test patterns.
-	  If you are unsure how to answer this question, answer Y.
+	  If you are unsure how to answer this question, answer N.
 
 config X86_SUMMIT_NUMA
 	def_bool y
@@ -1135,21 +1135,18 @@ config MTRR
 	  See <file:Documentation/mtrr.txt> for more information.
 
 config MTRR_SANITIZER
-	def_bool y
+	bool
 	prompt "MTRR cleanup support"
 	depends on MTRR
 	help
-	  Convert MTRR layout from continuous to discrete, so some X driver
-	  could add WB entries.
+	  Convert MTRR layout from continuous to discrete, so X drivers can
+	  add writeback entries.
 
-	  Say N here if you see bootup problems (boot crash, boot hang,
-	  spontaneous reboots).
+	  Can be disabled with disable_mtrr_cleanup on the kernel command line.
+	  The largest mtrr entry size for a continous block can be set with
+	  mtrr_chunk_size.
 
-	  Could be disabled with disable_mtrr_cleanup. Also mtrr_chunk_size
-	  could be used to send largest mtrr entry size for continuous block
-	  to hold holes (aka. UC entries)
-
-	  If unsure, say Y.
+	  If unsure, say N.
 
 config MTRR_SANITIZER_ENABLE_DEFAULT
 	int "MTRR cleanup enable value (0-1)"
@@ -1166,7 +1163,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 	depends on MTRR_SANITIZER
 	help
 	  mtrr cleanup spare entries default, it can be changed via
-	  mtrr_spare_reg_nr=
+	  mtrr_spare_reg_nr=N on the kernel command line.
 
 config X86_PAT
 	bool
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index e6a4b564ccaa..793ad2045f58 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -23,6 +23,15 @@ static unsigned long acpi_realmode;
 static char temp_stack[10240];
 #endif
 
+/* XXX: this macro should move to asm-x86/segment.h and be shared with the
+   boot code... */
+#define GDT_ENTRY(flags, base, limit)		\
+	(((u64)(base & 0xff000000) << 32) |	\
+	 ((u64)flags << 40) |			\
+	 ((u64)(limit & 0x00ff0000) << 32) |	\
+	 ((u64)(base & 0x00ffffff) << 16) |	\
+	 ((u64)(limit & 0x0000ffff)))
+
 /**
  * acpi_save_state_mem - save kernel state
  *
@@ -51,18 +60,27 @@ int acpi_save_state_mem(void)
 	header->video_mode = saved_video_mode;
 
 	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
+
+	/*
+	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
+	 * that is, with limits set to 4 GB.  At least the Lenovo
+	 * Thinkpad X61 is known to need this for the video BIOS
+	 * initialization quirk to work; this is likely to also
+	 * be the case for other laptops or integrated video devices.
+	 */
+
 	/* GDT[0]: GDT self-pointer */
 	header->wakeup_gdt[0] =
 		(u64)(sizeof(header->wakeup_gdt) - 1) +
 		((u64)(acpi_wakeup_address +
 			((char *)&header->wakeup_gdt - (char *)acpi_realmode))
 				<< 16);
-	/* GDT[1]: real-mode-like code segment */
-	header->wakeup_gdt[1] = (0x009bULL << 40) +
-		((u64)acpi_wakeup_address << 16) + 0xffff;
-	/* GDT[2]: real-mode-like data segment */
-	header->wakeup_gdt[2] = (0x0093ULL << 40) +
-		((u64)acpi_wakeup_address << 16) + 0xffff;
+	/* GDT[1]: big real mode-like code segment */
+	header->wakeup_gdt[1] =
+		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
+	/* GDT[2]: big real mode-like data segment */
+	header->wakeup_gdt[2] =
+		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
 
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
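
The GDT_ENTRY() macro introduced above scatters base and limit into the packed 64-bit descriptor layout. With flags 0x809b it produces a present, exec/read code segment with the granularity bit set, so the 0xfffff limit covers the full 4 GB ("Big Real Mode"); 0x8093 is the matching data segment. A standalone user-space sketch of the same bit layout (the wakeup address below is made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* same bit layout as the kernel macro in the hunk above */
	#define GDT_ENTRY(flags, base, limit)			\
		((((uint64_t)((base) & 0xff000000)) << 32) |	\
		 (((uint64_t)(flags)) << 40) |			\
		 (((uint64_t)((limit) & 0x00ff0000)) << 32) |	\
		 (((uint64_t)((base) & 0x00ffffff)) << 16) |	\
		 ((uint64_t)((limit) & 0x0000ffff)))

	int main(void)
	{
		uint32_t wakeup = 0x9a000;	/* hypothetical wakeup address */

		/* 0x809b: G=1 (4 KiB granularity), present exec/read code
		 * segment; limit 0xfffff * 4 KiB = 4 GB */
		printf("GDT[1] = %#018llx\n",
		       (unsigned long long)GDT_ENTRY(0x809b, wakeup, 0xfffff));
		return 0;
	}
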
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 3e58b676d23b..a437d027f20b 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1340,6 +1340,10 @@ void __init smp_intr_init(void)
 
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+	/* IPI for single call function */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+		      call_function_single_interrupt);
 }
 #endif
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 987410745182..c4a7ec31394c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -364,7 +364,7 @@ static void mcheck_check_cpu(void *info)
 
 static void mcheck_timer(struct work_struct *work)
 {
-	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
+	on_each_cpu(mcheck_check_cpu, NULL, 1);
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	 * Collect entries that were still getting written before the
 	 * synchronize.
 	 */
-	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
+	on_each_cpu(collect_tscs, cpu_tsc, 1);
 	for (i = next; i < MCE_LOG_LEN; i++) {
 		if (mcelog.entry[i].finished &&
 		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -746,7 +746,7 @@ static void mce_restart(void)
 	if (next_interval)
 		cancel_delayed_work(&mcheck_work);
 	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1, 1);
+	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
 	if (next_interval)
 		schedule_delayed_work(&mcheck_work,
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index 00ccb6c14ec2..cc1fccdd31e0 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
 {
-	on_each_cpu(mce_checkregs, NULL, 1, 1);
+	on_each_cpu(mce_checkregs, NULL, 1);
 	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 105afe12beb0..6f23969c8faf 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	atomic_set(&data.gate,0);
 
 	/*  Start the ball rolling on other CPUs  */
-	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);
@@ -1682,7 +1682,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 2e9bef6e3aa3..6d4bdc02388a 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void)
 	if (atomic_read(&nmi_active) <= 0)
 		return;
 
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
 
 	if (wd_ops)
 		wd_ops->unreserve();
@@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void)
 		return;
 	}
 
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
 	touch_nmi_watchdog();
 }
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 71f1c2654bec..2de5fa2bbf77 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -96,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
 		if (copy_to_user(tmp, &cmd, 16))
 			return -EFAULT;
 		tmp += 16;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index ba41bf42748d..ae63e584c340 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -816,6 +816,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
 	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 603261a5885c..558abf4c796a 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1569,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy)
 
 void print_all_local_APICs(void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void /*__init*/ print_PIC(void)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b16ef029cf88..6510cde36b35 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1160,7 +1160,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void __apicdebuginit print_PIC(void)
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31f49e8f46a7..0373e88de95a 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -199,6 +199,10 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+	/* IPI for generic single function call */
+	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 21f2bae98c15..a8449571858a 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		load_LDT(pc);
 		mask = cpumask_of_cpu(smp_processor_id());
 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-			smp_call_function(flush_ldt, current->mm, 1, 1);
+			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
 		load_LDT(pc);
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index f47ba8156f3e..56b933119a04 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -491,7 +491,7 @@ MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
 #define microcode_dev_exit() do { } while(0)
 #endif
 
-static long get_next_ucode_from_buffer(void **mc, void *buf,
+static long get_next_ucode_from_buffer(void **mc, const u8 *buf,
 	unsigned long size, long offset)
 {
 	microcode_header_t *mc_header;
@@ -525,7 +525,7 @@ static int cpu_request_microcode(int cpu)
 	char name[30];
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	const struct firmware *firmware;
-	void *buf;
+	const u8 *buf;
 	unsigned long size;
 	long offset = 0;
 	int error;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 716b89284be0..ec024b3baad0 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -130,7 +130,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for_each_possible_cpu(cpu)
@@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
 	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4061d63aabe7..7dceea947232 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -132,7 +132,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 79bdcd11c66e..d13858818100 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -266,6 +266,8 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
 	hpet_print_force_info();
 }
 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+			 old_ich_force_enable_hpet_user);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
 			 old_ich_force_enable_hpet_user);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 0cb7aadc87cd..361b7a4c640c 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
 	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-	int nolock;
 	unsigned long flags;
 
 	if (reboot_force)
 		return;
 
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
+	smp_call_function(stop_this_cpu, NULL, 0);
 	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
 	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
 #else
 	add_pda(irq_call_count, 1);
 #endif
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
+	irq_exit();
 }
 
 struct smp_ops smp_ops = {
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = native_smp_send_stop,
 	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
-
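
With native_smp_call_function_mask() and its call_data/spinlock machinery deleted, arch/x86/kernel/smp.c is left with just the IPI senders and two thin vector handlers that bounce into generic_smp_call_function_interrupt() / generic_smp_call_function_single_interrupt(); the cross-CPU queuing now lives in the generic helpers (kernel/smp.c). A hedged usage sketch of the surviving entry points (bump_counter and poke_cpu are hypothetical, not from this patch):

	static atomic_t hits;

	/* runs on the target CPU, in IPI context */
	static void bump_counter(void *info)
	{
		atomic_inc((atomic_t *)info);
	}

	static void poke_cpu(int cpu)
	{
		/* wait=1: only return once bump_counter() has run remotely */
		smp_call_function_single(cpu, bump_counter, &hits, 1);
	}
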
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f35c2d8016ac..687376ab07e8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -327,12 +327,12 @@ static void __cpuinit start_secondary(void *unused)
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
 	 */
-	lock_ipi_call_lock();
+	ipi_call_lock_irq();
 #ifdef CONFIG_X86_IO_APIC
 	setup_vector_irq(smp_processor_id());
 #endif
 	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
+	ipi_call_unlock_irq();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
 	setup_secondary_clock();
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 3449064d141a..99941b37eca0 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
 	per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index c28c342c162f..a03e7f6d90c3 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -74,6 +74,7 @@ void save_stack_trace(struct stack_trace *trace)
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
@@ -81,3 +82,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index 9bb2363851af..fec1ecedc9b7 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 5039d0f097a2..dcbf7a1159ea 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3c36f92160c9..7603c0553909 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -358,6 +358,7 @@ static cycle_t read_tsc(void)
 		ret : clocksource_tsc.cycle_last;
 }
 
+#ifdef CONFIG_X86_64
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
@@ -365,6 +366,7 @@ static cycle_t __vsyscall_fn vread_tsc(void)
 	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
 		ret : __vsyscall_gtod_data.clock.cycle_last;
 }
+#endif
 
 static struct clocksource clocksource_tsc = {
 	.name = "tsc",
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e50740d32314..0b8b6690a86d 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -279,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
 	return NOTIFY_DONE;
 }
 
@@ -302,7 +302,7 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2);
 #endif
-	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 540e95179074..10ce6ee4c491 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 {
 	if (vmx->vcpu.cpu == -1)
 		return;
-	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
+	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
 	vmx->launched = 0;
 }
 
@@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vmx, 0, 1);
+		on_each_cpu(__vcpu_clear, vmx, 1);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63a77caa59f1..0faa2546b1cd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	 * So need not to call smp_call_function_single() in that case.
 	 */
 	if (vcpu->guest_mode && vcpu->cpu != cpu)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
 	put_cpu();
 }
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 57d043fa893e..d5a2b39f882b 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 
 	rv.msr_no = msr_no;
 	if (safe) {
-		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	}
 	*l = rv.l;
 	*h = rv.h;
@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 	rv.l = l;
 	rv.h = h;
 	if (safe) {
-		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 	}
 
 	return err;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 8dedd01e909f..ee0fba092157 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -950,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy)
 	halt();
 }
 
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	volatile unsigned long started;
-	volatile unsigned long finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	/* must take copy of wait because call_data may be replaced
-	 * unless the function is waiting for us to finish */
-	int wait = call_data->wait;
-	__u8 cpu = smp_processor_id();
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	if (!test_and_clear_bit(cpu, &call_data->started)) {
-		/* If the bit wasn't set, this could be a replay */
-		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-		       " with no call pending\n", cpu);
-		return;
-	}
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func) (info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
-	if (wait) {
-		mb();
-		clear_bit(cpu, &call_data->finished);
-	}
 }
 
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-			       void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-	struct call_data_struct data;
-	u32 mask = cpus_addr(cpumask)[0];
-
-	mask &= ~(1 << smp_processor_id());
-
-	if (!mask)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.started = mask;
-	data.wait = wait;
-	if (wait)
-		data.finished = mask;
-
-	spin_lock(&call_lock);
-	call_data = &data;
-	wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-	/* Wait for response */
-	while (data.started)
-		barrier();
-
-	if (wait)
-		while (data.finished)
-			barrier();
-
-	spin_unlock(&call_lock);
-
-	return 0;
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 }
 
 /* Sorry about the name.  In an APIC based system, the APICs
@@ -1094,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
 	smp_call_function_interrupt();
 }
 
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+	ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+	smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1114,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
 		smp_enable_irq_interrupt();
 	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
 		smp_call_function_interrupt();
+	if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+		smp_call_function_single_interrupt();
 	set_irq_regs(old_regs);
 }
 
@@ -1129,7 +1067,7 @@ static void do_flush_tlb_all(void *info)
 /* flush the TLB of every active CPU in the system */
 void flush_tlb_all(void)
 {
-	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+	on_each_cpu(do_flush_tlb_all, 0, 1);
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
@@ -1161,7 +1099,7 @@ int safe_smp_processor_id(void)
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+	smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires
@@ -1848,5 +1786,7 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
-	.smp_call_function_mask = voyager_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 47f4e2e4a096..65c6e46bf059 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -141,7 +141,7 @@ static void cpa_flush_all(unsigned long cache)
 {
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
+	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
 static void __cpa_flush_range(void *arg)
@@ -162,7 +162,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	BUG_ON(irqs_disabled());
 	WARN_ON(PAGE_ALIGN(start) != start);
 
-	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+	on_each_cpu(__cpa_flush_range, NULL, 1);
 
 	if (!cache)
 		return;
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2b6ad5b9f9d5..7f3329b55d2e 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -218,8 +218,8 @@ static int nmi_setup(void)
 	}
 
 	}
-	on_each_cpu(nmi_save_registers, NULL, 0, 1);
-	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+	on_each_cpu(nmi_save_registers, NULL, 1);
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
 	return 0;
 }
@@ -271,7 +271,7 @@ static void nmi_shutdown(void)
 {
 	struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
 	nmi_enabled = 0;
-	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
 	model->shutdown(msrs);
 	free_msrs();
@@ -286,7 +286,7 @@ static void nmi_cpu_start(void *dummy)
 
 static int nmi_start(void)
 {
-	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_start, NULL, 1);
 	return 0;
 }
 
@@ -298,7 +298,7 @@ static void nmi_cpu_stop(void *dummy)
 
 static void nmi_stop(void)
 {
-	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+	on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index a18141ae3f02..dbf532369711 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -578,7 +578,7 @@ static int __init enable_pci_io_ecs(void)
 	/* assume all cpus from fam10h have IO ECS */
 	if (boot_cpu_data.x86 < 0x10)
 		return 0;
-	on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1, 1);
+	on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1);
 	pci_probe |= PCI_HAS_IO_ECS;
 	return 0;
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dcd4e51f2f16..bb508456ef52 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1214,7 +1214,9 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
-	.smp_call_function_mask = xen_smp_call_function_mask,
+
+	.send_call_func_ipi = xen_smp_send_call_function_ipi,
+	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif	/* CONFIG_SMP */
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 42b3b9ed641d..ff0aa74afaa1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -796,7 +796,7 @@ static void drop_mm_ref(struct mm_struct *mm)
 	}
 
 	if (!cpus_empty(mask))
-		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d2e3c20127d7..233156f39b7f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -36,27 +36,14 @@
 #include "mmu.h"
 
 cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
 
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back. Nothing to do,
@@ -128,6 +115,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(debug_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+				    cpu,
+				    xen_call_function_single_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfuncsingle_irq, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -137,6 +135,9 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
 	if (per_cpu(debug_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
 	return rc;
 }
 
@@ -336,7 +337,7 @@ static void stop_self(void *v)
 
 void xen_smp_send_stop(void)
 {
-	smp_call_function(stop_self, NULL, 0, 0);
+	smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)
@@ -344,7 +345,6 @@ void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
 	unsigned cpu;
@@ -355,83 +355,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run if they need to. */
+	for_each_cpu_mask(cpu, mask) {
+		if (xen_vcpu_stolen(cpu)) {
+			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+			break;
+		}
+	}
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
-	if (wait) {
-		mb();		/* commit everything before setting finished */
-		atomic_inc(&call_data->finished);
-	}
-
 	return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-	struct call_data_struct data;
-	int cpus, cpu;
-	bool yield;
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();			/* write everything before IPI */
-
-	/* Send a message to other CPUs and wait for them to respond */
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-	/* Make sure other vcpus get a chance to run if they need to. */
-	yield = false;
-	for_each_cpu_mask(cpu, mask)
-		if (xen_vcpu_stolen(cpu))
-			yield = true;
-
-	if (yield)
-		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus ||
-	       (wait && atomic_read(&data.finished) != cpus))
-		cpu_relax();
-
-	spin_unlock(&call_lock);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 
-	return 0;
+	return IRQ_HANDLED;
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d852ddbb3448..6f4b1045c1c2 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -55,13 +55,8 @@ void xen_smp_cpus_done(unsigned int max_cpus);
 
 void xen_smp_send_stop(void);
 void xen_smp_send_reschedule(int cpu);
-int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			   int wait);
-int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-				 int nonatomic, int wait);
-
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait);
+void xen_smp_send_call_function_ipi(cpumask_t mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
 
 extern cpumask_t xen_cpu_initialized_map;
 