Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                     |   8
-rw-r--r--  arch/x86/Kconfig.debug               |   4
-rw-r--r--  arch/x86/boot/tty.c                  |   2
-rw-r--r--  arch/x86/include/asm/acpi.h          |   1
-rw-r--r--  arch/x86/include/asm/ds.h            |   6
-rw-r--r--  arch/x86/include/asm/ftrace.h        |  34
-rw-r--r--  arch/x86/include/asm/iomap.h         |  30
-rw-r--r--  arch/x86/include/asm/iommu.h         |   1
-rw-r--r--  arch/x86/include/asm/mmzone_32.h     |   4
-rw-r--r--  arch/x86/include/asm/thread_info.h   |   2
-rw-r--r--  arch/x86/include/asm/uaccess_64.h    |   2
-rw-r--r--  arch/x86/include/asm/unistd_64.h     |   4
-rw-r--r--  arch/x86/kernel/Makefile             |   8
-rw-r--r--  arch/x86/kernel/acpi/boot.c          |   1
-rw-r--r--  arch/x86/kernel/amd_iommu.c          |   2
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c     |   6
-rw-r--r--  arch/x86/kernel/ds.c                 |  81
-rw-r--r--  arch/x86/kernel/early-quirks.c       |  18
-rw-r--r--  arch/x86/kernel/entry_32.S           |  42
-rw-r--r--  arch/x86/kernel/entry_64.S           |   5
-rw-r--r--  arch/x86/kernel/es7000_32.c          |   9
-rw-r--r--  arch/x86/kernel/ftrace.c             | 312
-rw-r--r--  arch/x86/kernel/hpet.c               |   4
-rw-r--r--  arch/x86/kernel/i387.c               |   2
-rw-r--r--  arch/x86/kernel/io_apic.c            |  36
-rw-r--r--  arch/x86/kernel/kvmclock.c           |   2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c     |   2
-rw-r--r--  arch/x86/kernel/reboot.c             |   9
-rw-r--r--  arch/x86/kernel/setup.c              |   2
-rw-r--r--  arch/x86/kernel/stacktrace.c         |  64
-rw-r--r--  arch/x86/kernel/tsc_sync.c           |   4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c        |   3
-rw-r--r--  arch/x86/kernel/xsave.c              |   2
-rw-r--r--  arch/x86/kvm/Kconfig                 |   2
-rw-r--r--  arch/x86/kvm/i8254.c                 |   4
-rw-r--r--  arch/x86/kvm/mmu.c                   |   2
-rw-r--r--  arch/x86/kvm/vmx.c                   |   3
-rw-r--r--  arch/x86/kvm/vmx.h                   |   1
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  |  16
-rw-r--r--  arch/x86/mm/Makefile                 |   3
-rw-r--r--  arch/x86/mm/fault.c                  |   2
-rw-r--r--  arch/x86/mm/numa_32.c                |  35
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c    |   2
-rw-r--r--  arch/x86/power/hibernate_32.c        |   4
-rw-r--r--  arch/x86/vdso/vclock_gettime.c       |   3
-rw-r--r--  arch/x86/xen/mmu.c                   |  21
46 files changed, 682 insertions(+), 128 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4cf0ab13d187..e49a4fd718fe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,11 +29,14 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select USER_STACKTRACE_SUPPORT
 
 config ARCH_DEFCONFIG
 	string
@@ -167,9 +170,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
 
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
+
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2a3dfbd5e677..fa013f529b74 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -186,14 +186,10 @@ config IOMMU_LEAK
186 Add a simple leak tracer to the IOMMU code. This is useful when you 186 Add a simple leak tracer to the IOMMU code. This is useful when you
187 are debugging a buggy device driver that leaks IOMMU mappings. 187 are debugging a buggy device driver that leaks IOMMU mappings.
188 188
189config MMIOTRACE_HOOKS
190 bool
191
192config MMIOTRACE 189config MMIOTRACE
193 bool "Memory mapped IO tracing" 190 bool "Memory mapped IO tracing"
194 depends on DEBUG_KERNEL && PCI 191 depends on DEBUG_KERNEL && PCI
195 select TRACING 192 select TRACING
196 select MMIOTRACE_HOOKS
197 help 193 help
198 Mmiotrace traces Memory Mapped I/O access and is meant for 194 Mmiotrace traces Memory Mapped I/O access and is meant for
199 debugging and reverse engineering. It is called from the ioremap 195 debugging and reverse engineering. It is called from the ioremap
diff --git a/arch/x86/boot/tty.c b/arch/x86/boot/tty.c
index 0be77b39328a..7e8e8b25f5f6 100644
--- a/arch/x86/boot/tty.c
+++ b/arch/x86/boot/tty.c
@@ -74,7 +74,7 @@ static int kbd_pending(void)
 {
 	u8 pending;
 	asm volatile("int $0x16; setnz %0"
-		     : "=rm" (pending)
+		     : "=qm" (pending)
 		     : "a" (0x0100));
 	return pending;
 }
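
The "=rm" to "=qm" change above is needed because setnz stores a byte: the
output operand must live in a byte-addressable register (the "q" class,
a/b/c/d on 32-bit x86) or in memory, whereas "r" also allows %esi or %edi,
which have no 8-bit subregister. A minimal user-space sketch of the same
constraint (assumes gcc-style inline asm on x86; not part of the patch):

	/* Illustrative only: "q" restricts the output to a register
	 * with an 8-bit subregister, which setcc instructions need. */
	static inline int is_zero(unsigned int x)
	{
		unsigned char r;

		asm("testl %1, %1; setz %0" : "=qm" (r) : "r" (x));
		return r;
	}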
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d676d8ecde9..9830681446ad 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void)
113 acpi_pci_disabled = 1; 113 acpi_pci_disabled = 1;
114 acpi_noirq_set(); 114 acpi_noirq_set();
115} 115}
116extern int acpi_irq_balance_set(char *str);
117 116
118/* routines for saving/restoring kernel state */ 117/* routines for saving/restoring kernel state */
119extern int acpi_save_state_mem(void); 118extern int acpi_save_state_mem(void);
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
index 72c5a190bf48..a95008457ea4 100644
--- a/arch/x86/include/asm/ds.h
+++ b/arch/x86/include/asm/ds.h
@@ -23,12 +23,13 @@
 #ifndef _ASM_X86_DS_H
 #define _ASM_X86_DS_H
 
-#ifdef CONFIG_X86_DS
 
 #include <linux/types.h>
 #include <linux/init.h>
 
 
+#ifdef CONFIG_X86_DS
+
 struct task_struct;
 
 /*
@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
 
 #else /* CONFIG_X86_DS */
 
-#define ds_init_intel(config) do {} while (0)
+struct cpuinfo_x86;
+static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
 
 #endif /* CONFIG_X86_DS */
 #endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9e8bc29b8b17..754a3e082f94 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,8 +17,40 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
17 */ 17 */
18 return addr - 1; 18 return addr - 1;
19} 19}
20#endif
21 20
21#ifdef CONFIG_DYNAMIC_FTRACE
22
23struct dyn_arch_ftrace {
24 /* No extra data needed for x86 */
25};
26
27#endif /* CONFIG_DYNAMIC_FTRACE */
28#endif /* __ASSEMBLY__ */
22#endif /* CONFIG_FUNCTION_TRACER */ 29#endif /* CONFIG_FUNCTION_TRACER */
23 30
31#ifdef CONFIG_FUNCTION_RET_TRACER
32
33#ifndef __ASSEMBLY__
34
35/*
36 * Stack of return addresses for functions
37 * of a thread.
38 * Used in struct thread_info
39 */
40struct ftrace_ret_stack {
41 unsigned long ret;
42 unsigned long func;
43 unsigned long long calltime;
44};
45
46/*
47 * Primary handler of a function return.
48 * It relays on ftrace_return_to_handler.
49 * Defined in entry32.S
50 */
51extern void return_to_handler(void);
52
53#endif /* __ASSEMBLY__ */
54#endif /* CONFIG_FUNCTION_RET_TRACER */
55
24#endif /* _ASM_X86_FTRACE_H */ 56#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
new file mode 100644
index 000000000000..c1f06289b14b
--- /dev/null
+++ b/arch/x86/include/asm/iomap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type);
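
The two declarations above are the whole interface: map one page frame into
a per-CPU atomic kmap slot with a caller-chosen protection, use it, unmap
it, much like kmap_atomic(). A hedged usage sketch; the slot constant and
pgprot value are illustrative choices, not mandated by this header:

	/* Sketch: write-combining access to one framebuffer page,
	 * inside an atomic section, as with kmap_atomic(). */
	void *vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);

	memcpy(vaddr + off, data, len);		/* off + len <= PAGE_SIZE */
	iounmap_atomic(vaddr, KM_USER0);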
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index e4a552d44465..0b500c5b6446 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,7 +6,6 @@ extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops; 6extern struct dma_mapping_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
8extern int iommu_detected; 8extern int iommu_detected;
9extern int dmar_disabled;
10 9
11extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); 10extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
12 11
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 485bdf059ffb..07f1af494ca5 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
34 34
35extern int early_pfn_to_nid(unsigned long pfn); 35extern int early_pfn_to_nid(unsigned long pfn);
36 36
37extern void resume_map_numa_kva(pgd_t *pgd);
38
37#else /* !CONFIG_NUMA */ 39#else /* !CONFIG_NUMA */
38 40
39#define get_memcfg_numa get_memcfg_numa_flat 41#define get_memcfg_numa get_memcfg_numa_flat
40 42
43static inline void resume_map_numa_kva(pgd_t *pgd) {}
44
41#endif /* CONFIG_NUMA */ 45#endif /* CONFIG_NUMA */
42 46
43#ifdef CONFIG_DISCONTIGMEM 47#ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,8 @@
20struct task_struct; 20struct task_struct;
21struct exec_domain; 21struct exec_domain;
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/ftrace.h>
24#include <asm/atomic.h>
23 25
24struct thread_info { 26struct thread_info {
25 struct task_struct *task; /* main task structure */ 27 struct task_struct *task; /* main task structure */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 664f15280f14..f8cfd00db450 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
46 return ret; 46 return ret;
47 case 10: 47 case 10:
48 __get_user_asm(*(u64 *)dst, (u64 __user *)src, 48 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
49 ret, "q", "", "=r", 16); 49 ret, "q", "", "=r", 10);
50 if (unlikely(ret)) 50 if (unlikely(ret))
51 return ret; 51 return ret;
52 __get_user_asm(*(u16 *)(8 + (char *)dst), 52 __get_user_asm(*(u16 *)(8 + (char *)dst),
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 834b2c1d89fb..d2e415e6666f 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287 640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288 642#define __NR_accept4 288
643__SYSCALL(__NR_paccept, sys_paccept) 643__SYSCALL(__NR_accept4, sys_accept4)
644#define __NR_signalfd4 289 644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4) 645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290 646#define __NR_eventfd2 290
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..af2bc36ca1c4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
14CFLAGS_REMOVE_ftrace.o = -pg 14CFLAGS_REMOVE_ftrace.o = -pg
15endif 15endif
16 16
17ifdef CONFIG_FUNCTION_RET_TRACER
18# Don't trace __switch_to() but let it for function tracer
19CFLAGS_REMOVE_process_32.o = -pg
20endif
21
17# 22#
18# vsyscalls (which work on the user stack) should have 23# vsyscalls (which work on the user stack) should have
19# no stack-protector checks: 24# no stack-protector checks:
@@ -41,7 +46,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
41obj-y += process.o 46obj-y += process.o
42obj-y += i387.o xsave.o 47obj-y += i387.o xsave.o
43obj-y += ptrace.o 48obj-y += ptrace.o
44obj-y += ds.o 49obj-$(CONFIG_X86_DS) += ds.o
45obj-$(CONFIG_X86_32) += tls.o 50obj-$(CONFIG_X86_32) += tls.o
46obj-$(CONFIG_IA32_EMULATION) += tls.o 51obj-$(CONFIG_IA32_EMULATION) += tls.o
47obj-y += step.o 52obj-y += step.o
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
65obj-$(CONFIG_X86_IO_APIC) += io_apic.o 70obj-$(CONFIG_X86_IO_APIC) += io_apic.o
66obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 71obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
67obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 72obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
73obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
68obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 74obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
69obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 75obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
70obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 76obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c1f76abae9e..4c51a2f8fd31 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void)
1343 error = acpi_parse_madt_ioapic_entries(); 1343 error = acpi_parse_madt_ioapic_entries();
1344 if (!error) { 1344 if (!error) {
1345 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; 1345 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
1346 acpi_irq_balance_set(NULL);
1347 acpi_ioapic = 1; 1346 acpi_ioapic = 1;
1348 1347
1349 smp_found_config = 1; 1348 smp_found_config = 1;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 331b318304eb..e4899e0e8787 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -537,7 +537,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
537 address >>= PAGE_SHIFT; 537 address >>= PAGE_SHIFT;
538 iommu_area_free(dom->bitmap, address, pages); 538 iommu_area_free(dom->bitmap, address, pages);
539 539
540 if (address + pages >= dom->next_bit) 540 if (address >= dom->next_bit)
541 dom->need_flush = true; 541 dom->need_flush = true;
542} 542}
543 543
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0cdcda35a05f..30ae2701b3df 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
121LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 121LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
122 we find in ACPI */ 122 we find in ACPI */
123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
124int amd_iommu_isolate; /* if 1, device isolation is enabled */ 124int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
126 126
127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
1213 for (; *str; ++str) { 1213 for (; *str; ++str) {
1214 if (strncmp(str, "isolate", 7) == 0) 1214 if (strncmp(str, "isolate", 7) == 0)
1215 amd_iommu_isolate = 1; 1215 amd_iommu_isolate = 1;
1216 if (strncmp(str, "fullflush", 11) == 0) 1216 if (strncmp(str, "share", 5) == 0)
1217 amd_iommu_isolate = 0;
1218 if (strncmp(str, "fullflush", 9) == 0)
1217 amd_iommu_unmap_flush = true; 1219 amd_iommu_unmap_flush = true;
1218 } 1220 }
1219 1221
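
Two things change in the option parser above: a "share" keyword is added to
turn isolation back off now that it defaults to on, and the "fullflush"
comparison length is corrected from 11 to 9 ("fullflush" is nine
characters). With n = 11 the comparison runs past the literal, so the
terminating NUL of "fullflush" is compared against whatever follows on the
command line, and a prefix such as "fullflush,..." never matches. A
self-contained demonstration (user-space C, not part of the patch):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *cmdline = "fullflush,isolate";

		/* n = 11: compares the NUL in "fullflush" against ','
		 * and fails whenever any text follows the keyword. */
		printf("n=11 -> %d\n", strncmp(cmdline, "fullflush", 11));

		/* n = 9: compares exactly the nine prefix bytes. */
		printf("n=9  -> %d\n", strncmp(cmdline, "fullflush", 9));
		return 0;
	}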
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index c570252905a1..d6938d9351cf 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
  */
 
 
-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>
 
 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
 	struct ds_context *context;
+	unsigned long irq;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	context = (task ? task->thread.ds_ctx : this_system_context);
 	if (context)
 		context->count++;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 	return context;
 }
@@ -226,18 +225,16 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
 	struct ds_context **p_context =
 		(task ? &task->thread.ds_ctx : &this_system_context);
 	struct ds_context *context = *p_context;
+	unsigned long irq;
 
 	if (!context) {
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
-
 		if (!context)
 			return NULL;
 
@@ -247,18 +244,27 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 			return NULL;
 		}
 
-		*p_context = context;
+		spin_lock_irqsave(&ds_lock, irq);
 
-		context->this = p_context;
-		context->task = task;
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+
+			context = *p_context;
+		} else {
+			*p_context = context;
 
-		if (task)
-			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+			context->this = p_context;
+			context->task = task;
 
-		if (!task || (task == current))
-			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
+			if (task)
+				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
-		get_tracer(task);
+			if (!task || (task == current))
+				wrmsrl(MSR_IA32_DS_AREA,
+				       (unsigned long)context->ds);
+		}
+		spin_unlock_irqrestore(&ds_lock, irq);
 	}
 
 	context->count++;
@@ -272,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+	unsigned long irq;
+
 	if (!context)
 		return;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	if (--context->count)
 		goto out;
@@ -297,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
 	kfree(context->ds);
 	kfree(context);
  out:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 }
 
 
@@ -368,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 	struct ds_context *context;
 	unsigned long buffer, adj;
 	const unsigned long alignment = (1 << 3);
+	unsigned long irq;
 	int error = 0;
 
 	if (!ds_cfg.sizeof_ds)
@@ -382,25 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 		return -EOPNOTSUPP;
 
 
-	spin_lock(&ds_lock);
-
-	if (!check_tracer(task))
-		return -EPERM;
-
-	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ds_lock, irq);
+
+	error = -EPERM;
+	if (!check_tracer(task))
 		goto out_unlock;
 
+	get_tracer(task);
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
-		goto out_unlock;
+		goto out_put_tracer;
 	error = -EPERM;
 	if (context->owner[qual] != NULL)
-		goto out_unlock;
+		goto out_put_tracer;
 	context->owner[qual] = current;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 
 	error = -ENOMEM;
@@ -448,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
 	context->owner[qual] = NULL;
 	ds_put_context(context);
+	put_tracer(task);
+	return error;
+
+ out_put_tracer:
+	spin_unlock_irqrestore(&ds_lock, irq);
+	ds_put_context(context);
+	put_tracer(task);
 	return error;
 
  out_unlock:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 	ds_put_context(context);
 	return error;
 }
@@ -801,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
 	.sizeof_ds    = sizeof(long) * 12,
 	.sizeof_field = sizeof(long),
 	.sizeof_rec[ds_bts]   = sizeof(long) * 3,
+#ifdef __i386__
 	.sizeof_rec[ds_pebs]  = sizeof(long) * 10
+#else
+	.sizeof_rec[ds_pebs]  = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
 	.sizeof_ds    = 8 * 12,
 	.sizeof_field = 8,
 	.sizeof_rec[ds_bts]   = 8 * 3,
+#ifdef __i386__
 	.sizeof_rec[ds_pebs]  = 8 * 10
+#else
 	.sizeof_rec[ds_pebs]  = 8 * 18
+#endif
 };
 
 static inline void
@@ -860,4 +886,3 @@ void ds_free(struct ds_context *context)
 	while (leftovers--)
 		ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */
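
The recurring change in ds.c above is the same pattern each time: every
plain spin_lock(&ds_lock) becomes spin_lock_irqsave(), so the lock is safe
against interrupt-context lockers, and the kzalloc(GFP_KERNEL) in
ds_alloc_context() is moved outside the lock with a re-check of *p_context
afterwards, since GFP_KERNEL allocations may sleep. The locking half of the
pattern, shown in isolation (a sketch, not lifted verbatim from the patch):

	static DEFINE_SPINLOCK(example_lock);

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* Disables local interrupts and saves their previous
		 * state, so an IRQ handler on this CPU can never spin
		 * on a lock its own CPU already holds (deadlock). */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}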
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3ce029ffaa55..1b894b72c0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-#ifdef CONFIG_DMAR
-static void __init intel_g33_dmar(int num, int slot, int func)
-{
-	struct acpi_table_header *dmar_tbl;
-	acpi_status status;
-
-	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
-	if (ACPI_SUCCESS(status)) {
-		printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
-		dmar_disabled = 1;
-	}
-}
-#endif
-
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-#ifdef CONFIG_DMAR
-	{ PCI_VENDOR_ID_INTEL, 0x29c0,
-	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
-#endif
 	{}
 };
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 28b597ef9ca1..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1180,8 +1183,15 @@ END(ftrace_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	cmpl $ftrace_stub, ftrace_function_return
+	jnz ftrace_return_caller
+#endif
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1200,12 +1210,42 @@ trace:
 	popl %edx
 	popl %ecx
 	popl %eax
-
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ENTRY(ftrace_return_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %edx
+	lea 0x4(%ebp), %eax
+	call prepare_ftrace_return
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+END(ftrace_return_caller)
+
+.globl return_to_handler
+return_to_handler:
+	pushl $0
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	call ftrace_return_to_handler
+	movl %eax, 0xc(%esp)
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+#endif
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
 
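
ftrace_return_caller above passes prepare_ftrace_return() the location of
the saved return address (4(%ebp)), so the C side can overwrite it with
return_to_handler; the original address is parked on a small per-task stack
and handed back when the traced function returns. A user-space model of
just that bookkeeping (a sketch; the fixed depth and the constants are
illustrative, the real per-task stack lives in ftrace.c further down):

	#include <stdio.h>

	#define RET_STACK_DEPTH 50

	static unsigned long ret_stack[RET_STACK_DEPTH];
	static int curr_ret = -1;

	/* prepare_ftrace_return(): divert *parent, remember the original */
	static int push_return(unsigned long *parent, unsigned long hooker)
	{
		if (curr_ret == RET_STACK_DEPTH - 1)
			return -1;	/* stack full: leave untouched */
		ret_stack[++curr_ret] = *parent;
		*parent = hooker;
		return 0;
	}

	/* ftrace_return_to_handler(): hand back the original address */
	static unsigned long pop_return(void)
	{
		return ret_stack[curr_ret--];
	}

	int main(void)
	{
		unsigned long slot = 0x8048123;	/* fake saved return addr */

		push_return(&slot, 0xdeadbeef);	/* 0xdeadbeef = the hook */
		printf("diverted to %#lx, returns to %#lx\n",
		       slot, pop_return());
		return 0;
	}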
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..08aa6b10933c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -68,6 +68,8 @@ ENTRY(mcount)
68END(mcount) 68END(mcount)
69 69
70ENTRY(ftrace_caller) 70ENTRY(ftrace_caller)
71 cmpl $0, function_trace_stop
72 jne ftrace_stub
71 73
72 /* taken from glibc */ 74 /* taken from glibc */
73 subq $0x38, %rsp 75 subq $0x38, %rsp
@@ -103,6 +105,9 @@ END(ftrace_caller)
103 105
104#else /* ! CONFIG_DYNAMIC_FTRACE */ 106#else /* ! CONFIG_DYNAMIC_FTRACE */
105ENTRY(mcount) 107ENTRY(mcount)
108 cmpl $0, function_trace_stop
109 jne ftrace_stub
110
106 cmpq $ftrace_stub, ftrace_trace_function 111 cmpq $ftrace_stub, ftrace_trace_function
107 jnz trace 112 jnz trace
108.globl ftrace_stub 113.globl ftrace_stub
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index f454c78fcef6..0aa2c443d600 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
250{ 250{
251 struct acpi_table_header *header = NULL; 251 struct acpi_table_header *header = NULL;
252 int i = 0; 252 int i = 0;
253 acpi_size tbl_size;
254 253
255 while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { 254 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
256 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { 255 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
257 struct oem_table *t = (struct oem_table *)header; 256 struct oem_table *t = (struct oem_table *)header;
258 257
259 oem_addrX = t->OEMTableAddr; 258 oem_addrX = t->OEMTableAddr;
260 oem_size = t->OEMTableSize; 259 oem_size = t->OEMTableSize;
261 early_acpi_os_unmap_memory(header, tbl_size);
262 260
263 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, 261 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
264 oem_size); 262 oem_size);
265 return 0; 263 return 0;
266 } 264 }
267 early_acpi_os_unmap_memory(header, tbl_size);
268 } 265 }
269 return -1; 266 return -1;
270} 267}
271 268
272void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 269void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
273{ 270{
274 if (!oem_addr)
275 return;
276
277 __acpi_unmap_table((char *)oem_addr, oem_size);
278} 271}
279#endif 272#endif
280 273
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50ea0ac8c9bf..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,17 @@
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -56,7 +53,143 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static int mod_code_write;		/* set when NMI should do the write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 *    (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+	/* Must have in_nmi seen before reading write flag */
+	smp_mb();
+	if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing in_nmi */
+	smp_wmb();
+	atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+	int waited = 0;
+
+	while (atomic_read(&in_nmi)) {
+		waited = 1;
+		cpu_relax();
+	}
+
+	if (waited)
+		nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_wmb();
+
+	mod_code_write = 1;
+
+	/* Make sure write bit is visible before we wait on NMIs */
+	smp_mb();
+
+	wait_for_nmi();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_wmb();
+
+	mod_code_write = 0;
+
+	/* make sure NMIs see the cleared bit */
+	smp_mb();
+
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
+
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -81,7 +214,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	sync_core();
@@ -89,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop_replace();
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +321,139 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+				unsigned long *func, unsigned long *overrun)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+			&trace.overrun);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* NMIs are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to
+	 * ignore such a protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
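
ftrace_call_replace() above builds the 5-byte x86 call that dynamic ftrace
swaps in and out of every mcount site: opcode 0xe8 followed by a 32-bit
displacement relative to the end of the instruction. The same computation
in stand-alone form (a user-space sketch, not the kernel code):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define MCOUNT_INSN_SIZE 5	/* e8 + rel32 */

	static void make_call(uint8_t insn[MCOUNT_INSN_SIZE],
			      uint32_t ip, uint32_t target)
	{
		/* displacement is relative to the next instruction */
		int32_t rel = (int32_t)(target - (ip + MCOUNT_INSN_SIZE));

		insn[0] = 0xe8;
		memcpy(&insn[1], &rel, sizeof(rel));
	}

	int main(void)
	{
		uint8_t insn[MCOUNT_INSN_SIZE];
		int i;

		make_call(insn, 0x1000, 0x2000);
		for (i = 0; i < MCOUNT_INSN_SIZE; i++)
			printf("%02x ", insn[i]);
		printf("\n");	/* prints: e8 fb 0f 00 00 */
		return 0;
	}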
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
322 * what we wrote hit the chip before we compare it to the 322 * what we wrote hit the chip before we compare it to the
323 * counter. 323 * counter.
324 */ 324 */
325 WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt); 325 WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
326 326
327 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; 327 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
328} 328}
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
445{ 445{
446 446
447 if (request_irq(dev->irq, hpet_interrupt_handler, 447 if (request_irq(dev->irq, hpet_interrupt_handler,
448 IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev)) 448 IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
449 return -1; 449 return -1;
450 450
451 disable_irq(dev->irq); 451 disable_irq(dev->irq);
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f20608d4ca8..b0f61f0dcd0a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
58 stts(); 58 stts();
59} 59}
60 60
61void __init init_thread_xstate(void) 61void __cpuinit init_thread_xstate(void)
62{ 62{
63 if (!HAVE_HWFP) { 63 if (!HAVE_HWFP) {
64 xstate_size = sizeof(struct i387_soft_struct); 64 xstate_size = sizeof(struct i387_soft_struct);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 7a3f2028e2eb..1fec0f9b1508 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
 
 	cfg->vector = 0;
 	cpus_clear(cfg->domain);
+
+	if (likely(!cfg->move_in_progress))
+		return;
+	cpus_and(mask, cfg->old_domain, cpu_online_map);
+	for_each_cpu_mask_nr(cpu, mask) {
+		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+								vector++) {
+			if (per_cpu(vector_irq, cpu)[vector] != irq)
+				continue;
+			per_cpu(vector_irq, cpu)[vector] = -1;
+			break;
+		}
+	}
+	cfg->move_in_progress = 0;
 }
 
 void __setup_vector_irq(int cpu)
@@ -3594,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 int __init probe_nr_irqs(void)
 {
-	int idx;
-	int nr = 0;
-#ifndef CONFIG_XEN
-	int nr_min = 32;
-#else
-	int nr_min = NR_IRQS;
-#endif
-
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	/* double it for hotplug and msi and nmi */
-	nr <<= 1;
-
-	/* something wrong ? */
-	if (nr < nr_min)
-		nr = nr_min;
-	if (WARN_ON(nr > NR_IRQS))
-		nr = NR_IRQS;
-
-	return nr;
+	return NR_IRQS;
 }
 
 /* --------------------------------------------------------------------------
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 774ac4991568..1c9cc431ea4f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
128} 128}
129 129
130#ifdef CONFIG_X86_LOCAL_APIC 130#ifdef CONFIG_X86_LOCAL_APIC
131static void kvm_setup_secondary_clock(void) 131static void __devinit kvm_setup_secondary_clock(void)
132{ 132{
133 /* 133 /*
134 * Now that the first cpu already had this clocksource initialized, 134 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e1e731d78f38..d28bbdc35e4e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
1567 ++p; 1567 ++p;
1568 if (*p == '\0') 1568 if (*p == '\0')
1569 break; 1569 break;
1570 bridge = simple_strtol(p, &endp, 0); 1570 bridge = simple_strtoul(p, &endp, 0);
1571 if (p == endp) 1571 if (p == endp)
1572 break; 1572 break;
1573 1573
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..cc5a2545dd41 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
169 DMI_MATCH(DMI_BOARD_NAME, "0KW626"), 169 DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
170 }, 170 },
171 }, 171 },
172 { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
173 .callback = set_bios_reboot,
174 .ident = "Dell OptiPlex 330",
175 .matches = {
176 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
177 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
178 DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
179 },
180 },
172 { /* Handle problems with rebooting on Dell 2400's */ 181 { /* Handle problems with rebooting on Dell 2400's */
173 .callback = set_bios_reboot, 182 .callback = set_bios_reboot,
174 .ident = "Dell PowerEdge 2400", 183 .ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa6790c1dd3..9d5674f7b6cc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
764 .callback = dmi_low_memory_corruption, 764 .callback = dmi_low_memory_corruption,
765 .ident = "Phoenix BIOS", 765 .ident = "Phoenix BIOS",
766 .matches = { 766 .matches = {
767 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 767 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
768 }, 768 },
769 }, 769 },
770#endif 770#endif
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index a03e7f6d90c3..10786af95545 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -6,6 +6,7 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/stacktrace.h> 7#include <linux/stacktrace.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/uaccess.h>
9#include <asm/stacktrace.h> 10#include <asm/stacktrace.h>
10 11
11static void save_stack_warning(void *data, char *msg) 12static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
83 trace->entries[trace->nr_entries++] = ULONG_MAX; 84 trace->entries[trace->nr_entries++] = ULONG_MAX;
84} 85}
85EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 86EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
87
88/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
89
90struct stack_frame {
91 const void __user *next_fp;
92 unsigned long ret_addr;
93};
94
95static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
96{
97 int ret;
98
99 if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
100 return 0;
101
102 ret = 1;
103 pagefault_disable();
104 if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
105 ret = 0;
106 pagefault_enable();
107
108 return ret;
109}
110
111static inline void __save_stack_trace_user(struct stack_trace *trace)
112{
113 const struct pt_regs *regs = task_pt_regs(current);
114 const void __user *fp = (const void __user *)regs->bp;
115
116 if (trace->nr_entries < trace->max_entries)
117 trace->entries[trace->nr_entries++] = regs->ip;
118
119 while (trace->nr_entries < trace->max_entries) {
120 struct stack_frame frame;
121
122 frame.next_fp = NULL;
123 frame.ret_addr = 0;
124 if (!copy_stack_frame(fp, &frame))
125 break;
126 if ((unsigned long)fp < regs->sp)
127 break;
128 if (frame.ret_addr) {
129 trace->entries[trace->nr_entries++] =
130 frame.ret_addr;
131 }
132 if (fp == frame.next_fp)
133 break;
134 fp = frame.next_fp;
135 }
136}
137
138void save_stack_trace_user(struct stack_trace *trace)
139{
140 /*
141 * Trace user stack if we are not a kernel thread
142 */
143 if (current->mm) {
144 __save_stack_trace_user(trace);
145 }
146 if (trace->nr_entries < trace->max_entries)
147 trace->entries[trace->nr_entries++] = ULONG_MAX;
148}
149
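
__save_stack_trace_user() above walks the user frame-pointer chain: each
frame begins with the saved frame pointer followed by the return address,
so following next_fp while it moves toward higher addresses yields one
return address per frame. The same walk over our own stack, as a user-space
sketch (assumes gcc with frame pointers, i.e. -fno-omit-frame-pointer;
illustrative only):

	#include <stdio.h>

	struct stack_frame {
		struct stack_frame *next_fp;	/* saved frame pointer */
		unsigned long ret_addr;		/* saved return address */
	};

	static void __attribute__((noinline)) dump_callers(void)
	{
		struct stack_frame *fp = __builtin_frame_address(0);

		while (fp && fp->ret_addr) {
			printf("return address: %#lx\n", fp->ret_addr);
			/* frames must move up the stack, or we loop */
			if (fp->next_fp <= fp)
				break;
			fp = fp->next_fp;
		}
	}

	int main(void)
	{
		dump_callers();
		return 0;
	}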
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
46 cycles_t start, now, prev, end; 46 cycles_t start, now, prev, end;
47 int i; 47 int i;
48 48
49 rdtsc_barrier();
49 start = get_cycles(); 50 start = get_cycles();
51 rdtsc_barrier();
50 /* 52 /*
51 * The measurement runs for 20 msecs: 53 * The measurement runs for 20 msecs:
52 */ 54 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
61 */ 63 */
62 __raw_spin_lock(&sync_lock); 64 __raw_spin_lock(&sync_lock);
63 prev = last_tsc; 65 prev = last_tsc;
66 rdtsc_barrier();
64 now = get_cycles(); 67 now = get_cycles();
68 rdtsc_barrier();
65 last_tsc = now; 69 last_tsc = now;
66 __raw_spin_unlock(&sync_lock); 70 __raw_spin_unlock(&sync_lock);
67 71
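
The rdtsc_barrier() calls added above fence the TSC reads on both sides:
RDTSC is not a serializing instruction, so the CPU may execute it earlier
or later than program order, which is enough to produce false warp reports
when two CPUs timestamp under a lock. The shape of a fenced read, as a
sketch (x86-64, gcc inline asm; the kernel actually selects LFENCE or
MFENCE per vendor via alternatives):

	static inline unsigned long long rdtsc_ordered(void)
	{
		unsigned int lo, hi;

		asm volatile("lfence" ::: "memory");	/* drain older insns */
		asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
		asm volatile("lfence" ::: "memory");	/* fence younger ones */
		return ((unsigned long long)hi << 32) | lo;
	}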
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 0b8b6690a86d..6f3d3d4cd973 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -17,6 +17,9 @@
17 * want per guest time just set the kernel.vsyscall64 sysctl to 0. 17 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
18 */ 18 */
19 19
20/* Disable profiling for userspace code: */
21#define DISABLE_BRANCH_PROFILING
22
20#include <linux/time.h> 23#include <linux/time.h>
21#include <linux/init.h> 24#include <linux/init.h>
22#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index b13acb75e822..15c3e6999182 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
310/* 310/*
311 * Enable and initialize the xsave feature. 311 * Enable and initialize the xsave feature.
312 */ 312 */
313void __init xsave_cntxt_init(void) 313void __ref xsave_cntxt_init(void)
314{ 314{
315 unsigned int eax, ebx, ecx, edx; 315 unsigned int eax, ebx, ecx, edx;
316 316
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ce3251ce5504..b81125f0bdee 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
20config KVM 20config KVM
21 tristate "Kernel-based Virtual Machine (KVM) support" 21 tristate "Kernel-based Virtual Machine (KVM) support"
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 # for device assignment:
24 depends on PCI
23 select PREEMPT_NOTIFIERS 25 select PREEMPT_NOTIFIERS
24 select MMU_NOTIFIER 26 select MMU_NOTIFIER
25 select ANON_INODES 27 select ANON_INODES
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 8772dc946823..59ebd37ad79e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
548 mutex_lock(&kvm->lock); 548 mutex_lock(&kvm->lock);
549 pit->irq_source_id = kvm_request_irq_source_id(kvm); 549 pit->irq_source_id = kvm_request_irq_source_id(kvm);
550 mutex_unlock(&kvm->lock); 550 mutex_unlock(&kvm->lock);
551 if (pit->irq_source_id < 0) 551 if (pit->irq_source_id < 0) {
552 kfree(pit);
552 return NULL; 553 return NULL;
554 }
553 555
554 mutex_init(&pit->pit_state.lock); 556 mutex_init(&pit->pit_state.lock);
555 mutex_lock(&pit->pit_state.lock); 557 mutex_lock(&pit->pit_state.lock);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a5e64881d9b..f1983d9477cd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
314 if (r) 314 if (r)
315 goto out; 315 goto out;
316 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, 316 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
317 rmap_desc_cache, 1); 317 rmap_desc_cache, 4);
318 if (r) 318 if (r)
319 goto out; 319 goto out;
320 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); 320 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2643b430d83a..d06b4dc0e2ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3564,7 +3564,8 @@ static int __init vmx_init(void)
3564 bypass_guest_pf = 0; 3564 bypass_guest_pf = 0;
3565 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3565 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3566 VMX_EPT_WRITABLE_MASK | 3566 VMX_EPT_WRITABLE_MASK |
3567 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3567 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
3568 VMX_EPT_IGMT_BIT);
3568 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, 3569 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3569 VMX_EPT_EXECUTABLE_MASK); 3570 VMX_EPT_EXECUTABLE_MASK);
3570 kvm_enable_tdp(); 3571 kvm_enable_tdp();
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 3e010d21fdd7..ec5edc339da6 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
352#define VMX_EPT_READABLE_MASK 0x1ull 352#define VMX_EPT_READABLE_MASK 0x1ull
353#define VMX_EPT_WRITABLE_MASK 0x2ull 353#define VMX_EPT_WRITABLE_MASK 0x2ull
354#define VMX_EPT_EXECUTABLE_MASK 0x4ull 354#define VMX_EPT_EXECUTABLE_MASK 0x4ull
355#define VMX_EPT_IGMT_BIT (1ull << 6)
355 356
356#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 357#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
357 358
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0e331652681e..52145007bd7e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -7,6 +7,7 @@
7 * This file provides all the same external entries as smp.c but uses 7 * This file provides all the same external entries as smp.c but uses
8 * the voyager hal to provide the functionality 8 * the voyager hal to provide the functionality
9 */ 9 */
10#include <linux/cpu.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/mm.h> 12#include <linux/mm.h>
12#include <linux/kernel_stat.h> 13#include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
1790 x86_write_percpu(cpu_number, hard_smp_processor_id()); 1791 x86_write_percpu(cpu_number, hard_smp_processor_id());
1791} 1792}
1792 1793
1794static void voyager_send_call_func(cpumask_t callmask)
1795{
1796 __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
1797 send_CPI(mask, VIC_CALL_FUNCTION_CPI);
1798}
1799
1800static void voyager_send_call_func_single(int cpu)
1801{
1802 send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
1803}
1804
1793struct smp_ops smp_ops = { 1805struct smp_ops smp_ops = {
1794 .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, 1806 .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
1795 .smp_prepare_cpus = voyager_smp_prepare_cpus, 1807 .smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
1799 .smp_send_stop = voyager_smp_send_stop, 1811 .smp_send_stop = voyager_smp_send_stop,
1800 .smp_send_reschedule = voyager_smp_send_reschedule, 1812 .smp_send_reschedule = voyager_smp_send_reschedule,
1801 1813
1802 .send_call_func_ipi = native_send_call_func_ipi, 1814 .send_call_func_ipi = voyager_send_call_func,
1803 .send_call_func_single_ipi = native_send_call_func_single_ipi, 1815 .send_call_func_single_ipi = voyager_send_call_func_single,
1804}; 1816};
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fea4565ff576..d8cc96a2738f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
8 8
9obj-$(CONFIG_HIGHMEM) += highmem_32.o 9obj-$(CONFIG_HIGHMEM) += highmem_32.o
10 10
11obj-$(CONFIG_MMIOTRACE_HOOKS) += kmmio.o
12obj-$(CONFIG_MMIOTRACE) += mmiotrace.o 11obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
13mmiotrace-y := pf_in.o mmio-mod.o 12mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
14obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o 13obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
15 14
16obj-$(CONFIG_NUMA) += numa_$(BITS).o 15obj-$(CONFIG_NUMA) += numa_$(BITS).o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 31e8730fa246..4152d3c3b138 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -53,7 +53,7 @@
53 53
54static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) 54static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
55{ 55{
56#ifdef CONFIG_MMIOTRACE_HOOKS 56#ifdef CONFIG_MMIOTRACE
57 if (unlikely(is_kmmio_active())) 57 if (unlikely(is_kmmio_active()))
58 if (kmmio_handler(regs, addr) == 1) 58 if (kmmio_handler(regs, addr) == 1)
59 return -1; 59 return -1;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 847c164725f4..8518c678d83f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
222 } 222 }
223} 223}
224 224
225#ifdef CONFIG_HIBERNATION
226/**
227 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
228 * during resume from hibernation
229 * @pgd_base - temporary resume page directory
230 */
231void resume_map_numa_kva(pgd_t *pgd_base)
232{
233 int node;
234
235 for_each_online_node(node) {
236 unsigned long start_va, start_pfn, size, pfn;
237
238 start_va = (unsigned long)node_remap_start_vaddr[node];
239 start_pfn = node_remap_start_pfn[node];
240 size = node_remap_size[node];
241
242 printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
243
244 for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
245 unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
246 pgd_t *pgd = pgd_base + pgd_index(vaddr);
247 pud_t *pud = pud_offset(pgd, vaddr);
248 pmd_t *pmd = pmd_offset(pud, vaddr);
249
250 set_pmd(pmd, pfn_pmd(start_pfn + pfn,
251 PAGE_KERNEL_LARGE_EXEC));
252
253 printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
254 __FUNCTION__, vaddr, start_pfn + pfn);
255 }
256 }
257}
258#endif
259
225static unsigned long calculate_numa_remap_pages(void) 260static unsigned long calculate_numa_remap_pages(void)
226{ 261{
227 int nid; 262 int nid;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 3f1b81a83e2e..716d26f0e5d4 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
69 int i; 69 int i;
70 70
71 if (!reset_value) { 71 if (!reset_value) {
72 reset_value = kmalloc(sizeof(unsigned) * num_counters, 72 reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
73 GFP_ATOMIC); 73 GFP_ATOMIC);
74 if (!reset_value) 74 if (!reset_value)
75 return; 75 return;
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index f2b6e3f11bfc..81197c62d5b3 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -12,6 +12,7 @@
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmzone.h>
15 16
16/* Defined in hibernate_asm_32.S */ 17/* Defined in hibernate_asm_32.S */
17extern int restore_image(void); 18extern int restore_image(void);
@@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
127 } 128 }
128 } 129 }
129 } 130 }
131
132 resume_map_numa_kva(pgd_base);
133
130 return 0; 134 return 0;
131} 135}
132 136
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 1ef0f90813d6..d9d35824c56f 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -9,6 +9,9 @@
9 * Also alternative() doesn't work. 9 * Also alternative() doesn't work.
10 */ 10 */
11 11
12/* Disable profiling for userspace code: */
13#define DISABLE_BRANCH_PROFILING
14
12#include <linux/kernel.h> 15#include <linux/kernel.h>
13#include <linux/posix-timers.h> 16#include <linux/posix-timers.h>
14#include <linux/time.h> 17#include <linux/time.h>
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 688936044dc9..636ef4caa52d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
661 * For 64-bit, we must skip the Xen hole in the middle of the address 661 * For 64-bit, we must skip the Xen hole in the middle of the address
662 * space, just after the big x86-64 virtual hole. 662 * space, just after the big x86-64 virtual hole.
663 */ 663 */
664static int xen_pgd_walk(struct mm_struct *mm, 664static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
665 int (*func)(struct mm_struct *mm, struct page *, 665 int (*func)(struct mm_struct *mm, struct page *,
666 enum pt_level), 666 enum pt_level),
667 unsigned long limit) 667 unsigned long limit)
668{ 668{
669 pgd_t *pgd = mm->pgd;
670 int flush = 0; 669 int flush = 0;
671 unsigned hole_low, hole_high; 670 unsigned hole_low, hole_high;
672 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; 671 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@@ -753,6 +752,14 @@ out:
753 return flush; 752 return flush;
754} 753}
755 754
755static int xen_pgd_walk(struct mm_struct *mm,
756 int (*func)(struct mm_struct *mm, struct page *,
757 enum pt_level),
758 unsigned long limit)
759{
760 return __xen_pgd_walk(mm, mm->pgd, func, limit);
761}
762
756/* If we're using split pte locks, then take the page's lock and 763/* If we're using split pte locks, then take the page's lock and
757 return a pointer to it. Otherwise return NULL. */ 764 return a pointer to it. Otherwise return NULL. */
758static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) 765static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
@@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
854 861
855 xen_mc_batch(); 862 xen_mc_batch();
856 863
857 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { 864 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
858 /* re-enable interrupts for flushing */ 865 /* re-enable interrupts for flushing */
859 xen_mc_issue(0); 866 xen_mc_issue(0);
860 867
@@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
998 PT_PMD); 1005 PT_PMD);
999#endif 1006#endif
1000 1007
1001 xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT); 1008 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1002 1009
1003 xen_mc_issue(0); 1010 xen_mc_issue(0);
1004} 1011}